Issues using Block Sparse Multithreading

Hello,
This is my first question here on the ITensor forum, and English isn’t my native language, so sorry for any mistakes.
I am performing some quantum-number-conserving DMRG calculations, and I used the ITensors.enable_threaded_blocksparse() command, as suggested in the documentation. I also used the


BLAS.set_num_threads(1)

command to avoid some warnings (also suggested in the documentation).
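
For reference, the threading-related setup at the top of my script is roughly the sketch below (assuming Julia is started with multiple threads, e.g. julia -t 8):

using ITensors
using LinearAlgebra

BLAS.set_num_threads(1)                  # keep BLAS single-threaded, as the docs suggest
ITensors.enable_threaded_blocksparse()   # multithread the block-sparse contractions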
But when I execute my code, it returns a long error message:

ERROR: TaskFailedException

    nested task error: BoundsError: attempt to access 8-element Vector{Vector{Tuple{Block{2}, Block{2}, Block{2}}}} at index [9]
    Stacktrace:
     [1] getindex
       @ ./essentials.jl:13 [inlined]
     [2] macro expansion
       @ ~/.julia/packages/NDTensors/gdQSG/src/blocksparse/contract_threads.jl:38 [inlined]
     [3] (::NDTensors.var"#187#189"{Vector{Vector{Tuple{Block{2}, Block{2}, Block{2}}}}, Tuple{Int64, Int64}, Tuple{Int64, Int64}, Tuple{Int64, Int64}, Val{2}, Dictionaries.Indices{Block{2}}, Vector{Block{2}}})()
       @ NDTensors ./threadingconstructs.jl:410
Stacktrace:
  [1] sync_end(c::Channel{Any})
    @ Base ./task.jl:445
  [2] macro expansion
    @ ./task.jl:477 [inlined]
  [3] contract_blocks!(alg::NDTensors.BackendSelection.Algorithm{:threaded_threads, NamedTuple{(), Tuple{}}}, contraction_plans::Vector{Vector{Tuple{Block{2}, Block{2}, Block{2}}}}, boffs1::Dictionaries.Dictionary{Block{2}, Int64}, boffs2::Dictionaries.Dictionary{Block{2}, Int64}, labels1_to_labels2::Tuple{Int64, Int64}, labels1_to_labelsR::Tuple{Int64, Int64}, labels2_to_labelsR::Tuple{Int64, Int64}, ValNR::Val{2})
    @ NDTensors ~/.julia/packages/NDTensors/gdQSG/src/blocksparse/contract_threads.jl:34
  [4] contract_blockoffsets(alg::NDTensors.BackendSelection.Algorithm{:threaded_threads, NamedTuple{(), Tuple{}}}, boffs1::Dictionaries.Dictionary{Block{2}, Int64}, inds1::Tuple{Index{Vector{Pair{QN, Int64}}}, Index{Vector{Pair{QN, Int64}}}}, labels1::Tuple{Int64, Int64}, boffs2::Dictionaries.Dictionary{Block{2}, Int64}, inds2::Tuple{Index{Vector{Pair{QN, Int64}}}, Index{Vector{Pair{QN, Int64}}}}, labels2::Tuple{Int64, Int64}, indsR::Tuple{Index{Vector{Pair{QN, Int64}}}, Index{Vector{Pair{QN, Int64}}}}, labelsR::Tuple{Int64, Int64})
    @ NDTensors ~/.julia/packages/NDTensors/gdQSG/src/blocksparse/contract_generic.jl:43
  [5] contract_blockoffsets
    @ ~/.julia/packages/NDTensors/gdQSG/src/blocksparse/contract.jl:52 [inlined]
  [6] contraction_output(tensor1::NDTensors.BlockSparseTensor{Float64, 2, Tuple{Index{Vector{Pair{QN, Int64}}}, Index{Vector{Pair{QN, Int64}}}}, NDTensors.BlockSparse{Float64, Vector{Float64}, 2}}, labelstensor1::Tuple{Int64, Int64}, tensor2::NDTensors.BlockSparseTensor{Float64, 2, Tuple{Index{Vector{Pair{QN, Int64}}}, Index{Vector{Pair{QN, Int64}}}}, NDTensors.BlockSparse{Float64, Vector{Float64}, 2}}, labelstensor2::Tuple{Int64, Int64}, labelsR::Tuple{Int64, Int64})
    @ NDTensors ~/.julia/packages/NDTensors/gdQSG/src/blocksparse/contract.jl:29
  [7] contract(tensor1::NDTensors.BlockSparseTensor{Float64, 2, Tuple{Index{Vector{Pair{QN, Int64}}}, Index{Vector{Pair{QN, Int64}}}}, NDTensors.BlockSparse{Float64, Vector{Float64}, 2}}, labelstensor1::Tuple{Int64, Int64}, tensor2::NDTensors.BlockSparseTensor{Float64, 2, Tuple{Index{Vector{Pair{QN, Int64}}}, Index{Vector{Pair{QN, Int64}}}}, NDTensors.BlockSparse{Float64, Vector{Float64}, 2}}, labelstensor2::Tuple{Int64, Int64}, labelsR::Tuple{Int64, Int64})
    @ NDTensors ~/.julia/packages/NDTensors/gdQSG/src/blocksparse/contract.jl:10
  [8] contract(tensor1::NDTensors.BlockSparseTensor{Float64, 2, Tuple{Index{Vector{Pair{QN, Int64}}}, Index{Vector{Pair{QN, Int64}}}}, NDTensors.BlockSparse{Float64, Vector{Float64}, 2}}, labelstensor1::Tuple{Int64, Int64}, tensor2::NDTensors.BlockSparseTensor{Float64, 2, Tuple{Index{Vector{Pair{QN, Int64}}}, Index{Vector{Pair{QN, Int64}}}}, NDTensors.BlockSparse{Float64, Vector{Float64}, 2}}, labelstensor2::Tuple{Int64, Int64})
    @ NDTensors ~/.julia/packages/NDTensors/gdQSG/src/blocksparse/contract.jl:10
  [9] _contract(A::NDTensors.BlockSparseTensor{Float64, 2, Tuple{Index{Vector{Pair{QN, Int64}}}, Index{Vector{Pair{QN, Int64}}}}, NDTensors.BlockSparse{Float64, Vector{Float64}, 2}}, B::NDTensors.BlockSparseTensor{Float64, 2, Tuple{Index{Vector{Pair{QN, Int64}}}, Index{Vector{Pair{QN, Int64}}}}, NDTensors.BlockSparse{Float64, Vector{Float64}, 2}})
    @ ITensors ~/.julia/packages/ITensors/oOwvi/src/tensor_operations/tensor_algebra.jl:3
 [10] _contract(A::ITensor, B::ITensor)
    @ ITensors ~/.julia/packages/ITensors/oOwvi/src/tensor_operations/tensor_algebra.jl:9
 [11] contract(A::ITensor, B::ITensor)
    @ ITensors ~/.julia/packages/ITensors/oOwvi/src/tensor_operations/tensor_algebra.jl:74
 [12] *
    @ ~/.julia/packages/ITensors/oOwvi/src/tensor_operations/tensor_algebra.jl:61 [inlined]
 [13] product(A::ITensor, B::ITensor; apply_dag::Bool)
    @ ITensors ~/.julia/packages/ITensors/oOwvi/src/tensor_operations/tensor_algebra.jl:605
 [14] product(A::ITensor, B::ITensor)
    @ ITensors ~/.julia/packages/ITensors/oOwvi/src/tensor_operations/tensor_algebra.jl:580
 [15] op(name::String, s::Index{Vector{Pair{QN, Int64}}}; adjoint::Bool, kwargs::Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
    @ ITensors.SiteTypes ~/.julia/packages/ITensors/oOwvi/src/lib/SiteTypes/src/sitetype.jl:288
 [16] op
    @ ~/.julia/packages/ITensors/oOwvi/src/lib/SiteTypes/src/sitetype.jl:236 [inlined]
 [17] #op#30
    @ ~/.julia/packages/ITensors/oOwvi/src/lib/SiteTypes/src/sitetype.jl:449 [inlined]
 [18] op(s::Index{Vector{Pair{QN, Int64}}}, opname::String)
    @ ITensors.SiteTypes ~/.julia/packages/ITensors/oOwvi/src/lib/SiteTypes/src/sitetype.jl:449
 [19] (::ITensors.ITensorMPS.var"#calcQN#549"{Vector{Index{Vector{Pair{QN, Int64}}}}, Dict{Pair{String, Int64}, ITensor}})(term::Vector{Op})
    @ ITensors.ITensorMPS ~/.julia/packages/ITensors/oOwvi/src/lib/ITensorMPS/src/opsum_to_mpo/opsum_to_mpo_qn.jl:27
 [20] qn_svdMPO(ValType::Type{Float64}, os::Sum{Scaled{ComplexF64, Prod{Op}}}, sites::Vector{Index{Vector{Pair{QN, Int64}}}}; mindim::Int64, maxdim::Int64, cutoff::Float64)
    @ ITensors.ITensorMPS ~/.julia/packages/ITensors/oOwvi/src/lib/ITensorMPS/src/opsum_to_mpo/opsum_to_mpo_qn.jl:52
 [21] qn_svdMPO
    @ ~/.julia/packages/ITensors/oOwvi/src/lib/ITensorMPS/src/opsum_to_mpo/opsum_to_mpo_qn.jl:5 [inlined]
 [22] qn_svdMPO(os::Sum{Scaled{ComplexF64, Prod{Op}}}, sites::Vector{Index{Vector{Pair{QN, Int64}}}}; kwargs::Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
    @ ITensors.ITensorMPS ~/.julia/packages/ITensors/oOwvi/src/lib/ITensorMPS/src/opsum_to_mpo/opsum_to_mpo_qn.jl:260
 [23] qn_svdMPO
    @ ~/.julia/packages/ITensors/oOwvi/src/lib/ITensorMPS/src/opsum_to_mpo/opsum_to_mpo_qn.jl:257 [inlined]
 [24] MPO(os::Sum{Scaled{ComplexF64, Prod{Op}}}, sites::Vector{Index{Vector{Pair{QN, Int64}}}}; splitblocks::Bool, kwargs::Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
    @ ITensors.ITensorMPS ~/.julia/packages/ITensors/oOwvi/src/lib/ITensorMPS/src/opsum_to_mpo/opsum_to_mpo_generic.jl:297
 [25] MPO(os::Sum{Scaled{ComplexF64, Prod{Op}}}, sites::Vector{Index{Vector{Pair{QN, Int64}}}})
    @ ITensors.ITensorMPS ~/.julia/packages/ITensors/oOwvi/src/lib/ITensorMPS/src/opsum_to_mpo/opsum_to_mpo_generic.jl:289
 [26] Npart_DMRG(Nsites::Int64, t::Int64, U::Int64, ed::Float64, Npart::Float64)
    @ Main ~/Documentos/ED_em_Julia/IC_Chemical_Potential/DMRG/DMRG_Hubbard.jl:77
 [27] Track_N(nsites::Int64, ed_arr::LinRange{Float64, Int64}, t::Int64, U::Int64)
    @ Main ~/Documentos/ED_em_Julia/IC_Chemical_Potential/DMRG/DMRG_Hubbard_TrackN.jl:32
 [28] Roda_N(U::Int64, nsites_arr::Vector{Int64}, filenames::Vector{String})
    @ Main ~/Documentos/ED_em_Julia/IC_Chemical_Potential/DMRG/DMRG_Hubbard_TrackN.jl:86
 [29] top-level scope
    @ ~/Documentos/ED_em_Julia/IC_Chemical_Potential/DMRG/DMRG_Hubbard_TrackN.jl:106

I honestly don’t understand this error message (I am new to Julia, by the way).
Here is a piece of code that should reproduce mine.

using Pkg

Pkg.add("ITensors")
Pkg.add("Plots")
Pkg.add("CSV")
Pkg.add("DataFrames")
Pkg.add("SparseArrays")
Pkg.add("Arpack")
Pkg.add("LinearAlgebra")
Pkg.add("ProgressBars")

Pkg.instantiate()

using ITensors, Plots, SparseArrays, Arpack, CSV, DataFrames, LinearAlgebra, ProgressBars

# The number of Julia threads has to be set at startup (e.g. julia -t 8 or the
# JULIA_NUM_THREADS environment variable); it cannot be changed from inside a session.
Threads.nthreads()

ITensors.enable_threaded_blocksparse()

NDTensors.Strided.get_num_threads()
BLAS.set_num_threads(1)

function Track_N(nsites, ed_arr, t, U)

    GS_trial, Npart_trial_exp = Hubbard_DMRG(nsites, t, U, ed_arr[1]) # This function basically performs a DMRG run without conserving the number of particles
    Npart_trial = round(sum(Npart_trial_exp))

    global GS_Nconserve = Npart_DMRG(nsites, t, U, ed_arr[1], Npart_trial)
    global GS_Nconservep1 = Npart_DMRG(nsites, t, U, ed_arr[1], Npart_trial + 1)
    global GS_Nconservem1 = Npart_DMRG(nsites, t, U, ed_arr[1], Npart_trial - 1)
        
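    # Compare the N-conserving ground-state energies at Npart_trial and
    # Npart_trial ± 1 particles and set Npart accordingly: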
    if GS_Nconserve < GS_Nconservem1 || GS_Nconserve < GS_Nconservep1
        global Npart = Npart_trial
    elseif GS_Nconservem1 < GS_Nconserve
        global Npart = Npart_trial - 1
    else
        global Npart = Npart_trial + 1
    end

    Npart_arr = zeros(Float64, 0)

    append!(Npart_arr, Npart)
    for  i in tqdm(eachindex(ed_arr)[2:length(ed_arr)]) #The tqdm creates a progress bar in the output
        local GS_Nconserve = Npart_DMRG(nsites, t, U, ed_arr[i], Npart)
        local GS_Nconservep1 = Npart_DMRG(nsites, t, U, ed_arr[i], Npart + 1)
        local GS_Nconservem1 = Npart_DMRG(nsites, t, U, ed_arr[i], Npart - 1)


        if GS_Nconserve < GS_Nconservem1 && GS_Nconserve < GS_Nconservep1
            global Npart += 0
        elseif GS_Nconservem1 < GS_Nconserve
            global Npart -= 1
        elseif GS_Nconservep1 < GS_Nconserve
            global Npart += 1
        end
        append!(Npart_arr, Npart)

    end

    Chem_pot = zeros(Float64, 0)
    for i in tqdm(eachindex(ed_arr))
        GSp1 = Npart_DMRG(nsites, t, U, ed_arr[i], Npart_arr[i]+1)
        GSm1 = Npart_DMRG(nsites, t, U, ed_arr[i], Npart_arr[i]-1)

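        # Central-difference estimate of the chemical potential from the
        # ground-state energies: mu ≈ [E(N+1) - E(N-1)] / 2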
        mu = (GSp1 - GSm1)/2

        append!(Chem_pot, mu)
    end
    return Chem_pot

end

Nsites = 5

t = 1
u = 10
ed_arr = [-u, 0, u]

mu = Track_N(Nsites, ed_arr, t, u)

I hope this is minimal enough.
Thanks in advance!

Hello, I tried to run your code block, but Hubbard_DMRG is not defined. Thanks!

Thank you for the reply.
Hubbard_DMRG is a function from an auxiliary file. Here it is:

function Hubbard_DMRG(Nsites, t, U, ed)
    sites = siteinds("Electron", Nsites, conserve_nf = false)

    os = OpSum()
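    # On-site energy (-ed), nearest-neighbor hopping (-t) and on-site Hubbard
    # interaction (U) for a chain of "Electron" sites: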
    for j=1:Nsites
        os += -ed, "Nup",j
        os += -ed,"Ndn",j
    end

    for j=1:Nsites-1
        os += -t, "Cdagup",j+1, "Cup", j
        os += -t, "Cdagup",j, "Cup", j+1

        os += -t,"Cdagdn",j+1, "Cdn", j
        os += -t,"Cdagdn",j, "Cdn", j+1
    end

    for j=1:Nsites
        os += U, "Nupdn",j
    end

    H = MPO(os,sites)

    psi0 = randomMPS(sites; linkdims = 10) # This randomMPS call only works if conserve_qns = false

    #state = [isodd(n) ? "Up" : "Dn" for n=1:Nsites]
    #psi0 = MPS(sites,state)

    nsweeps = 5
    maxdim = [10, 20, 100, 200, 400, 500]
    cutoff = [1E-12]

    GS_energy, GS = dmrg(H,psi0;nsweeps,maxdim,cutoff, outputlevel = 0)

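    # expect returns the per-site ⟨Ntot⟩; Track_N sums these values to estimate
    # the total number of particles in the ground state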
    Nexp = expect(GS, "Ntot")

    return GS_energy, Nexp
end

function Npart_DMRG(Nsites, t, U, ed, Npart)
    sites = siteinds("Electron", Nsites, conserve_qns = true)

    os = OpSum()
    for j=1:Nsites
        os += -ed, "Nup",j
        os += -ed,"Ndn",j
    end

    for j=1:Nsites-1
        os += -t, "Cdagup",j+1, "Cup", j
        os += -t, "Cdagup",j, "Cup", j+1

        os += -t,"Cdagdn",j+1, "Cdn", j
        os += -t,"Cdagdn",j, "Cdn", j+1
    end

    for j=1:Nsites
        os += U, "Nupdn",j
    end

    H = MPO(os,sites)

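    # Build an initial product state with Npart particles, filling from the right:
    # doubly occupy a site whenever more particles remain than sites left, otherwise
    # place a single Up/Dn (alternating with site parity) until all particles are used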
    state = ["Emp" for n in 1:Nsites]
    p = Npart
    for i in Nsites:-1:1
        if p > i
            state[i] = "UpDn"
            p -= 2
        elseif p > 0
            state[i] = (isodd(i) ? "Up" : "Dn")
            p -= 1
        end
    end
    psi0 = randomMPS(sites,state, linkdims = 10)

    nsweeps = 6
    maxdim = [10, 20, 100, 200, 400, 800]
    cutoff = [1E-12]

    GS_energy, GS = dmrg(H,psi0;nsweeps,maxdim,cutoff, outputlevel = 0)

    return GS_energy

end

Thank you again and sorry for the missing information.

Hi Luan,

I was able to run your example successfully and I did not experience any errors. Can you provide any additional information? What Julia version are you running?

Thanks!
Karl

Hi Karl,
Sorry for the late answer.
My Julia version is 1.9.3, and I am using Ubuntu 22.04.
I noticed that I mistyped something in my first question. The error occurs when I set

NDTensors.Strided.disable_threads()

instead of

NDTensors.Strided.get_num_threads()
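
In other words, my script only fails with a header roughly like this sketch (the rest of the code is unchanged; here I reach NDTensors through ITensors, while my actual script calls NDTensors.Strided.disable_threads() directly):

using ITensors
using LinearAlgebra

BLAS.set_num_threads(1)
ITensors.enable_threaded_blocksparse()

# This is the call that triggers the BoundsError for me; replacing it with
# ITensors.NDTensors.Strided.get_num_threads(), which only queries the Strided
# thread count, makes the error go away.
ITensors.NDTensors.Strided.disable_threads()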

Thank you!