diff --git a/.JuliaFormatter.toml b/.JuliaFormatter.toml
deleted file mode 100644
index 9613e05..0000000
--- a/.JuliaFormatter.toml
+++ /dev/null
@@ -1 +0,0 @@
-style = "yas"
\ No newline at end of file
diff --git a/.github/workflows/FormatCheck.yml b/.github/workflows/FormatCheck.yml
index c3ed120..383c502 100644
--- a/.github/workflows/FormatCheck.yml
+++ b/.github/workflows/FormatCheck.yml
@@ -1,47 +1,15 @@
-name: FormatCheck
+name: 'Format'

 on:
-  push:
-    branches:
-      - 'main'
-      - 'master'
-      - 'release-'
-    tags: '*'
-  pull_request:
+  pull_request_target:
+    paths: ['**/*.jl']
+    types: [opened, synchronize, reopened, ready_for_review]

-jobs:
-  build:
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        version:
-          - '1' # automatically expands to the latest stable 1.x release of Julia
-        os:
-          - ubuntu-latest
-        arch:
-          - x64
-    steps:
-      - uses: julia-actions/setup-julia@latest
-        with:
-          version: ${{ matrix.version }}
-          arch: ${{ matrix.arch }}
+permissions:
+  contents: read
+  actions: write
+  pull-requests: write

-      - uses: actions/checkout@v4
-      - name: Install JuliaFormatter and format
-        # This will use the latest version by default but you can set the version like so:
-        #
-        # julia -e 'using Pkg; Pkg.add(PackageSpec(name="JuliaFormatter", version="0.13.0"))'
-        run: |
-          julia -e 'using Pkg; Pkg.add(PackageSpec(name="JuliaFormatter"))'
-          julia -e 'using JuliaFormatter; format(".", verbose=true)'
-      - name: Format check
-        run: |
-          julia -e '
-          out = Cmd(`git diff --name-only`) |> read |> String
-          if out == ""
-              exit(0)
-          else
-              @error "Some files have not been formatted !!!"
-              write(stdout, out)
-              exit(1)
-          end'
+jobs:
+  formatcheck:
+    uses: "QuantumKitHub/QuantumKitHubActions/.github/workflows/FormatCheck.yml@main"
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..3e2823c
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,5 @@
+repos:
+  - repo: https://github.com/fredrikekre/runic-pre-commit
+    rev: v2.0.1
+    hooks:
+      - id: runic
diff --git a/src/SparseArrayKit.jl b/src/SparseArrayKit.jl
index fc45fa5..5493fa3 100644
--- a/src/SparseArrayKit.jl
+++ b/src/SparseArrayKit.jl
@@ -18,7 +18,7 @@ include("linearalgebra.jl")
 #-----------------
 using PackageExtensionCompat
 function __init__()
-    @require_extensions
+    return @require_extensions
 end

 end
diff --git a/src/base.jl b/src/base.jl
index 4d4c74b..c25e613 100644
--- a/src/base.jl
+++ b/src/base.jl
@@ -10,10 +10,10 @@ function Base.:/(x::SparseArray, a::Number)
     return mul!(similar(x, Base.promote_eltypeof(a, x)), x, inv(a))
 end
 function Base.:+(x::SparseArray, y::SparseArray)
-    return (T=Base.promote_eltypeof(x, y); axpy!(+one(T), y, copy!(similar(x, T), x)))
+    return (T = Base.promote_eltypeof(x, y); axpy!(+one(T), y, copy!(similar(x, T), x)))
 end
 function Base.:-(x::SparseArray, y::SparseArray)
-    return (T=Base.promote_eltypeof(x, y); axpy!(-one(T), y, copy!(similar(x, T), x)))
+    return (T = Base.promote_eltypeof(x, y); axpy!(-one(T), y, copy!(similar(x, T), x)))
 end

 Base.:-(x::SparseArray) = LinearAlgebra.lmul!(-one(eltype(x)), copy(x))
@@ -21,7 +21,7 @@ Base.:-(x::SparseArray) = LinearAlgebra.lmul!(-one(eltype(x)), copy(x))
 Base.zero(x::SparseArray) = similar(x)
 Base.iszero(x::SparseArray) = nonzero_length(x) == 0

-function Base.one(x::SparseArray{<:Any,2})
+function Base.one(x::SparseArray{<:Any, 2})
     m, n = size(x)
     m == n ||
         throw(DimensionMismatch("multiplicative identity defined only for square matrices"))
diff --git a/src/linearalgebra.jl b/src/linearalgebra.jl
index 7c7b491..6f37e32 100644
--- a/src/linearalgebra.jl
+++ b/src/linearalgebra.jl
@@ -63,33 +63,34 @@ function LinearAlgebra.axpy!(α::Number, x::SparseArray, y::SparseArray)
     return y
 end

-function LinearAlgebra.norm(x::SparseArray, p::Real=2)
+function LinearAlgebra.norm(x::SparseArray, p::Real = 2)
     return norm(nonzero_values(x), p)
 end
 LinearAlgebra.dot(x::SparseArray, y::SparseArray) = inner(x, y)

 # matrix functions
-const SV{T} = SparseArray{T,1}
-const SM{T} = SparseArray{T,2}
-const ASM{T} = Union{SparseArray{T,2},
-                     Transpose{T,<:SparseArray{T,2}},
-                     Adjoint{T,<:SparseArray{T,2}}}
+const SV{T} = SparseArray{T, 1}
+const SM{T} = SparseArray{T, 2}
+const ASM{T} = Union{SparseArray{T, 2}, Transpose{T, <:SparseArray{T, 2}}, Adjoint{T, <:SparseArray{T, 2}}}

 LinearAlgebra.mul!(C::SM, A::ASM, B::ASM) = mul!(C, A, B, one(eltype(C)), zero(eltype(C)))
 function LinearAlgebra.mul!(C::SM, A::ASM, B::ASM, α::Number, β::Number)
     conjA = A isa Adjoint
     conjB = B isa Adjoint
-    oindA = A isa Union{Adjoint,Transpose} ? (2,) : (1,)
-    cindA = A isa Union{Adjoint,Transpose} ? (1,) : (2,)
-    oindB = B isa Union{Adjoint,Transpose} ? (1,) : (2,)
-    cindB = B isa Union{Adjoint,Transpose} ? (2,) : (1,)
+    oindA = A isa Union{Adjoint, Transpose} ? (2,) : (1,)
+    cindA = A isa Union{Adjoint, Transpose} ? (1,) : (2,)
+    oindB = B isa Union{Adjoint, Transpose} ? (1,) : (2,)
+    cindB = B isa Union{Adjoint, Transpose} ? (2,) : (1,)

-    AA = A isa Union{Adjoint,Transpose} ? parent(A) : A
-    BB = B isa Union{Adjoint,Transpose} ? parent(B) : B
+    AA = A isa Union{Adjoint, Transpose} ? parent(A) : A
+    BB = B isa Union{Adjoint, Transpose} ? parent(B) : B

-    return tensorcontract!(C, AA, (oindA, cindA), conjA, BB, (cindB, oindB), conjB,
-                           ((1, 2), ()), α,
-                           β)
+    return tensorcontract!(
+        C,
+        AA, (oindA, cindA), conjA,
+        BB, (cindB, oindB), conjB,
+        ((1, 2), ()), α, β
+    )
 end

 LinearAlgebra.adjoint!(C::SM, A::SM) = tensoradd!(C, A, ((2, 1), ()), true, One(), Zero())
diff --git a/src/sparsearray.jl b/src/sparsearray.jl
index 8ca1e13..b31e1de 100644
--- a/src/sparsearray.jl
+++ b/src/sparsearray.jl
@@ -1,20 +1,21 @@
 # simple wrapper to give indices a custom wrapping behaviour
-struct SparseArray{T,N} <: AbstractArray{T,N}
-    data::Dict{CartesianIndex{N},T}
-    dims::NTuple{N,Int64}
-    function SparseArray{T,N}(::UndefInitializer, dims::Dims{N}) where {T,N}
-        return new{T,N}(Dict{CartesianIndex{N},T}(), dims)
+struct SparseArray{T, N} <: AbstractArray{T, N}
+    data::Dict{CartesianIndex{N}, T}
+    dims::NTuple{N, Int64}
+    function SparseArray{T, N}(::UndefInitializer, dims::Dims{N}) where {T, N}
+        return new{T, N}(Dict{CartesianIndex{N}, T}(), dims)
     end
-    function SparseArray(a::SparseArray{T,N}) where {T,N}
-        return new{T,N}(copy(a.data), a.dims)
+    function SparseArray(a::SparseArray{T, N}) where {T, N}
+        return new{T, N}(copy(a.data), a.dims)
     end
-    function SparseArray{T,N}(a::Dict{CartesianIndex{N},T},
-                              dims::NTuple{N,Int64}) where {T,N}
-        return new{T,N}(a, dims)
+    function SparseArray{T, N}(
+            a::Dict{CartesianIndex{N}, T}, dims::NTuple{N, Int64}
+        ) where {T, N}
+        return new{T, N}(a, dims)
     end
 end
-function SparseArray{T}(::UndefInitializer, dims::Dims{N}) where {T,N}
-    return SparseArray{T,N}(undef, dims)
+function SparseArray{T}(::UndefInitializer, dims::Dims{N}) where {T, N}
+    return SparseArray{T, N}(undef, dims)
 end
 SparseArray{T}(::UndefInitializer, dims...) where {T} = SparseArray{T}(undef, dims)
 function SparseArray{T}(a::UniformScaling, dims::Dims{2}) where {T}
@@ -36,16 +37,17 @@ _zero!(x::SparseArray) = (empty!(x.data); return x)
 _sizehint!(x::SparseArray, n) = sizehint!(x.data, n)

 # elementary getindex and setindex!
-@inline function Base.getindex(a::SparseArray{T,N}, I::CartesianIndex{N}) where {T,N}
+@inline function Base.getindex(a::SparseArray{T, N}, I::CartesianIndex{N}) where {T, N}
     @boundscheck checkbounds(a, I)
     return get(a.data, I, zero(T))
 end
-Base.@propagate_inbounds function Base.getindex(a::SparseArray{T,N},
-                                                I::Vararg{Int,N}) where {T,N}
+Base.@propagate_inbounds function Base.getindex(
+        a::SparseArray{T, N}, I::Vararg{Int, N}
+    ) where {T, N}
     return getindex(a, CartesianIndex(I))
 end

-@inline function Base.setindex!(a::SparseArray{T,N}, v, I::CartesianIndex{N}) where {T,N}
+@inline function Base.setindex!(a::SparseArray{T, N}, v, I::CartesianIndex{N}) where {T, N}
     @boundscheck checkbounds(a, I)
     if !iszero(v)
         a.data[I] = v
@@ -54,12 +56,14 @@ end
     end
     return v
 end
-Base.@propagate_inbounds function Base.setindex!(a::SparseArray{T,N},
-                                                 v, I::Vararg{Int,N}) where {T,N}
+Base.@propagate_inbounds function Base.setindex!(
+        a::SparseArray{T, N},
+        v, I::Vararg{Int, N}
+    ) where {T, N}
     return setindex!(a, v, CartesianIndex(I))
 end

-@inline function increaseindex!(a::SparseArray{T,N}, v, I::CartesianIndex{N}) where {T,N}
+@inline function increaseindex!(a::SparseArray{T, N}, v, I::CartesianIndex{N}) where {T, N}
     @boundscheck checkbounds(a, I)
     iszero(v) && return
     h = a.data
@@ -100,8 +104,9 @@ end
 _findfirstvalue(v, r) = findfirst(==(v), r)

 # slicing should produce SparseArray
-function Base._unsafe_getindex(::IndexCartesian, a::SparseArray{T,N},
-                               I::Vararg{Union{Int,AbstractVector{Int}},N}) where {T,N}
+function Base._unsafe_getindex(
+        ::IndexCartesian, a::SparseArray{T, N}, I::Vararg{Union{Int, AbstractVector{Int}}, N}
+    ) where {T, N}
     @boundscheck checkbounds(a, I...)
     indices = Base.to_indices(a, I)
     b = SparseArray{T}(undef, length.(Base.index_shape(indices...)))
@@ -114,8 +119,8 @@ function Base._unsafe_getindex(::IndexCartesian, a::SparseArray{T,N},
     return b
 end

-Base.Array(a::SparseArray{T,N}) where {T,N} = Array{T,N}(a)
-function Base.Array{T,N}(a::SparseArray) where {T,N}
+Base.Array(a::SparseArray{T, N}) where {T, N} = Array{T, N}(a)
+function Base.Array{T, N}(a::SparseArray) where {T, N}
     d = fill(zero(T), size(a))
     for (I, v) in a.data
         d[I] = v
@@ -123,27 +128,27 @@ function Base.Array{T,N}(a::SparseArray) where {T,N}
     return d
 end

-SparseArray(a::AbstractArray{T,N}) where {T,N} = SparseArray{T,N}(a)
-SparseArray{T}(a::AbstractArray{<:Any,N}) where {T,N} = SparseArray{T,N}(a)
-function SparseArray{T,N}(a::AbstractArray{<:Any,N}) where {T,N}
-    d = SparseArray{T,N}(undef, size(a))
+SparseArray(a::AbstractArray{T, N}) where {T, N} = SparseArray{T, N}(a)
+SparseArray{T}(a::AbstractArray{<:Any, N}) where {T, N} = SparseArray{T, N}(a)
+function SparseArray{T, N}(a::AbstractArray{<:Any, N}) where {T, N}
+    d = SparseArray{T, N}(undef, size(a))
     for I in CartesianIndices(a)
         iszero(a[I]) && continue
         d[I] = a[I]
     end
     return d
 end
-Base.convert(::Type{S}, a::S) where {S<:SparseArray} = a
+Base.convert(::Type{S}, a::S) where {S <: SparseArray} = a
 Base.convert(S::Type{<:SparseArray}, a::AbstractArray) = S(a)

-function SparseArray(A::Adjoint{T,<:SparseArray{T,2}}) where {T}
+function SparseArray(A::Adjoint{T, <:SparseArray{T, 2}}) where {T}
     B = SparseArray{T}(undef, size(A))
     for (I, v) in parent(A).data
         B[I[2], I[1]] = conj(v)
     end
     return B
 end
-function SparseArray(A::Transpose{T,<:SparseArray{T,2}}) where {T}
+function SparseArray(A::Transpose{T, <:SparseArray{T, 2}}) where {T}
     B = SparseArray{T}(undef, size(A))
     for (I, v) in parent(A).data
         B[I[2], I[1]] = v
@@ -166,16 +171,18 @@ function Base.copy!(dst::SparseArray, src::SparseArray)
     return dst
 end

-function Base.similar(::SparseArray, ::Type{S}, dims::Dims{N}) where {S,N}
+function Base.similar(::SparseArray, ::Type{S}, dims::Dims{N}) where {S, N}
     return SparseArray{S}(undef, dims)
 end

 # show and friends
 function Base.show(io::IO, ::MIME"text/plain", x::SparseArray)
     xnnz = nonzero_length(x)
-    print(io, join(size(x), "×"), " ", typeof(x), " with ", xnnz, " stored ",
-          xnnz == 1 ? "entry" : "entries")
-    if xnnz != 0
+    print(
+        io, join(size(x), "×"), " ", typeof(x), " with ", xnnz, " stored ",
+        xnnz == 1 ? "entry" : "entries"
+    )
+    return if xnnz != 0
         println(io, ":")
         show(IOContext(io, :typeinfo => eltype(x)), x)
     end
@@ -202,4 +209,5 @@ function Base.show(io::IOContext, x::SparseArray)
             println(io, " ", join(" " .^ pads, " "), " \u22ee")
         end
     end
+    return
 end
diff --git a/src/tensoroperations.jl b/src/tensoroperations.jl
index 33d452c..a961766 100644
--- a/src/tensoroperations.jl
+++ b/src/tensoroperations.jl
@@ -9,16 +9,20 @@ end
 function TO.select_backend(::typeof(TO.tensortrace!), C::SparseArray, A::SparseArray)
     return SparseBackend()
 end
-function TO.select_backend(::typeof(TO.tensorcontract!),
-                           C::SparseArray, A::SparseArray, B::SparseArray)
+function TO.select_backend(
+        ::typeof(TO.tensorcontract!),
+        C::SparseArray, A::SparseArray, B::SparseArray
+    )
     return SparseBackend()
 end

 # Convert to `SparseArray` when forcing `SparseBackend`
-function TO.tensoradd!(C::AbstractArray,
-                       A::AbstractArray, pA::Index2Tuple, conjA::Bool,
-                       α::Number, β::Number,
-                       backend::SparseBackend, allocator=DefaultAllocator())
+function TO.tensoradd!(
+        C::AbstractArray,
+        A::AbstractArray, pA::Index2Tuple, conjA::Bool,
+        α::Number, β::Number,
+        backend::SparseBackend, allocator = DefaultAllocator()
+    )
     if C isa SparseArray
         TO.tensoradd!(C, SparseArray(A), pA, conjA, α, β, backend, allocator)
     else
@@ -29,10 +33,12 @@ function TO.tensoradd!(C::AbstractArray,
     return C
 end

-function TO.tensortrace!(C::AbstractArray,
-                         A::AbstractArray, p::Index2Tuple, q::Index2Tuple, conjA::Bool,
-                         α::Number, β::Number,
-                         backend::SparseBackend, allocator=DefaultAllocator())
+function TO.tensortrace!(
+        C::AbstractArray,
+        A::AbstractArray, p::Index2Tuple, q::Index2Tuple, conjA::Bool,
+        α::Number, β::Number,
+        backend::SparseBackend, allocator = DefaultAllocator()
+    )
     if C isa SparseArray
         TO.tensortrace!(C, SparseArray(A), p, q, conjA, α, β, backend, allocator)
     else
@@ -43,19 +49,29 @@ function TO.tensortrace!(C::AbstractArray,
     return C
 end

-function TO.tensorcontract!(C::AbstractArray,
-                            A::AbstractArray, pA::Index2Tuple, conjA::Bool,
-                            B::AbstractArray, pB::Index2Tuple, conjB::Bool,
-                            pAB::Index2Tuple,
-                            α::Number, β::Number,
-                            backend::SparseBackend, allocator=DefaultAllocator())
+function TO.tensorcontract!(
+        C::AbstractArray,
+        A::AbstractArray, pA::Index2Tuple, conjA::Bool,
+        B::AbstractArray, pB::Index2Tuple, conjB::Bool,
+        pAB::Index2Tuple,
+        α::Number, β::Number,
+        backend::SparseBackend, allocator = DefaultAllocator()
+    )
     if C isa SparseArray
-        TO.tensorcontract!(C, SparseArray(A), pA, conjA, SparseArray(B), pB, conjB, pAB, α,
-                           β, backend, allocator)
+        TO.tensorcontract!(
+            C,
+            SparseArray(A), pA, conjA,
+            SparseArray(B), pB, conjB,
+            pAB, α, β, backend, allocator
+        )
     else
         Csparse = SparseArray(C)
-        TO.tensorcontract!(Csparse, SparseArray(A), pA, conjA, SparseArray(B), pB, conjB,
-                           pAB, α, β, backend, allocator)
+        TO.tensorcontract!(
+            Csparse,
+            SparseArray(A), pA, conjA,
+            SparseArray(B), pB, conjB,
+            pAB, α, β, backend, allocator
+        )
         copy!(C, Csparse)
     end
     return C
@@ -63,10 +79,12 @@ end

 # Actual SparseArray implementation of TensorOperations interface
 #-------------------------------------------------------------------------------------------
-function TO.tensoradd!(C::SparseArray,
-                       A::SparseArray, pA::Index2Tuple, conjA::Bool,
-                       α::Number, β::Number,
-                       ::SparseBackend, allocator=DefaultAllocator())
+function TO.tensoradd!(
+        C::SparseArray,
+        A::SparseArray, pA::Index2Tuple, conjA::Bool,
+        α::Number, β::Number,
+        ::SparseBackend, allocator = DefaultAllocator()
+    )
     TO.argcheck_tensoradd(C, A, pA)
     TO.dimcheck_tensoradd(C, A, pA)

@@ -80,10 +98,12 @@ function TO.tensoradd!(C::SparseArray,
     return C
 end

-function TO.tensortrace!(C::SparseArray,
-                         A::SparseArray, p::Index2Tuple, q::Index2Tuple, conjA::Bool,
-                         α::Number, β::Number,
-                         ::SparseBackend, allocator=DefaultAllocator())
+function TO.tensortrace!(
+        C::SparseArray,
+        A::SparseArray, p::Index2Tuple, q::Index2Tuple, conjA::Bool,
+        α::Number, β::Number,
+        ::SparseBackend, allocator = DefaultAllocator()
+    )
     TO.argcheck_tensortrace(C, A, p, q)
     TO.dimcheck_tensortrace(C, A, p, q)

@@ -101,22 +121,28 @@ function TO.tensortrace!(C::SparseArray,
     return C
 end

-function TO.tensorcontract!(C::SparseArray,
-                            A::SparseArray, pA::Index2Tuple, conjA::Bool,
-                            B::SparseArray, pB::Index2Tuple, conjB::Bool,
-                            pAB::Index2Tuple,
-                            α::Number, β::Number,
-                            ::SparseBackend, allocator=DefaultAllocator())
+function TO.tensorcontract!(
+        C::SparseArray,
+        A::SparseArray, pA::Index2Tuple, conjA::Bool,
+        B::SparseArray, pB::Index2Tuple, conjB::Bool,
+        pAB::Index2Tuple,
+        α::Number, β::Number,
+        ::SparseBackend, allocator = DefaultAllocator()
+    )
     TO.argcheck_tensorcontract(C, A, pA, B, pB, pAB)
     TO.dimcheck_tensorcontract(C, A, pA, B, pB, pAB)

     scale!(C, β)

     pAB_lin = linearize(pAB)
-    keysA = sort!(collect(nonzero_keys(A));
-                  by=IA -> CartesianIndex(TupleTools.getindices(IA.I, pA[2])))
-    keysB = sort!(collect(nonzero_keys(B));
-                  by=IB -> CartesianIndex(TupleTools.getindices(IB.I, pB[1])))
+    keysA = sort!(
+        collect(nonzero_keys(A));
+        by = IA -> CartesianIndex(TupleTools.getindices(IA.I, pA[2]))
+    )
+    keysB = sort!(
+        collect(nonzero_keys(B));
+        by = IB -> CartesianIndex(TupleTools.getindices(IB.I, pB[1]))
+    )

     iA = iB = 1
     @inbounds while iA <= length(keysA) && iB <= length(keysB)
@@ -188,12 +214,14 @@ function TO.tensorcontract!(C::SparseArray,
 end

 function TO.tensoradd_type(TC, ::SparseArray, pA::Index2Tuple, ::Bool)
-    return SparseArray{TC,TO.numind(pA)}
+    return SparseArray{TC, TO.numind(pA)}
 end
-function TO.tensorcontract_type(TC,
-                                ::SparseArray, pA::Index2Tuple, conjA::Bool,
-                                ::SparseArray, pB::Index2Tuple, conjB::Bool,
-                                pAB::Index2Tuple)
-    return SparseArray{TC,TO.numind(pAB)}
+function TO.tensorcontract_type(
+        TC,
+        ::SparseArray, pA::Index2Tuple, conjA::Bool,
+        ::SparseArray, pB::Index2Tuple, conjB::Bool,
+        pAB::Index2Tuple
+    )
+    return SparseArray{TC, TO.numind(pAB)}
 end
diff --git a/src/vectorinterface.jl b/src/vectorinterface.jl
index 728a1fc..893cfbf 100644
--- a/src/vectorinterface.jl
+++ b/src/vectorinterface.jl
@@ -2,7 +2,7 @@
 ##################################################################
 # zerovector & zerovector!!
 #---------------------------
-function VectorInterface.zerovector(x::SparseArray, ::Type{S}) where {S<:Number}
+function VectorInterface.zerovector(x::SparseArray, ::Type{S}) where {S <: Number}
     return SparseArray{S}(undef, size(x))
 end
 VectorInterface.zerovector!(x::SparseArray) = _zero!(x)
diff --git a/test/basic.jl b/test/basic.jl
index fc9195c..6a0bce7 100644
--- a/test/basic.jl
+++ b/test/basic.jl
@@ -5,7 +5,7 @@ using Test, TestExtras, LinearAlgebra, Random
 #=
 generate a whole bunch of random contractions, compare with the dense result
 =#
-function randn_sparse(T::Type{<:Number}, sz::Dims, p=0.5)
+function randn_sparse(T::Type{<:Number}, sz::Dims, p = 0.5)
     a = SparseArray{T}(undef, sz)
     for I in keys(a)
         if rand() < p
diff --git a/test/contractions.jl b/test/contractions.jl
index 67254b6..383f6e7 100644
--- a/test/contractions.jl
+++ b/test/contractions.jl
@@ -5,7 +5,7 @@ using TensorOperations
 #=
 generate a whole bunch of random contractions, compare with the dense result
 =#
-function randn_sparse(T::Type{<:Number}, sz::Dims, p=0.5)
+function randn_sparse(T::Type{<:Number}, sz::Dims, p = 0.5)
     a = SparseArray{T}(undef, sz)
     for I in keys(a)
         if rand() < p
@@ -25,16 +25,22 @@ end
 for i in 1:NUM_TESTS
     contracted_indices = repeat(collect(1:rand(1:MAX_CONTRACTED_INDICES)), 2)
     open_indices = collect(1:rand(1:MAX_OPEN_INDICES))
-    dimensions = [repeat(rand(1:MAX_DIM, Int(length(contracted_indices) / 2)), 2);
-                  rand(1:MAX_DIM, length(open_indices))]
+    dimensions = [
+        repeat(rand(1:MAX_DIM, Int(length(contracted_indices) / 2)), 2);
+        rand(1:MAX_DIM, length(open_indices))
+    ]

     #generate a random tensor network contraction
     tensors = SparseArray[]
     indices = Vector{Int64}[]
     conjlist = Bool[]

     while !isempty(contracted_indices) || !isempty(open_indices)
-        num_inds = rand(1:min(MAX_IND_PER_TENS,
-                              length(contracted_indices) + length(open_indices)))
+        num_inds = rand(
+            1:min(
+                MAX_IND_PER_TENS,
+                length(contracted_indices) + length(open_indices)
+            )
+        )
         cur_inds = Int64[]
         cur_dims = Int64[]
diff --git a/test/runtests.jl b/test/runtests.jl
index 32b7a75..03705cf 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -11,8 +11,8 @@ println("==========================")
 include("contractions.jl")

 module AquaSparseArrayKit
-using SparseArrayKit, Aqua, Test
-@testset "Aqua" verbose = true begin
-    Aqua.test_all(SparseArrayKit)
-end
+    using SparseArrayKit, Aqua, Test
+    @testset "Aqua" verbose = true begin
+        Aqua.test_all(SparseArrayKit)
+    end
 end
diff --git a/test/vectorinterface.jl b/test/vectorinterface.jl
index dffd3cf..2340775 100644
--- a/test/vectorinterface.jl
+++ b/test/vectorinterface.jl
@@ -5,7 +5,7 @@ using Test, TestExtras, VectorInterface, Random
 deepcollect(x) = vcat(map(deepcollect, x)...)
 deepcollect(x::Number) = x

-function randn_sparse(T::Type{<:Number}, sz::Dims, p=0.5)
+function randn_sparse(T::Type{<:Number}, sz::Dims, p = 0.5)
     a = SparseArray{T}(undef, sz)
     for I in keys(a)
         if rand() < p