Skip to content

Commit ed284ab

Browse files
authored
Merge 2f38cef into b04a973
2 parents b04a973 + 2f38cef commit ed284ab

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

62 files changed

+1856
-1699
lines changed

NDTensors/Project.toml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
88
Compat = "34da2185-b29b-5c13-b0c7-acf172513d20"
99
Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
1010
FLoops = "cc61a311-1640-44b5-9fba-1b764f453329"
11+
FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b"
1112
Folds = "41a02a25-b8f0-4f67-bc48-60067656b558"
1213
Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
1314
HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
@@ -49,3 +50,4 @@ Strided = "0.3, 1, 2"
4950
TimerOutputs = "0.5.5"
5051
TupleTools = "1.2.0"
5152
julia = "1.6"
53+

NDTensors/ext/NDTensorsCUDAExt/adapt.jl

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
## Will not allow us to properly utilize the buffer preference without changing the value of
44
## default_buffertype. Also `adapt(CuVector{<:Any, <:Any, Buffertype})` fails to work properly
55
struct NDTensorCuArrayAdaptor{B} end
6-
## TODO make this work for unified. This works but overwrites CUDA's adapt_storage. This fails for emptystorage...
6+
## TODO make this work for unified. This works but overwrites CUDA's adapt_storage.
77
function cu(xs; unified::Bool=false)
88
return fmap(
99
x -> adapt(NDTensorCuArrayAdaptor{unified ? Mem.UnifiedBuffer : Mem.DeviceBuffer}(), x),
@@ -18,10 +18,3 @@ function Adapt.adapt_storage(adaptor::NDTensorCuArrayAdaptor, xs::AbstractArray)
1818
BufT = buffertype(adaptor)
1919
return isbits(xs) ? xs : CuArray{ElT,1,BufT}(xs)
2020
end
21-
22-
function NDTensors.adapt_storagetype(
23-
adaptor::NDTensorCuArrayAdaptor, xs::Type{EmptyStorage{ElT,StoreT}}
24-
) where {ElT,StoreT}
25-
BufT = buffertype(adaptor)
26-
return NDTensors.emptytype(NDTensors.adapt_storagetype(CuVector{ElT,BufT}, StoreT))
27-
end

NDTensors/ext/NDTensorsCUDAExt/imports.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
import NDTensors: cu, set_ndims, set_eltype, set_eltype_if_unspecified, similartype
1+
import NDTensors: cu, set_ndims, set_eltype, specify_eltype, similartype
22
import NDTensors:
33
ContractionProperties, _contract!, GemmBackend, auto_select_backend, _gemm!
44
import NDTensors.SetParameters: nparameters, get_parameter, set_parameter, default_parameter

NDTensors/ext/NDTensorsMetalExt/imports.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
import NDTensors: mtl, set_ndims, set_eltype, set_eltype_if_unspecified
1+
import NDTensors: mtl, set_ndims, set_eltype, specify_eltype
22
import NDTensors.SetParameters: nparameters, get_parameter, set_parameter, default_parameter
33

44
using Metal: DefaultStorageMode

NDTensors/src/NDTensors.jl

Lines changed: 15 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@ using Adapt
44
using Base.Threads
55
using Compat
66
using Dictionaries
7+
using FillArrays
78
using FLoops
89
using Folds
910
using Random
@@ -32,6 +33,11 @@ using Base.Threads: @spawn
3233
include("imports.jl")
3334
include("exports.jl")
3435

36+
#####################################
37+
# Unspecified Element Types
38+
#
39+
include("unspec_eltype/unspecified_zero.jl")
40+
3541
#####################################
3642
# General functionality
3743
#
@@ -44,7 +50,6 @@ include("abstractarray/ndims.jl")
4450
include("abstractarray/fill.jl")
4551
include("array/set_types.jl")
4652
include("tupletools.jl")
47-
include("emptynumber.jl")
4853
include("nodata.jl")
4954
include("tensorstorage/tensorstorage.jl")
5055
include("tensorstorage/set_types.jl")
@@ -55,9 +60,18 @@ include("dims.jl")
5560
include("tensor/set_types.jl")
5661
include("tensor/similar.jl")
5762
include("adapt.jl")
63+
include("abstractarray/data_isa.jl")
5864
include("tensoralgebra/generic_tensor_operations.jl")
5965
include("tensoralgebra/contraction_logic.jl")
6066

67+
#####################################
68+
# Zeros
69+
#
70+
include("zeros/unallocated_zeros.jl")
71+
include("zeros/similar.jl")
72+
include("zeros/set_types.jl")
73+
include("zeros/allocate.jl")
74+
6175
#####################################
6276
# DenseTensor and DiagTensor
6377
#
@@ -101,14 +115,6 @@ include("blocksparse/similar.jl")
101115
include("blocksparse/combiner.jl")
102116
include("blocksparse/linearalgebra.jl")
103117

104-
#####################################
105-
# Empty
106-
#
107-
include("empty/empty.jl")
108-
include("empty/EmptyTensor.jl")
109-
include("empty/tensoralgebra/contract.jl")
110-
include("empty/adapt.jl")
111-
112118
#####################################
113119
# Deprecations
114120
#
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
data_isa(t::Tensor, datatype::Type) = data_isa(storage(t), datatype)
2+
data_isa(s::TensorStorage, datatype::Type) = data_isa(data(s), datatype)
3+
data_isa(d::AbstractArray, datatype::Type) = d isa datatype # Might have to unwrap if it is reshaped, sliced, etc.
4+
## This is for the UniformDiag type and can be removed
5+
## after switching to FillArrays.Diag
6+
data_isa(n::Number, datatype::Type) = false

NDTensors/src/abstractarray/fill.jl

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ function generic_randn(arraytype::Type{<:AbstractArray}, dim::Integer=0)
1010
return data
1111
end
1212

13+
## TODO: Should generic Zeros by default construct an NDTensors.Zeros with an alloc type of `NDTensors.default_datatype()` ?
1314
function generic_zeros(arraytype::Type{<:AbstractArray}, dim::Integer=0)
1415
arraytype_specified = set_unspecified_parameters(
1516
leaf_parenttype(arraytype), DefaultParameters()

NDTensors/src/abstractarray/set_types.jl

Lines changed: 14 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -37,30 +37,37 @@ function set_indstype(arraytype::Type{<:AbstractArray}, dims::Tuple)
3737
return set_ndims(arraytype, length(dims))
3838
end
3939

40-
function set_eltype_if_unspecified(
40+
function specify_eltype(
4141
arraytype::Type{<:AbstractArray{T}}, eltype::Type=default_eltype()
4242
) where {T}
4343
return arraytype
4444
end
4545

4646
#TODO transition to set_eltype when working for wrapped types
47-
function set_eltype_if_unspecified(
48-
arraytype::Type{<:AbstractArray}, eltype::Type=default_eltype()
49-
)
47+
function specify_eltype(arraytype::Type{<:AbstractArray}, eltype::Type=default_eltype())
5048
return similartype(arraytype, eltype)
5149
end
5250

53-
function set_parameter_if_unspecified(
51+
const RealOrComplex{T} = Union{T,Complex{T}}
52+
function specify_eltype(
53+
arraytype::Type{<:AbstractArray{<:RealOrComplex{UnspecifiedZero}}},
54+
elt::Type=default_eltype(),
55+
)
56+
elt = promote_type(elt, eltype(arraytype))
57+
return set_eltype(arraytype, elt)
58+
end
59+
60+
function specify_parameters(
5461
arraytype::Type{<:AbstractArray{ElT,N}}, eltype::Type=default_eltype(), ndims::Integer=1
5562
) where {ElT,N}
5663
return arraytype
5764
end
58-
function set_parameter_if_unspecified(
65+
function specify_parameters(
5966
arraytype::Type{<:AbstractArray{ElT}}, eltype::Type=default_eltype(), ndims::Integer=1
6067
) where {ElT}
6168
return set_ndims(arraytype, ndims)
6269
end
63-
function set_parameter_if_unspecified(
70+
function specify_parameters(
6471
arraytype::Type{<:AbstractArray}, eltype::Type=default_eltype(), ndims::Integer=1
6572
)
6673
return set_eltype(set_ndims(arraytype, ndims), eltype)

NDTensors/src/adapt.jl

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ adapt_structure(to, x::Tensor) = setstorage(x, adapt(to, storage(x)))
44
cpu(eltype::Type{<:Number}, x) = fmap(x -> adapt(Array{eltype}, x), x)
55
cpu(x) = fmap(x -> adapt(Array, x), x)
66

7-
# Implemented in `ITensorGPU` and NDTensorCUDA
7+
# Implemented in `ITensorGPU` and NDTensorsCUDAExt
88
function cu end
99

1010
function mtl end
@@ -30,9 +30,9 @@ double_precision(x) = fmap(x -> adapt(double_precision(eltype(x)), x), x)
3030
#
3131

3232
function adapt_storagetype(to::Type{<:AbstractVector}, x::Type{<:TensorStorage})
33-
return set_datatype(x, set_eltype_if_unspecified(to, eltype(x)))
33+
return set_datatype(x, specify_eltype(to, eltype(x)))
3434
end
3535

3636
function adapt_storagetype(to::Type{<:AbstractArray}, x::Type{<:TensorStorage})
37-
return set_datatype(x, set_eltype_if_unspecified(set_ndims(to, 1), eltype(x)))
37+
return set_datatype(x, specify_eltype(set_ndims(to, 1), eltype(x)))
3838
end

NDTensors/src/blocksparse/blocksparse.jl

Lines changed: 29 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
# BlockSparse storage
33
#
44

5+
## BlockSparse constructors
56
struct BlockSparse{ElT,VecT,N} <: TensorStorage{ElT}
67
data::VecT
78
blockoffsets::BlockOffsets{N} # Block number-offset pairs
@@ -12,36 +13,16 @@ struct BlockSparse{ElT,VecT,N} <: TensorStorage{ElT}
1213
end
1314
end
1415

15-
# TODO: Implement as `fieldtype(storagetype, :data)`.
16-
datatype(::Type{<:BlockSparse{<:Any,DataT}}) where {DataT} = DataT
17-
# TODO: Implement as `ndims(blockoffsetstype(storagetype))`.
18-
ndims(storagetype::Type{<:BlockSparse{<:Any,<:Any,N}}) where {N} = N
19-
# TODO: Implement as `fieldtype(storagetype, :blockoffsets)`.
20-
blockoffsetstype(storagetype::Type{<:BlockSparse}) = BlockOffsets{ndims(storagetype)}
21-
22-
function set_datatype(storagetype::Type{<:BlockSparse}, datatype::Type{<:AbstractVector})
23-
return BlockSparse{eltype(datatype),datatype,ndims(storagetype)}
24-
end
25-
26-
function set_ndims(storagetype::Type{<:BlockSparse}, ndims)
27-
return BlockSparse{eltype(storagetype),datatype(storagetype),ndims}
28-
end
29-
30-
# TODO: Write as `(::Type{<:BlockSparse})()`.
31-
BlockSparse{ElT,DataT,N}() where {ElT,DataT,N} = BlockSparse(DataT(), BlockOffsets{N}())
32-
3316
function BlockSparse(
3417
datatype::Type{<:AbstractArray}, blockoffsets::BlockOffsets, dim::Integer; vargs...
3518
)
36-
return BlockSparse(
37-
fill!(NDTensors.similar(datatype, dim), zero(eltype(datatype))), blockoffsets; vargs...
38-
)
19+
return BlockSparse(generic_zeros(datatype, dim), blockoffsets; vargs...)
3920
end
4021

4122
function BlockSparse(
4223
eltype::Type{<:Number}, blockoffsets::BlockOffsets, dim::Integer; vargs...
4324
)
44-
return BlockSparse(Vector{eltype}, blockoffsets, dim; vargs...)
25+
return BlockSparse(default_datatype(eltype), blockoffsets, dim; vargs...)
4526
end
4627

4728
function BlockSparse(x::Number, blockoffsets::BlockOffsets, dim::Integer; vargs...)
@@ -51,16 +32,38 @@ end
5132
function BlockSparse(
5233
::Type{ElT}, ::UndefInitializer, blockoffsets::BlockOffsets, dim::Integer; vargs...
5334
) where {ElT<:Number}
54-
return BlockSparse(Vector{ElT}(undef, dim), blockoffsets; vargs...)
35+
return BlockSparse(default_datatype(ElT)(undef, dim), blockoffsets; vargs...)
36+
end
37+
38+
function BlockSparse(::UndefInitializer, blockoffsets::BlockOffsets, dim::Integer; vargs...)
39+
return BlockSparse(NDTensors.default_eltype(), undef, blockoffsets, dim; vargs...)
5540
end
5641

5742
function BlockSparse(blockoffsets::BlockOffsets, dim::Integer; vargs...)
58-
return BlockSparse(Float64, blockoffsets, dim; vargs...)
43+
return BlockSparse(default_datatype(default_eltype()), blockoffsets, dim; vargs...)
5944
end
6045

61-
function BlockSparse(::UndefInitializer, blockoffsets::BlockOffsets, dim::Integer; vargs...)
62-
return BlockSparse(Float64, undef, blockoffsets, dim; vargs...)
46+
# TODO: Write as `(::Type{<:BlockSparse})()`.
47+
BlockSparse{ElT,DataT,N}() where {ElT,DataT,N} = BlockSparse(DataT(), BlockOffsets{N}())
48+
49+
## End BlockSparse constructors
50+
51+
## Blocksparse fieldtypes
52+
# TODO: Implement as `fieldtype(storagetype, :data)`.
53+
datatype(::Type{<:BlockSparse{<:Any,DataT}}) where {DataT} = DataT
54+
# TODO: Implement as `ndims(blockoffsetstype(storagetype))`.
55+
ndims(storagetype::Type{<:BlockSparse{<:Any,<:Any,N}}) where {N} = N
56+
# TODO: Implement as `fieldtype(storagetype, :blockoffsets)`.
57+
blockoffsetstype(storagetype::Type{<:BlockSparse}) = BlockOffsets{ndims(storagetype)}
58+
59+
function set_datatype(storagetype::Type{<:BlockSparse}, datatype::Type{<:AbstractVector})
60+
return BlockSparse{eltype(datatype),datatype,ndims(storagetype)}
61+
end
62+
63+
function set_ndims(storagetype::Type{<:BlockSparse}, ndims)
64+
return BlockSparse{eltype(storagetype),datatype(storagetype),ndims}
6365
end
66+
## end blocksparse fieldtypes
6467

6568
copy(D::BlockSparse) = BlockSparse(copy(data(D)), copy(blockoffsets(D)))
6669

NDTensors/src/blocksparse/blocksparsetensor.jl

Lines changed: 37 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ function BlockSparseTensor(
4242
end
4343

4444
function BlockSparseTensor(eltype::Type{<:Number}, blockoffsets::BlockOffsets, inds)
45-
return BlockSparseTensor(Vector{eltype}, blockoffsets, inds)
45+
return BlockSparseTensor(default_datatype(eltype), blockoffsets, inds)
4646
end
4747

4848
function BlockSparseTensor(blockoffsets::BlockOffsets, inds)
@@ -61,7 +61,7 @@ function BlockSparseTensor(datatype::Type{<:AbstractArray}, inds)
6161
end
6262

6363
function BlockSparseTensor(eltype::Type{<:Number}, inds)
64-
return BlockSparseTensor(Vector{eltype}, inds)
64+
return BlockSparseTensor(default_datatype(eltype), inds)
6565
end
6666

6767
"""
@@ -79,6 +79,14 @@ end
7979
Construct a block sparse tensor with the specified blocks.
8080
Defaults to setting structurally non-zero blocks to zero.
8181
"""
82+
function BlockSparseTensor(
83+
datat::Type{<:AbstractArray}, blocks::Vector{BlockT}, inds
84+
) where {BlockT<:Union{Block,NTuple}}
85+
boffs, nnz = blockoffsets(blocks, inds)
86+
storage = BlockSparse(datat, boffs, nnz)
87+
return tensor(storage, inds)
88+
end
89+
8290
function BlockSparseTensor(blocks::Vector{BlockT}, inds) where {BlockT<:Union{Block,NTuple}}
8391
return BlockSparseTensor(Float64, blocks, inds)
8492
end
@@ -202,6 +210,9 @@ end
202210
@propagate_inbounds function getindex(
203211
T::BlockSparseTensor{ElT,N}, i::Vararg{Int,N}
204212
) where {ElT,N}
213+
if is_unallocated_zeros(T)
214+
return zero(ElT)
215+
end
205216
offset, _ = indexoffset(T, i...)
206217
isnothing(offset) && return zero(ElT)
207218
return storage(T)[offset]
@@ -225,6 +236,10 @@ end
225236
function insertblock_offset!(T::BlockSparseTensor{ElT,N}, newblock::Block{N}) where {ElT,N}
226237
newdim = blockdim(T, newblock)
227238
newoffset = nnz(T)
239+
if newblock ∈ eachnzblock(T)
240+
return offset(T, newblock)
241+
end
242+
228243
insert!(blockoffsets(T), newblock, newoffset)
229244
# Insert new block into data
230245
# TODO: Make GPU-friendly
@@ -849,6 +864,26 @@ function permute_combine(
849864
return R
850865
end
851866

867+
function dropzeros(T::Tensor; tol=0)
868+
v = Base.similar(NDTensors.data(T), 0)
869+
bloc = Vector{Block{ndims(T)}}(undef, 0)
870+
off = Vector{Int64}(undef, 0)
871+
length = Int64(0)
872+
for b in eachnzblock(T)
873+
Tb = NDTensors.data(T[b])
874+
if norm(Tb) > tol
875+
for i in Tb
876+
push!(v, i)
877+
end
878+
#push!(offsets, BlockOffset(b, length))
879+
push!(bloc, b)
880+
push!(off, length)
881+
length += dim(size(Tb))
882+
end
883+
end
884+
return Tensor(BlockSparse(v, Dictionary(bloc, off)), inds(T))
885+
end
886+
852887
#
853888
# Print block sparse tensors
854889
#

NDTensors/src/combiner/contract.jl

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,9 @@ function contract!!(
1919
tensor::Tensor,
2020
tensor_labels,
2121
)
22+
output_tensor = allocate(
23+
specify_eltype(typeof(output_tensor), eltype(tensor)), output_tensor
24+
)
2225
if ndims(combiner_tensor) ≤ 1
2326
# Empty combiner, acts as multiplying by 1
2427
output_tensor = permutedims!!(

NDTensors/src/dense/dense.jl

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -147,7 +147,7 @@ function HDF5.write(
147147
attributes(g)["type"] = "Dense{$(eltype(Store))}"
148148
attributes(g)["version"] = 1
149149
if eltype(D) != Nothing
150-
write(g, "data", D.data)
150+
write(g, "data", NDTensors.allocate(D).data)
151151
end
152152
end
153153

NDTensors/src/dense/densetensor.jl

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -183,6 +183,10 @@ end
183183
# TODO: Remove this in favor of `map!`
184184
# applied to `PermutedDimsArray`.
185185
function permutedims!!(R::DenseTensor, T::DenseTensor, perm, f::Function=(r, t) -> t)
186+
## TODO if unallocatedzero, permute is trivial, just change inds
187+
if is_unallocated_zeros(T)
188+
return R
189+
end
186190
Base.checkdims_perm(R, T, perm)
187191
RR = convert(promote_type(typeof(R), typeof(T)), R)
188192
permutedims!(RR, T, perm, f)

NDTensors/src/dense/fill.jl

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,10 @@ function generic_zeros(
3131
end
3232

3333
function generic_zeros(StoreT::Type{<:Dense{ElT}}, dim::Integer=0) where {ElT}
34+
## TODO see if this works
35+
# DataT = default_storagetype(ElT)
36+
#N = ndims(DataT)
37+
#return generic_zeros(UnallocatedZeros{ElT,N,DataT}, dim)
3438
return generic_zeros(default_storagetype(ElT), dim)
3539
end
3640

0 commit comments

Comments
 (0)