Commit 46cc745

Merge pull request #129 from JuliaML/plotrecipe
Update plot recipe
2 parents 71041fb + 4bd9d95 commit 46cc745

9 files changed: +79 −108 lines changed

docs/src/introduction/gettingstarted.md

Lines changed: 9 additions & 10 deletions
@@ -113,10 +113,10 @@ avoid allocating a temporary array and directly compute the
 result.

 ```julia-repl
-julia> value(L2DistLoss(), true_targets, pred_outputs, AvgMode.Sum())
+julia> value(L2DistLoss(), true_targets, pred_outputs, AggMode.Sum())
 5.25

-julia> value(L2DistLoss(), true_targets, pred_outputs, AvgMode.Mean())
+julia> value(L2DistLoss(), true_targets, pred_outputs, AggMode.Mean())
 1.75
 ```

@@ -126,10 +126,10 @@ each observation in the predicted outputs and so allow to give
 certain observations a stronger influence over the result.

 ```julia-repl
-julia> value(L2DistLoss(), true_targets, pred_outputs, AvgMode.WeightedSum([2,1,1]))
+julia> value(L2DistLoss(), true_targets, pred_outputs, AggMode.WeightedSum([2,1,1]))
 5.5

-julia> value(L2DistLoss(), true_targets, pred_outputs, AvgMode.WeightedMean([2,1,1]))
+julia> value(L2DistLoss(), true_targets, pred_outputs, AggMode.WeightedMean([2,1,1]))
 1.375
 ```

@@ -157,7 +157,7 @@ julia> value(L2DistLoss(), A, B)
  0.00161395  0.0423701  0.183882
  0.172286    0.0180639  0.00252607

-julia> value(L2DistLoss(), A, B, AvgMode.Sum())
+julia> value(L2DistLoss(), A, B, AggMode.Sum())
 0.420741920634
 ```

@@ -172,7 +172,7 @@ julia> value(L2DistLoss(), rand(2), rand(2,2))
  0.228077  0.597212
  0.789808  0.311914

-julia> value(L2DistLoss(), rand(2), rand(2,2), AvgMode.Sum())
+julia> value(L2DistLoss(), rand(2), rand(2,2), AggMode.Sum())
 0.0860658081865589
 ```

@@ -182,18 +182,18 @@ multivariate regression where one could want to accumulate the
 loss per individual observation.

 ```julia-repl
-julia> value(L2DistLoss(), A, B, AvgMode.Sum(), ObsDim.First())
+julia> value(L2DistLoss(), A, B, AggMode.Sum(), ObsDim.First())
 2-element Array{Float64,1}:
  0.227866
  0.192876

-julia> value(L2DistLoss(), A, B, AvgMode.Sum(), ObsDim.Last())
+julia> value(L2DistLoss(), A, B, AggMode.Sum(), ObsDim.Last())
 3-element Array{Float64,1}:
  0.1739
  0.060434
  0.186408

-julia> value(L2DistLoss(), A, B, AvgMode.WeightedSum([2,1]), ObsDim.First())
+julia> value(L2DistLoss(), A, B, AggMode.WeightedSum([2,1]), ObsDim.First())
 0.648608280735
 ```

@@ -287,4 +287,3 @@ If you encounter a bug or would like to participate in the
 further development of this package come find us on Github.

 - [JuliaML/LossFunctions.jl](https://github.com/JuliaML/LossFunctions.jl)
-
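The change to this page is purely the `AvgMode` → `AggMode` rename; signatures and results are untouched. A minimal before/after sketch of the renamed aggregation API (the data vectors here are hypothetical, chosen only so the arithmetic is easy to check):

```julia
using LossFunctions

true_targets = [1.0, 2.0, 3.0]   # hypothetical example data
pred_outputs = [1.5, 2.5, 0.5]

# before this commit: value(L2DistLoss(), true_targets, pred_outputs, AvgMode.Sum())
# after this commit:
value(L2DistLoss(), true_targets, pred_outputs, AggMode.Sum())
# (1.5-1)^2 + (2.5-2)^2 + (0.5-3)^2 = 0.25 + 0.25 + 6.25 = 6.75
```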
docs/src/user/aggregate.md

Lines changed: 38 additions & 32 deletions
@@ -1,3 +1,9 @@
+```@meta
+DocTestSetup = quote
+    using LossFunctions
+end
+```
+
 # Efficient Sum and Mean

 In many situations we are not really that interested in the

@@ -50,14 +56,14 @@ common accumulations efficiently without allocating temporary
 arrays. These methods can be invoked using an additional
 parameter which specifies how the values should be accumulated /
 averaged. The type of this parameter has to be a subtype of
-`AverageMode`.
+`AggregateMode`.

 ## Average Modes

 Before we discuss these memory-efficient methods, let us briefly
 introduce the available average mode types. We provide a number
 of different averages modes, all of which are contained within
-the namespace `AvgMode`. An instance of such type can then be
+the namespace `AggMode`. An instance of such type can then be
 used as additional parameter to [`value`](@ref), [`deriv`](@ref),
 and [`deriv2`](@ref), as we will see further down.

@@ -66,11 +72,11 @@ a short description of what their effect would be when used as an
 additional parameter to the functions mentioned above.

 ```@docs
-AvgMode.None
-AvgMode.Sum
-AvgMode.Mean
-AvgMode.WeightedSum
-AvgMode.WeightedMean
+AggMode.None
+AggMode.Sum
+AggMode.Mean
+AggMode.WeightedSum
+AggMode.WeightedMean
 ```

 ## Unweighted Sum and Mean

@@ -82,15 +88,15 @@ broadcasted) results of [`value`](@ref), [`deriv`](@ref), and
 temporary array and instead compute the result directly.

 ```@docs
-value(::Loss, ::AbstractArray, ::AbstractArray, ::LossFunctions.AverageMode)
+value(::Loss, ::AbstractArray, ::AbstractArray, ::LossFunctions.AggregateMode)
 ```

 The exact same method signature is also implemented for
 [`deriv`](@ref) and [`deriv2`](@ref) respectively.

 ```@docs
-deriv(::Loss, ::AbstractArray, ::AbstractArray, ::LossFunctions.AverageMode)
-deriv2(::Loss, ::AbstractArray, ::AbstractArray, ::LossFunctions.AverageMode)
+deriv(::Loss, ::AbstractArray, ::AbstractArray, ::LossFunctions.AggregateMode)
+deriv2(::Loss, ::AbstractArray, ::AbstractArray, ::LossFunctions.AggregateMode)
 ```

 ## Sum and Mean per Observation

@@ -110,7 +116,7 @@ that denotes the observations. For that purpose we provide the
 types contained in the namespace `ObsDim`.

 ```@docs
-value(::Loss, ::AbstractArray, ::AbstractArray, ::LossFunctions.AverageMode, ::LearnBase.ObsDimension)
+value(::Loss, ::AbstractArray, ::AbstractArray, ::LossFunctions.AggregateMode, ::LearnBase.ObsDimension)
 ```

 Consider the following two matrices, `targets` and `outputs`.

@@ -136,12 +142,12 @@ the observations. Thus this data would consist of two
 observations with four variables each.

 ```jldoctest obsdim
-julia> value(L1DistLoss(), targets, outputs, AvgMode.Sum(), ObsDim.First())
+julia> value(L1DistLoss(), targets, outputs, AggMode.Sum(), ObsDim.First())
 2-element Array{Float64,1}:
  1.5
  2.0

-julia> value(L1DistLoss(), targets, outputs, AvgMode.Mean(), ObsDim.First())
+julia> value(L1DistLoss(), targets, outputs, AggMode.Mean(), ObsDim.First())
 2-element Array{Float64,1}:
  0.375
  0.5

@@ -152,14 +158,14 @@ second/last dimension denotes the observations. In that case our
 data consists of four observations with two variables each.

 ```jldoctest obsdim
-julia> value(L1DistLoss(), targets, outputs, AvgMode.Sum(), ObsDim.Last())
+julia> value(L1DistLoss(), targets, outputs, AggMode.Sum(), ObsDim.Last())
 4-element Array{Float64,1}:
  0.125
  0.625
  1.125
  1.625

-julia> value(L1DistLoss(), targets, outputs, AvgMode.Mean(), ObsDim.Last())
+julia> value(L1DistLoss(), targets, outputs, AggMode.Mean(), ObsDim.Last())
 4-element Array{Float64,1}:
  0.0625
  0.3125

@@ -172,17 +178,17 @@ mutating version that can make use a preallocated vector to write
 the results into.

 ```@docs
-value!(::AbstractArray, ::Loss, ::AbstractArray, ::AbstractArray, ::LossFunctions.AverageMode, ::LearnBase.ObsDimension)
+value!(::AbstractArray, ::Loss, ::AbstractArray, ::AbstractArray, ::LossFunctions.AggregateMode, ::LearnBase.ObsDimension)
 ```

 Naturally we also provide both of these methods for
 [`deriv`](@ref) and [`deriv2`](@ref) respectively.

 ```@docs
-deriv(::Loss, ::AbstractArray, ::AbstractArray, ::LossFunctions.AverageMode, ::LearnBase.ObsDimension)
-deriv!(::AbstractArray, ::Loss, ::AbstractArray, ::AbstractArray, ::LossFunctions.AverageMode, ::LearnBase.ObsDimension)
-deriv2(::Loss, ::AbstractArray, ::AbstractArray, ::LossFunctions.AverageMode, ::LearnBase.ObsDimension)
-deriv2!(::AbstractArray, ::Loss, ::AbstractArray, ::AbstractArray, ::LossFunctions.AverageMode, ::LearnBase.ObsDimension)
+deriv(::Loss, ::AbstractArray, ::AbstractArray, ::LossFunctions.AggregateMode, ::LearnBase.ObsDimension)
+deriv!(::AbstractArray, ::Loss, ::AbstractArray, ::AbstractArray, ::LossFunctions.AggregateMode, ::LearnBase.ObsDimension)
+deriv2(::Loss, ::AbstractArray, ::AbstractArray, ::LossFunctions.AggregateMode, ::LearnBase.ObsDimension)
+deriv2!(::AbstractArray, ::Loss, ::AbstractArray, ::AbstractArray, ::LossFunctions.AggregateMode, ::LearnBase.ObsDimension)
 ```

 ## Weighted Sum and Mean

@@ -233,7 +239,7 @@ each observation (which results in a vector), and then we compute
 the weighted sum of all observations.

 The following code snipped demonstrates how to compute the
-`AvgMode.WeightedSum([2,1])` manually. This is **not** meant as
+`AggMode.WeightedSum([2,1])` manually. This is **not** meant as
 an example of how to do it, but simply to show what is happening
 qualitatively. In this example we assume that we are working in a
 multi-variable regression setting, in which our data set has four

@@ -261,7 +267,7 @@ julia> sum(tmp .* [2, 1]) # weigh 1st observation twice as high
 5.0
 ```

-To manually compute the result for `AvgMode.WeightedMean([2,1])`
+To manually compute the result for `AggMode.WeightedMean([2,1])`
 we follow a similar approach, but use the normalized weight
 vector in the last step.

@@ -282,8 +288,8 @@ julia> sum(tmp .* [0.6666, 0.3333]) # weigh 1st observation twice as high
 Note that you can specify explicitly if you want to normalize the
 weight vector. That option is supported for computing the
 weighted sum, as well as for computing the weighted mean. See the
-documentation for [`AvgMode.WeightedSum`](@ref) and
-[`AvgMode.WeightedMean`](@ref) for more information.
+documentation for [`AggMode.WeightedSum`](@ref) and
+[`AggMode.WeightedMean`](@ref) for more information.

 The code-snippets above are of course very inefficient, because
 they allocate (multiple) temporary arrays. We only included them

@@ -293,32 +299,32 @@ special methods for [`value`](@ref), [`deriv`](@ref),
 [`deriv2`](@ref) and their mutating counterparts.

 ```jldoctest weight
-julia> value(L1DistLoss(), [1.,2,3], [2,5,-2], AvgMode.WeightedSum([1,2,1]))
+julia> value(L1DistLoss(), [1.,2,3], [2,5,-2], AggMode.WeightedSum([1,2,1]))
 12.0

-julia> value(L1DistLoss(), [1.,2,3], [2,5,-2], AvgMode.WeightedMean([1,2,1]))
+julia> value(L1DistLoss(), [1.,2,3], [2,5,-2], AggMode.WeightedMean([1,2,1]))
 3.0

-julia> value(L1DistLoss(), targets, outputs, AvgMode.WeightedSum([2,1]), ObsDim.First())
+julia> value(L1DistLoss(), targets, outputs, AggMode.WeightedSum([2,1]), ObsDim.First())
 5.0

-julia> value(L1DistLoss(), targets, outputs, AvgMode.WeightedMean([2,1]), ObsDim.First())
+julia> value(L1DistLoss(), targets, outputs, AggMode.WeightedMean([2,1]), ObsDim.First())
 0.4166666666666667
 ```

 We also provide this functionality for [`deriv`](@ref) and
 [`deriv2`](@ref) respectively.

 ```jldoctest weight
-julia> deriv(L2DistLoss(), [1.,2,3], [2,5,-2], AvgMode.WeightedSum([1,2,1]))
+julia> deriv(L2DistLoss(), [1.,2,3], [2,5,-2], AggMode.WeightedSum([1,2,1]))
 4.0

-julia> deriv(L2DistLoss(), [1.,2,3], [2,5,-2], AvgMode.WeightedMean([1,2,1]))
+julia> deriv(L2DistLoss(), [1.,2,3], [2,5,-2], AggMode.WeightedMean([1,2,1]))
 1.0

-julia> deriv(L2DistLoss(), targets, outputs, AvgMode.WeightedSum([2,1]), ObsDim.First())
+julia> deriv(L2DistLoss(), targets, outputs, AggMode.WeightedSum([2,1]), ObsDim.First())
 10.0

-julia> deriv(L2DistLoss(), targets, outputs, AvgMode.WeightedMean([2,1]), ObsDim.First())
+julia> deriv(L2DistLoss(), targets, outputs, AggMode.WeightedMean([2,1]), ObsDim.First())
 0.8333333333333334
 ```
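As a quick sanity check on the doctest values above: the `AggMode.WeightedMean([2,1])` result can be reproduced by hand from the per-observation means computed earlier on the page (`0.375` and `0.5`). A small sketch:

```julia
# hand-computing AggMode.WeightedMean([2,1]) over observations (ObsDim.First())
per_obs_mean = [0.375, 0.5]       # per-observation AggMode.Mean() results from the doctest above
w = [2, 1]                        # weigh the first observation twice as high
sum(per_obs_mean .* w) / sum(w)   # 1.25 / 3 == 0.4166666666666667, matching the doctest
```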

docs/src/user/interface.md

Lines changed: 6 additions & 0 deletions
@@ -1,3 +1,9 @@
+```@meta
+DocTestSetup = quote
+    using LossFunctions
+end
+```
+
 # Working with Losses

 Even though they are called loss "functions", this package
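The `@meta` block added at the top of both documentation pages is standard Documenter.jl machinery: the `DocTestSetup` expression is evaluated before every doctest on the page, so the `jldoctest` snippets can call into the package without each one repeating `using LossFunctions`. A hypothetical snippet (not part of the commit) that relies on this setup:

```jldoctest
julia> value(L1DistLoss(), 1.0, 2.0)  # L1DistLoss is in scope thanks to DocTestSetup
1.0
```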

src/LossFunctions.jl

Lines changed: 1 addition & 1 deletion
@@ -82,7 +82,7 @@ export

     AggMode

-include("common.jl")
+include("devutils.jl")
 include("aggregatemode.jl")

 include("supervised/supervised.jl")

src/common.jl

Lines changed: 0 additions & 9 deletions
This file was deleted.

src/devutils.jl

Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
+macro dimcheck(condition)
+    :(($(esc(condition))) || throw(DimensionMismatch("Dimensions of the parameters don't match: $($(string(condition)))")))
+end
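The new `@dimcheck` macro turns a boolean condition into a guarded `DimensionMismatch` throw whose message quotes the failing expression; the src/supervised/sparse.jl hunk below switches its call sites over from the old `@_dimcheck` (presumably what the deleted src/common.jl provided). A self-contained sketch of its behavior, with the macro copied verbatim from the diff:

```julia
macro dimcheck(condition)
    :(($(esc(condition))) || throw(DimensionMismatch("Dimensions of the parameters don't match: $($(string(condition)))")))
end

a, b, c = rand(3), rand(3), rand(4)

@dimcheck size(a) == size(b)    # condition holds, evaluates to true
# @dimcheck size(a) == size(c)  # would throw DimensionMismatch with the message:
#   "Dimensions of the parameters don't match: size(a) == size(c)"
```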

src/supervised/io.jl

Lines changed: 5 additions & 26 deletions
@@ -33,33 +33,12 @@ Base.print(io::IO, loss::WeightedBinaryLoss{T,W}, args...) where {T,W} = print(i

 _loss_xguide(loss::MarginLoss) = "y * h(x)"
 _loss_xguide(loss::DistanceLoss) = "h(x) - y"
+_loss_yguide(loss::SupervisedLoss) = "L("*_loss_xguide(loss)*")"

-@recipe function plot(drv::Deriv, rng = -2:0.05:2)
-    xguide --> _loss_xguide(drv.loss)
-    yguide --> "L'(y, h(x))"
-    label --> string(drv.loss)
-    deriv_fun(drv.loss), rng
-end
-
-@recipe function plot(loss::SupervisedLoss, rng = -2:0.05:2)
+@recipe function plot(loss::SupervisedLoss, range=-2:0.05:2; fun=value)
     xguide --> _loss_xguide(loss)
-    yguide --> "L(y, h(x))"
+    yguide --> _loss_yguide(loss)
     label --> string(loss)
-    value_fun(loss), rng
-end
-
-@recipe function plot(derivs::AbstractVector{T}, rng = -2:0.05:2) where T<:Deriv
-    for drv in derivs
-        @series begin
-            drv, rng
-        end
-    end
-end
-
-@recipe function plot(losses::AbstractVector{T}, rng = -2:0.05:2) where T<:SupervisedLoss
-    for loss in losses
-        @series begin
-            loss, rng
-        end
-    end
+    l(a) = fun(loss, a)
+    l, range
 end
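The four recipes (loss, `Deriv`-wrapped loss, and the two vector variants) collapse into a single recipe with a `fun` keyword, and the y-axis label is now derived from the loss via the new `_loss_yguide`. A hedged usage sketch, assuming a Plots.jl frontend is loaded (this example is not part of the commit):

```julia
using Plots, LossFunctions

plot(L2HingeLoss())                        # plots value(loss, a) over the default -2:0.05:2
plot(L2HingeLoss(), -1:0.01:1; fun=deriv)  # same recipe, but plots the derivative instead
```

Note that with the vector recipes removed, this file no longer wires up `plot([loss1, loss2])` for comparing several losses in one call.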

src/supervised/sparse.jl

Lines changed: 2 additions & 2 deletions
@@ -19,8 +19,8 @@ end
 ) where {T,N,Q,Ti,M}
     M > N && throw(ArgumentError("target has more dimensions than output; broadcasting not supported in this direction."))
     quote
-        @_dimcheck size(buffer) == size(output)
-        @nexprs $M (n)->@_dimcheck(size(target,n) == size(output,n))
+        @dimcheck size(buffer) == size(output)
+        @nexprs $M (n)->@dimcheck(size(target,n) == size(output,n))
         zeroQ = zero(Q)
         negQ = Q(-1)
         @simd for I in CartesianIndices(size(output))