This demo assumes you have certain packages in your active package environment. To activate a new environment, "MyNewEnv", with just these packages, do this in a new REPL session:
using Pkg
Pkg.activate("MyNewEnv")
Pkg.add(["MLJ", "RDatasets", "DataFrames", "MLJDecisionTreeInterface",
"MLJMultivariateStatsInterface", "NearestNeighborModels", "MLJGLMInterface",
"Plots"])
The following starts MLJ and shows the current version of MLJ (you can also use Pkg.status()):
using MLJ
MLJ_VERSION
v"0.20.6"
import RDatasets
channing = RDatasets.dataset("boot", "channing")
first(channing, 4) |> pretty
┌──────────────────────────────────┬───────┬───────┬───────┬───────┐
│ Sex │ Entry │ Exit │ Time │ Cens │
│ CategoricalValue{String, UInt32} │ Int32 │ Int32 │ Int32 │ Int32 │
"setosa"
"setosa"
"setosa"
Splitting data vertically after row shuffling:
channing_train, channing_test = partition(channing, 0.6, rng=123);
Or, if already horizontally split:
(Xtrain, Xtest), (ytrain, ytest) = partition((X, y), 0.6, multi=true, rng=123)
(((sepal_length = [6.7, 5.7, 7.2, 4.4, 5.6, 6.5, 4.4, 6.1, 5.4, 4.9 … 6.4, 5.5, 5.4, 4.8, 6.5, 4.9, 6.5, 6.7, 5.6, 6.4], sepal_width = [3.3, 2.8, 3.0, 2.9, 2.5, 3.0, 3.0, 2.9, 3.9, 2.5 … 3.1, 2.3, 3.7, 3.1, 3.0, 2.4, 2.8, 3.3, 2.9, 2.8], petal_length = [5.7, 4.1, 5.8, 1.4, 3.9, 5.2, 1.3, 4.7, 1.7, 4.5 … 5.5, 4.0, 1.5, 1.6, 5.5, 3.3, 4.6, 5.7, 3.6, 5.6], petal_width = [2.1, 1.3, 1.6, 0.2, 1.1, 2.0, 0.2, 1.4, 0.4, 1.7 … 1.8, 1.3, 0.2, 0.2, 1.8, 1.0, 1.5, 2.5, 1.3, 2.2]), (sepal_length = [6.0, 5.8, 6.7, 5.1, 5.0, 6.3, 5.7, 6.4, 6.1, 5.0 … 6.4, 6.8, 6.9, 6.1, 6.7, 5.0, 7.6, 6.3, 5.1, 5.0], sepal_width = [2.7, 2.6, 3.0, 3.8, 3.4, 2.8, 2.5, 3.2, 2.8, 3.5 … 2.7, 3.2, 3.1, 2.8, 2.5, 3.5, 3.0, 2.5, 3.8, 3.6], petal_length = [5.1, 4.0, 5.2, 1.9, 1.5, 5.1, 5.0, 4.5, 4.7, 1.6 … 5.3, 5.9, 5.4, 4.0, 5.8, 1.3, 6.6, 5.0, 1.6, 1.4], petal_width = [1.6, 1.2, 2.3, 0.4, 0.2, 1.5, 2.0, 1.5, 1.2, 0.6 … 1.9, 2.3, 2.1, 1.3, 1.8, 0.3, 2.1, 1.9, 0.2, 0.2])), (CategoricalValue{String, UInt32}["virginica", "versicolor", "virginica", "setosa", "versicolor", "virginica", "setosa", "versicolor", "setosa", "virginica" … "virginica", "versicolor", "setosa", "setosa", "virginica", "versicolor", "versicolor", "virginica", "versicolor", "virginica"], CategoricalValue{String, UInt32}["versicolor", "versicolor", "virginica", "setosa", "setosa", "virginica", "virginica", "versicolor", "versicolor", "setosa" … "virginica", "virginica", "virginica", "versicolor", "virginica", "setosa", "virginica", "virginica", "setosa", "setosa"]))
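A single table can also be split column-wise into a target vector and a feature table using unpack. A minimal sketch, assuming the :Exit column of channing is the target and :Time is to be dropped:
# :Exit becomes the target y; every remaining column except :Time goes into X
y, X = unpack(channing, ==(:Exit), !=(:Time));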
Reference: Model Search
Searching for a supervised model:
X, y = @load_boston
ms = models(matching(X, y))
70-element Vector{NamedTuple{(:name, :package_name, :is_supervised, :abstract_type, :constructor, :deep_properties, :docstring, :fit_data_scitype, :human_name, :hyperparameter_ranges, :hyperparameter_types, :hyperparameters, :implemented_methods, :inverse_transform_scitype, :is_pure_julia, :is_wrapper, :iteration_parameter, :load_path, :package_license, :package_url, :package_uuid, :predict_scitype, :prediction_type, :reporting_operations, :reports_feature_importances, :supports_class_weights, :supports_online, :supports_training_losses, :supports_weights, :transform_scitype, :input_scitype, :target_scitype, :output_scitype)}}:
(name = ARDRegressor, package_name = MLJScikitLearnInterface, ... )
(name = AdaBoostRegressor, package_name = MLJScikitLearnInterface, ... )
(name = BaggingRegressor, package_name = MLJScikitLearnInterface, ... )
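The metadata entry for any individual model can also be looked up by name; a sketch of such a lookup, which returns an entry like the one excerpted below:
info("ConstantRegressor", pkg="MLJModels")   # trait metadata for the named model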
package_name = "MLJModels",
is_supervised = true,
abstract_type = Probabilistic,
constructor = nothing,
deep_properties = (),
docstring = "```\nConstantRegressor\n```\n\nThis \"dummy\" probabilis...",
fit_data_scitype = Tuple{Table, AbstractVector{Continuous}},
iteration_parameter = nothing,
load_path = "MLJModels.ConstantRegressor",
package_license = "MIT",
package_url = "https://github.com/JuliaAI/MLJModels.jl",
package_uuid = "d491faf4-2d78-11e9-2867-c94bc002c0b7",
predict_scitype = AbstractVector{ScientificTypesBase.Density{Continuous}},
prediction_type = :probabilistic,
transform_scitype = Unknown,
input_scitype = Table,
target_scitype = AbstractVector{Continuous},
output_scitype = Unknown)
models("Tree")
28-element Vector{NamedTuple{(:name, :package_name, :is_supervised, :abstract_type, :constructor, :deep_properties, :docstring, :fit_data_scitype, :human_name, :hyperparameter_ranges, :hyperparameter_types, :hyperparameters, :implemented_methods, :inverse_transform_scitype, :is_pure_julia, :is_wrapper, :iteration_parameter, :load_path, :package_license, :package_url, :package_uuid, :predict_scitype, :prediction_type, :reporting_operations, :reports_feature_importances, :supports_class_weights, :supports_online, :supports_training_losses, :supports_weights, :transform_scitype, :input_scitype, :target_scitype, :output_scitype)}}:
(name = ABODDetector, package_name = OutlierDetectionNeighbors, ... )
(name = AdaBoostStumpClassifier, package_name = DecisionTree, ... )
(name = COFDetector, package_name = OutlierDetectionNeighbors, ... )
models() do model
    matching(model, X, y) &&
        model.prediction_type == :deterministic &&
        model.is_pure_julia
end;
Searching for an unsupervised model:
models(matching(X))
63-element Vector{NamedTuple{(:name, :package_name, :is_supervised, :abstract_type, :constructor, :deep_properties, :docstring, :fit_data_scitype, :human_name, :hyperparameter_ranges, :hyperparameter_types, :hyperparameters, :implemented_methods, :inverse_transform_scitype, :is_pure_julia, :is_wrapper, :iteration_parameter, :load_path, :package_license, :package_url, :package_uuid, :predict_scitype, :prediction_type, :reporting_operations, :reports_feature_importances, :supports_class_weights, :supports_online, :supports_training_losses, :supports_weights, :transform_scitype, :input_scitype, :target_scitype, :output_scitype)}}:
(name = ABODDetector, package_name = OutlierDetectionNeighbors, ... )
(name = ABODDetector, package_name = OutlierDetectionPython, ... )
(name = AffinityPropagation, package_name = MLJScikitLearnInterface, ... )
package_name = "MultivariateStats",
is_supervised = true,
abstract_type = Deterministic,
constructor = nothing,
deep_properties = (),
docstring = "```\nRidgeRegressor\n```\n\nA model type for construct...",
fit_data_scitype =
rng = Random._GLOBAL_RNG())
Bind the model and data together in a machine, which will additionally store the learned parameters (fitresults) when fit:
mach = machine(tree, X, y)
untrained Machine; caches model-specific representations of data
model: DecisionTreeClassifier(max_depth = 2, …)
args:
1: Source @906 ⏎ Table{AbstractVector{Continuous}}
2: Source @359 ⏎ AbstractVector{Multiclass{2}}
Split row indices into training and evaluation rows:
train, test = partition(eachindex(y), 0.7); # 70:30 split
([1, 2, 3, 4, 5, 6, 7, 8, 9, 10 … 131, 132, 133, 134, 135, 136, 137, 138, 139, 140], [141, 142, 143, 144, 145, 146, 147, 148, 149, 150 … 191, 192, 193, 194, 195, 196, 197, 198, 199, 200])
Fit on the train data set and evaluate on the test data set:
fit!(mach, rows=train)
yhat = predict(mach, X[test,:])
LogLoss(tol=1e-4)(yhat, y[test])
1.0788055664326648
Note LogLoss() has aliases log_loss and cross_entropy.
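For example, either alias computes the same quantity (with the default tolerance); a minimal sketch re-using yhat and y[test] from above:
log_loss(yhat, y[test])        # same measure, via its log_loss alias
cross_entropy(yhat, y[test])   # and via its cross_entropy alias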
Predict on the new data set:
Xnew = (FL = rand(3), RW = rand(3), CL = rand(3), CW = rand(3), BD = rand(3))
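Probabilistic predictions for these new observations are then obtained with predict, and point predictions with predict_mode; a sketch (output not shown):
yhat_new = predict(mach, Xnew)   # vector of UnivariateFinite distributions
predict_mode(mach, Xnew)         # most probable class for each observation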
┌───┬──────────────────────┬──────────────┬─────────────┐
│ │ measure │ operation │ measurement │
├───┼──────────────────────┼──────────────┼─────────────┤
│ A │ LogLoss( │ predict │ 4.81 │
│ │ tol = 2.22045e-16) │ │ │
│ B │ Accuracy() │ predict_mode │ 0.736 │
└───┴──────────────────────┴──────────────┴─────────────┘
┌───┬───────────────────────┬─────────┐
│ │ per_fold │ 1.96*SE │
├───┼───────────────────────┼─────────┤
│ A │ [5.1, 6.48, 3.07] │ 2.38 │
│ B │ [0.696, 0.739, 0.769] │ 0.0513 │
└───┴───────────────────────┴─────────┘
Changing a hyperparameter and re-evaluating:
tree.max_depth = 3
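For example, the deeper tree can be re-assessed with the same machine; a sketch assuming 3-fold cross-validation and the two measures reported above:
evaluate!(mach, resampling=CV(nfolds=3), measure=[LogLoss(), Accuracy()])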
mach = machine(ols, X, y) |> fit!
trained Machine; caches model-specific representations of data
model: LinearRegressor(fit_intercept = true, …)
args:
1: Source @645 ⏎ Table{AbstractVector{Continuous}}
2: Source @770 ⏎ AbstractVector{Continuous}
Get a named tuple representing the learned parameters, human-readable if appropriate:
fitted_params(mach)
(features = [:x1, :x2],
 coef = [1.0019484491170485, -2.0124583022683673],
 intercept = 0.05839955441025016,)
Get other training-related information:
report(mach)
(stderror = [0.007289046266540614, 0.009314702321351547, 0.009664751997931865],
dof_residual = 97.0,
 vcov = [5.3130195475769666e-5 -4.737168085144333e-5 -4.924311372223852e-5; -4.737168085144333e-5 8.676367933539189e-5 1.3161433444949447e-5; -4.924311372223852e-5 1.3161433444949447e-5 9.340743118152798e-5],
 deviance = 0.07678443409575168,
coef_table = ──────────────────────────────────────────────────────────────────────────────
Coef. Std. Error t Pr(>|t|) Lower 95% Upper 95%
──────────────────────────────────────────────────────────────────────────────
(Intercept) 0.0583996 0.00728905 8.01 <1e-11 0.0439328 0.0728663
x1 1.00195 0.0093147 107.57 <1e-99 0.983461 1.02044
x2 -2.01246 0.00966475 -208.23 <1e-99 -2.03164 -1.99328
──────────────────────────────────────────────────────────────────────────────,)
Load data:
X, y = @load_iris # a table and a vector
train, test = partition(eachindex(y), 0.97, shuffle=true, rng=123)
([125, 100, 130, 9, 70, 148, 39, 64, 6, 107 … 110, 59, 139, 21, 112, 144, 140, 72, 109, 41], [106, 147, 47, 5])
Instantiate and fit the model/machine:
PCA = @load PCA
pca = PCA(maxoutdim=2)
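Since PCA is unsupervised, the machine is bound to the feature table alone; a sketch of the binding assumed by the fit! call that follows:
mach = machine(pca, X)   # no target is supplied for an unsupervised model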
fit!(mach, rows=train)
trained Machine; caches model-specific representations of data
model: PCA(maxoutdim = 2, …)
args:
1: Source @053 ⏎ Table{AbstractVector{Continuous}}
Transform selected data bound to the machine:
transform(mach, rows=test);
(x1 = [-3.394282685448322, -1.5219827578765053, 2.53824745518522, 2.7299639893931382],
x2 = [0.547245022374522, -0.36842368617126425, 0.5199299511335688, 0.3448466122232349],)
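An approximate reconstruction of the original features from their principal components is available via inverse_transform; a sketch:
W = transform(mach, rows=test)   # the reduced representation shown above
inverse_transform(mach, W)       # back-projects onto the original four features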
Transform new data:
Xnew = (sepal_length=rand(3), sepal_width=rand(3),
petal_length=rand(3), petal_width=rand(3));
transform(mach, Xnew)
(x1 = [4.60254619833418, 4.963408439322138, 4.73352667809396],
 x2 = [-4.450747224690028, -4.340052887208079, -4.323758570369482],)
y = rand(100);
stand = Standardizer()
mach = machine(stand, y)
fit!(mach)
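Once fitted, the machine standardizes with transform, and inverse_transform restores the original scale; a sketch:
z = transform(mach, y)           # standardized copy of y
inverse_transform(mach, z) ≈ y   # true, up to floating-point error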
logger = nothing)
Binding the wrapped model to data:
mach = machine(tuned_forest, X, y)
untrained Machine; does not cache data
model: ProbabilisticTunedModel(model = ProbabilisticEnsembleModel(model = DecisionTreeClassifier(max_depth = -1, …), …), …)
args:
1: Source @313 ⏎ Table{AbstractVector{Continuous}}
2: Source @689 ⏎ AbstractVector{Multiclass{3}}
Fitting the resultant machine optimizes the hyperparameters specified in range, using the specified tuning and resampling strategies and performance measure (possibly a vector of measures), and retrains on all data bound to the machine:
fit!(mach)
trained Machine; does not cache data
model: ProbabilisticTunedModel(model = ProbabilisticEnsembleModel(model = DecisionTreeClassifier(max_depth = -1, …), …), …)
args:
1: Source @313 ⏎ Table{AbstractVector{Continuous}}
2: Source @689 ⏎ AbstractVector{Multiclass{3}}
Inspecting the optimal model:
F = fitted_params(mach)
(best_model = ProbabilisticEnsembleModel(model = DecisionTreeClassifier(max_depth = -1, …), …),
best_fitted_params = (fitresult = WrappedEnsemble(atom = DecisionTreeClassifier(max_depth = -1, …), …),),)
F.best_model
ProbabilisticEnsembleModel(
model = DecisionTreeClassifier(
min_samples_leaf = 1,
min_samples_split = 2,
min_purity_increase = 0.0,
n_subfeatures = 4,
post_prune = false,
merge_purity_threshold = 1.0,
display_depth = 5,
acceleration = CPU1{Nothing}(nothing),
out_of_bag_measure = Any[])
Inspecting the details of the tuning procedure:
r = report(mach);
keys(r)
(:best_model, :best_history_entry, :history, :best_report, :plotting)
r.history[[1,end]]
2-element Vector{@NamedTuple{model::MLJEnsembles.ProbabilisticEnsembleModel{MLJDecisionTreeInterface.DecisionTreeClassifier}, measure::Vector{StatisticalMeasuresBase.RobustMeasure{StatisticalMeasuresBase.FussyMeasure{StatisticalMeasuresBase.RobustMeasure{StatisticalMeasures._BrierLossType}, typeof(StatisticalMeasures.l2_check)}}}, measurement::Vector{Float64}, per_fold::Vector{Vector{Float64}}, evaluation::CompactPerformanceEvaluation{MLJEnsembles.ProbabilisticEnsembleModel{MLJDecisionTreeInterface.DecisionTreeClassifier}, Vector{StatisticalMeasuresBase.RobustMeasure{StatisticalMeasuresBase.FussyMeasure{StatisticalMeasuresBase.RobustMeasure{StatisticalMeasures._BrierLossType}, typeof(StatisticalMeasures.l2_check)}}}, Vector{Float64}, Vector{typeof(predict)}, Vector{Vector{Float64}}, Vector{Vector{Vector{Float64}}}, CV}}}:
 (model = ProbabilisticEnsembleModel(model = DecisionTreeClassifier(max_depth = -1, …), …), measure = [BrierLoss()], measurement = [0.11061688888888872], per_fold = [[0.008769777777777862, 0.00018311111111112943, 0.13994577777777764, 0.15614133333333288, 0.14898399999999967, 0.20967733333333313]], evaluation = CompactPerformanceEvaluation(0.111,))
 (model = ProbabilisticEnsembleModel(model = DecisionTreeClassifier(max_depth = -1, …), …), measure = [BrierLoss()], measurement = [0.12125549176954746], per_fold = [[0.02781777777777793, 0.007603555555555701, 0.19223187037037057, 0.1535252222222222, 0.1663280555555555, 0.18002646913580272]], evaluation = CompactPerformanceEvaluation(0.121,))
Visualizing these results:
using Plots
plot(mach)
Predicting on new data using the optimized model trained on all data:
predict(mach, Xnew)
3-element UnivariateFiniteVector{Multiclass{3}, String, UInt32, Float64}:
UnivariateFinite{Multiclass{3}}(setosa=>1.0, versicolor=>0.0, virginica=>0.0)
UnivariateFinite{Multiclass{3}}(setosa=>1.0, versicolor=>0.0, virginica=>0.0)
UnivariateFinite{Multiclass{3}}(setosa=>0.767, versicolor=>0.213, virginica=>0.02)
Reference: Linear Pipelines
Constructing a linear (unbranching) pipeline with a learned target transformation/inverse transformation:
X, y = @load_reduced_ames
KNN = @load KNNRegressor
knn_with_target = TransformedTargetModel(model=KNN(K=3), transformer=Standardizer())
TransformedTargetModelDeterministic(
model = KNNRegressor(
┌──────────────────────┬───────────┬─────────────┐
│ measure │ operation │ measurement │
├──────────────────────┼───────────┼─────────────┤
│ LogLoss( │ predict │ 0.428 │
│ tol = 2.22045e-16) │ │ │
└──────────────────────┴───────────┴─────────────┘
┌────────────────────────────────────────────────┬─────────┐
│ per_fold │ 1.96*SE │
├────────────────────────────────────────────────┼─────────┤
│ [3.89e-15, 3.89e-15, 0.294, 0.41, 1.56, 0.299] │ 0.51 │
└────────────────────────────────────────────────┴─────────┘
Generate a plot of performance as a function of some hyperparameter (building on the preceding example)
Single performance curve:
r = range(forest, :n, lower=1, upper=1000, scale=:log10)
curve = learning_curve(mach,
verbosity=0)
(parameter_name = "n",
parameter_scale = :log10,
parameter_values = [1, 2, 3, 4, 5, 6, 7, 8, 10, 11 … 281, 324, 373, 429, 494, 569, 655, 754, 869, 1000],
measurements = [4.004850376568572, 4.1126732713223415, 4.067922726718731, 4.123999873775369, 4.150105956717014, 2.688089225524209, 2.715285824731319, 2.7309139415415857, 2.7444858783511297, 2.7476450089856033 … 1.269185619048552, 1.2786364928754186, 1.2725212042652867, 1.2789570911204242, 1.2797130430389276, 1.2768033472128724, 1.2644056972193418, 1.2598962094386172, 1.2612790173706743, 1.2557508210679436],)
using Plots
plot(curve.parameter_values, curve.measurements,
xlab=curve.parameter_name, xscale=curve.parameter_scale)
Multiple curves:
curve = learning_curve(mach,
range=r,
verbosity=0)
(parameter_name = "n",
parameter_scale = :log10,
parameter_values = [1, 2, 3, 4, 5, 6, 7, 8, 10, 11 … 281, 324, 373, 429, 494, 569, 655, 754, 869, 1000],
measurements = [4.004850376568572 8.009700753137146 16.820371581588002 8.009700753137146; 4.004850376568572 8.040507294495367 9.087929700674836 8.040507294495367; … ; 1.2032433035747263 1.2341483592529663 1.2651934430346028 1.2739529214818293; 1.2088161637388406 1.2322191736815042 1.2677284135372628 1.2752290384276348],)
plot(curve.parameter_values, curve.measurements,
xlab=curve.parameter_name, xscale=curve.parameter_scale)