Commit 6248967

Update main.py
Parent: 3fee341

File tree

1 file changed: main.py (+87, −88)

@@ -16,100 +16,99 @@
 
 def config():
     param_grids = [
-        # {"n_components": [1, 2, 3]},
-        # {"n_components": [1, 2, 3]},
-        # {
-        #     "kernel": ["rbf"],
-        #     "n_components": [1, 2, 3],
-        #     "gamma": [None],
-        #     "fit_inverse_transform": [True],
-        #     "n_jobs": [-1],
-        # },
-        # {
-        #     "n_components": [1, 2, 3],
-        #     "alpha": [0.001, 0.01],
-        #     "n_jobs": [-1],
-        # },
-        # {
-        #     "n_components": [1, 2, 3],
-        #     "algorithm": ["randomized"],
-        #     "n_iter": [1, 2, 4, 5],
-        # },
-        # {"n_components": [1, 2, 3], "eps": [0.125, 0.75, 1]},
-        # {"n_components": [1, 2, 3]},
-        # {"n_components": [1, 2, 3]},
-        # {
-        #     "n_components": [1, 2, 3],
-        #     "density": ["auto"],
-        #     "eps": [
-        #         0.5,
-        #     ],
-        #     "dense_output": [True, False],
-        # },
+        {"n_components": [1, 2, 3]},
+        {"n_components": [1, 2, 3]},
+        {
+            "kernel": ["rbf"],
+            "n_components": [1, 2, 3],
+            "gamma": [None],
+            "fit_inverse_transform": [True],
+            "n_jobs": [-1],
+        },
+        {
+            "n_components": [1, 2, 3],
+            "alpha": [0.001, 0.01],
+            "n_jobs": [-1],
+        },
+        {
+            "n_components": [1, 2, 3],
+            "algorithm": ["randomized"],
+            "n_iter": [1, 2, 4, 5],
+        },
+        {"n_components": [1, 2, 3], "eps": [0.125, 0.75, 1]},
+        {"n_components": [1, 2, 3]},
+        {"n_components": [1, 2, 3]},
+        {
+            "n_components": [1, 2, 3],
+            "density": ["auto"],
+            "eps": [
+                0.5,
+            ],
+            "dense_output": [True, False],
+        },
         {"n_components": [2, 3], "n_jobs": [-1], "n_neighbors": [1, 5]},
-        # {
-        #     "n_components": [1, 2, 3],
-        #     "batch_size": [100, 200],
-        #     "alpha": [
-        #         0.0001,
-        #         0.001,
-        #         0.01,
-        #     ],
-        #     "n_iter": [
-        #         2,
-        #         3,
-        #         4,
-        #     ],
-        # },
-        # {
-        #     "n_components": [1, 2, 3],
-        #     "algorithm": ["parallel", "deflation"],
-        #     "whiten": [True, False],
-        #     "max_iter": [50, 100],
-        # },
-        # {
-        #     "n_components": [1, 2, 3],
-        #     "n_neighbors": [10],
-        #     "method": ["modified"],
-        #     "n_jobs": [4],
-        # },
+        {
+            "n_components": [1, 2, 3],
+            "batch_size": [100, 200],
+            "alpha": [
+                0.0001,
+                0.001,
+                0.01,
+            ],
+            "n_iter": [
+                2,
+                3,
+                4,
+            ],
+        },
+        {
+            "n_components": [1, 2, 3],
+            "algorithm": ["parallel", "deflation"],
+            "whiten": [True, False],
+            "max_iter": [50, 100],
+        },
+        {
+            "n_components": [1, 2, 3],
+            "n_neighbors": [10],
+            "method": ["modified"],
+            "n_jobs": [4],
+        },
     ]
     reduction_methods = [
-        # PCA,
-        # IncrementalPCA,
-        # KernelPCA,
-        # SparsePCA,
-        # TruncatedSVD,
-        # GaussianRandomProjection,
-        # LinearDiscriminantAnalysis,
-        # NeighborhoodComponentsAnalysis,
-        # SparseRandomProjection,
+        PCA,
+        IncrementalPCA,
+        KernelPCA,
+        SparsePCA,
+        TruncatedSVD,
+        GaussianRandomProjection,
+        LinearDiscriminantAnalysis,
+        NeighborhoodComponentsAnalysis,
+        SparseRandomProjection,
         Isomap,
-        # MiniBatchDictionaryLearning,
-        # FastICA,
-        # LocallyLinearEmbedding,
+        MiniBatchDictionaryLearning,
+        FastICA,
+        LocallyLinearEmbedding,
     ]
     standard_pipeline = Pipeline([("StandardScalar", StandardScaler())])
     return param_grids, standard_pipeline, reduction_methods
 
 
-if __name__ == "__main__":
-    X_train, X_test, y_train, y_test = load_dataset()
-    param_grids, standard_pipeline, reduction_methods = config()
-    all_possible_variations = Variations(
-        param_grids=param_grids,
-        reduction_methods=reduction_methods,
-        standard_pipeline=standard_pipeline,
-        analysis_instance=Analysis(X_train, y_train),
-    ).produce_variations()
-    all_pipeline_performance, best_performances = Evaluation(
-        _data={
-            "X_train": X_train,
-            "X_test": X_test,
-            "y_train": y_train,
-            "y_test": y_test,
-        },
-        all_possible_variations=all_possible_variations,
-        labels=np.unique(y_train),
-    ).evaluate()
-    pprint(best_performances)
+X_train, X_test, y_train, y_test = load_dataset()
+param_grids, standard_pipeline, reduction_methods = config()
+all_possible_variations = Variations(
+    param_grids=param_grids,
+    reduction_methods=reduction_methods,
+    standard_pipeline=standard_pipeline,
+    analysis_instance=Analysis(X_train, y_train),
+).produce_variations()
+all_pipeline_performance, best_performances = Evaluation(
+    _data={
+        "X_train": X_train,
+        "X_test": X_test,
+        "y_train": y_train,
+        "y_test": y_test,
+    },
+    all_possible_variations=all_possible_variations,
+    labels=np.unique(y_train),
+).evaluate()
+pprint(best_performances)
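
Note on the pairing: the grids appear to line up positionally with reduction_methods (the "kernel": ["rbf"] grid sits at the index of KernelPCA, and the "method": ["modified"] grid at that of LocallyLinearEmbedding). Variations, Evaluation, Analysis, and load_dataset are project-internal and not part of this diff, so the following is only a minimal sketch of how such a positional pairing could be consumed with stock scikit-learn; the scale/reduce/classify pipeline, the LogisticRegression classifier, the iris data, and every name below are illustrative assumptions, not the repository's actual implementation.

# Hypothetical sketch: pair each reduction method with its grid and
# tune it inside a scaler -> reducer -> classifier pipeline.
from pprint import pprint

from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

X_train, X_test, y_train, y_test = train_test_split(
    *load_iris(return_X_y=True), random_state=0
)

# The grid at index i configures the reduction method at index i.
param_grids = [{"n_components": [1, 2, 3]}, {"n_components": [1, 2, 3]}]
reduction_methods = [PCA, IncrementalPCA]

best = {}
for method, grid in zip(reduction_methods, param_grids):
    pipeline = Pipeline(
        [
            ("scale", StandardScaler()),
            ("reduce", method()),
            ("classify", LogisticRegression(max_iter=1000)),
        ]
    )
    # Prefix each grid key with "reduce__" so GridSearchCV routes it
    # to the reduction step of the pipeline.
    search = GridSearchCV(
        pipeline,
        {f"reduce__{key}": values for key, values in grid.items()},
        cv=3,
    )
    search.fit(X_train, y_train)
    best[method.__name__] = (search.best_params_, search.best_score_)

pprint(best)

The "reduce__" prefix is scikit-learn's standard way of addressing a named pipeline step from a parameter grid, which would explain why each grid in the commit lists only the reducer's own parameters.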
