Skip to content

Commit

Permalink
update algorithm in projects.json
Browse files Browse the repository at this point in the history
  • Loading branch information
HyunjunA committed Apr 6, 2024
1 parent 9930f90 commit 55912f7
Showing 1 changed file with 65 additions and 3 deletions.
68 changes: 65 additions & 3 deletions docker/dbmongo/files/projects.json
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,15 @@
},
"category": "classification"
},

{
"name": "GaussianNB",
"path": "sklearn.naive_bayes",
"categorical_encoding_strategy": "OneHotEncoder",
"description": "Gaussian Naive Bayes",
    "url": "https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.GaussianNB.html",
"schema": {},
"category": "classification"
},
{
"name": "MultinomialNB",
"path": "sklearn.naive_bayes",
Expand Down Expand Up @@ -281,7 +289,62 @@
},
"category": "classification"
},

{
"name": "XGBClassifier",
"path": "xgboost",
"categorical_encoding_strategy": "OrdinalEncoder",
"description": "eXtreme Gradient Boosting classification",
"url": "https://xgboost.readthedocs.io/en/latest/tutorials/model.html",
"schema": {
"n_estimators": {
"description": "The number of boosting stages to perform. Gradient boosting is fairly robust to over-fitting so a large number usually results in better performance.",
"type": "int",
"default": 100,
"ui": {
"style": "radio",
"choices": [100, 500],
"grid_search": [100]
}
},
"learning_rate": {
"description": "Learning rate shrinks the contribution of each tree by learning_rate. There is a trade-off between learning_rate and n_estimators.",
"type": "float",
"default": 0.1,
"ui": {
"style": "radio",
"choices": [0.01, 0.1, 1]
}
},
"max_depth": {
"description": "Maximum tree depth for base learners.",
"type": "int",
"default": 3,
"ui": {
"style": "radio",
"choices": [1, 3, 5, 10]
}
},
"min_child_weight": {
        "description": "Minimum sum of instance weight (hessian) needed in a child.",
"type": "int",
"default": 3,
"ui": {
"style": "radio",
"choices": [1, 3, 5, 10, 20]
}
},
"subsample": {
"description": "The fraction of samples to be used for fitting the individual base learners. If smaller than 1.0 this results in Stochastic Gradient Boosting. subsample interacts with the parameter n_estimators. Choosing subsample \u003c 1.0 leads to a reduction of variance and an increase in bias.",
"type": "float",
"default": 1,
"ui": {
"style": "radio",
"choices": [0.5, 1]
}
}
},
"category": "classification"
},
{
"name": "KNeighborsClassifier",
"path": "sklearn.neighbors",
Expand Down Expand Up @@ -585,7 +648,6 @@
},
"category": "classification"
},

{
"name": "DecisionTreeRegressor",
"path": "sklearn.tree",
Expand Down

0 comments on commit 55912f7

Please sign in to comment.