The following are 5 code examples extracted from Python open-source projects, illustrating how to use sklearn.linear_model.RidgeClassifier().
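Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below) of the basic RidgeClassifier workflow; all names are standard scikit-learn API:

# Minimal usage sketch: RidgeClassifier maps labels to {-1, 1} and solves the
# task as an L2-regularized regression; alpha controls the penalty strength.
from sklearn.datasets import make_classification
from sklearn.linear_model import RidgeClassifier
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=200, n_features=20, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = RidgeClassifier(alpha=1.0)          # alpha is the L2 penalty strength
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))          # mean accuracy on held-out data
print(clf.decision_function(X_test[:3]))  # signed distances, not probabilities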
def test_model_assessment():
    # ElasticNetFeatureSelection and ModelAssessment are helpers defined in
    # the source project; the remaining names are standard scikit-learn.
    X, y = make_classification(n_samples=40, n_features=100, n_informative=2,
                               n_classes=2, n_redundant=0)
    pipe = Pipeline([('enet', ElasticNetFeatureSelection()),
                     ('ridge', RidgeClassifier())])
    ma = ModelAssessment(GridSearchCV(pipe, {'enet__l1_ratio': [2]})).fit(X, y)
    assert len(ma.cv_results_) == 0
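The test above depends on project-specific helpers. For readers without that project, here is a hedged sketch of the same pipeline shape using only scikit-learn pieces; SelectKBest stands in for ElasticNetFeatureSelection, and the 'step__param' naming convention routes grid values to pipeline steps:

# Self-contained sketch (assumption: SelectKBest substitutes for the
# project's ElasticNetFeatureSelection step).
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.linear_model import RidgeClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline

X, y = make_classification(n_samples=40, n_features=100, n_informative=2,
                           n_classes=2, n_redundant=0, random_state=0)
pipe = Pipeline([('select', SelectKBest(f_classif)),
                 ('ridge', RidgeClassifier())])
search = GridSearchCV(pipe, {'select__k': [2, 10],
                             'ridge__alpha': [0.1, 1.0]}, cv=3)
search.fit(X, y)
print(search.best_params_)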
def __init__(self, mu=.5, tau=1.0, lamda=1, use_gpu=False, threshold=1e-16,
             alpha=None, l1_ratio=None, fit_intercept=True, normalize=False,
             precompute=False, max_iter=10000, copy_X=True, tol=1e-4,
             warm_start=False, positive=False, random_state=None,
             selection='cyclic'):
    vs = L1L2(mu=mu, tau=tau, use_gpu=use_gpu, threshold=threshold,
              alpha=alpha, l1_ratio=l1_ratio, fit_intercept=fit_intercept,
              normalize=normalize, precompute=precompute, max_iter=max_iter,
              copy_X=copy_X, tol=tol, warm_start=warm_start,
              positive=positive, random_state=random_state,
              selection=selection)
    mdl = RidgeClassifier(alpha=lamda, fit_intercept=fit_intercept,
                          normalize=normalize, copy_X=copy_X,
                          max_iter=max_iter, tol=tol,
                          random_state=random_state)
    super(L1L2TwoStepClassifier, self).__init__(
        (('l1l2', vs), ('ridge', mdl)))
    self.mu = mu
    self.tau = tau
    self.lamda = lamda
    self.alpha = alpha
    self.l1_ratio = l1_ratio
    self.use_gpu = use_gpu
    self.threshold = threshold
    self.fit_intercept = fit_intercept
    self.normalize = normalize
    self.precompute = precompute
    self.max_iter = max_iter
    self.copy_X = copy_X
    self.tol = tol
    self.warm_start = warm_start
    self.positive = positive
    self.intercept_ = 0.0
    self.random_state = random_state
    self.selection = selection
def generate_base_classification():
    from sklearn.svm import LinearSVC, NuSVC, SVC
    from sklearn.tree import ExtraTreeClassifier, DecisionTreeClassifier
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.gaussian_process import GaussianProcessClassifier
    from sklearn.linear_model import (LogisticRegression,
                                      PassiveAggressiveClassifier,
                                      RidgeClassifier, SGDClassifier)
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB

    # Build the KNN grid with dict(...) so the extra 'algorithm' key is merged
    # into a new dict; dict.update() returns None, so calling it inline would
    # silently replace the grid with None.
    knn_grid = dict(params('n_neighbors', 'leaf_size', 'p'),
                    algorithm=['auto', 'brute', 'kd_tree', 'ball_tree'])

    models = [
        #(LinearSVC, params('C', 'loss')),
        #(NuSVC, params('nu', 'kernel', 'degree')),
        #(SVC, params('C', 'kernel')),
        #(ExtraTreeClassifier, params('criterion', 'min_samples_split', 'min_samples_leaf')),
        (DecisionTreeClassifier,
         params('criterion', 'min_samples_split', 'min_samples_leaf')),
        (RandomForestClassifier,
         params('criterion', 'min_samples_split', 'min_samples_leaf',
                'n_estimators')),
        #(GaussianProcessClassifier, None),
        (LogisticRegression, params('C', 'penalty')),
        #(PassiveAggressiveClassifier, params('C', 'loss')),
        #(RidgeClassifier, params('alpha')),
        # we do in-place modification of what the method params returns in
        # order to add more loss functions that weren't defined in the method
        #(SGDClassifier, params('loss', 'penalty', 'alpha')['loss'].extend(['log', 'modified_huber'])),
        (KNeighborsClassifier, knn_grid),
        (MultinomialNB, params('alpha')),
        #(GaussianNB, None),
        #(BernoulliNB, params('alpha'))
    ]
    return models
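The function above only builds (estimator, parameter-grid) pairs; a hedged sketch of how such a list might be consumed downstream, with an explicit dict standing in for the project-specific params() helper:

# Sketch (assumption: each grid maps parameter names to candidate values,
# as produced by the project's params() helper).
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier

X, y = make_classification(n_samples=100, n_features=10, random_state=0)
models = [(KNeighborsClassifier, {'n_neighbors': [3, 5, 7]})]

for estimator_cls, grid in models:
    search = GridSearchCV(estimator_cls(), grid, cv=3)
    search.fit(X, y)
    print(estimator_cls.__name__, search.best_params_, search.best_score_)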
def _load_model(self, model_id):
    _, conn = get_engine()
    # todo
    models = {
        'QXgb': QXgb,
        'QXgb2': QXgb2,
        'Ridge': Ridge,
        'RidgeClassifier': RidgeClassifier,
        'KNeighborsClassifier': KNeighborsClassifier,
        'QAvg': QAvg,
        'QRankedAvg': QRankedAvg,
        'QRankedByLineAvg': QRankedByLineAvg,
        'QStackModel': QStackModel,
        'LogisticRegression': LogisticRegression,
        'DecisionTreeClassifier': DecisionTreeClassifier,
        'QPostProcessingModel': QPostProcessingModel,
        'RandomForestClassifier': RandomForestClassifier,
        'ExtraTreesClassifier': ExtraTreesClassifier,
        'QAvgOneModelData': QAvgOneModelData,
        'QNN1': QNN1,
        'QNN2': QNN2,
    }
    res = conn.execute(
        """
        select cls, params, descr, predict_fn from qml_models
        where model_id='{}'
        """.format(model_id)
    ).fetchone()
    if not res:
        raise Exception('Missing {} model'.format(model_id))
    model = models[res['cls']](**json.loads(res['params']))
    self.add(model_id, model, res['descr'], res['predict_fn'])
    return model
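The example above uses a registry pattern: a stored class name is mapped to a constructor and the model is rebuilt from JSON-encoded parameters. A minimal self-contained sketch of the same idea (the project-specific classes and the qml_models table are omitted):

# Registry sketch: look up a constructor by its stored name and rebuild an
# estimator from JSON-encoded keyword arguments.
import json
from sklearn.linear_model import LogisticRegression, RidgeClassifier

MODELS = {
    'RidgeClassifier': RidgeClassifier,
    'LogisticRegression': LogisticRegression,
}

def load_model(cls_name, params_json):
    return MODELS[cls_name](**json.loads(params_json))

clf = load_model('RidgeClassifier', '{"alpha": 2.0}')
print(clf)  # RidgeClassifier(alpha=2.0)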
def run_solver(self, unit, n_units, arm):
    start_time = time.time()
    # kernel_map = dict(zip([1, 2, 3], ['rbf', 'poly', 'sigmoid']))
    preprocess_map = dict(zip([1, 2, 3, 4],
                              ['none', 'min_max', 'scaled', 'normalized']))
    self.compute_preprocessor(preprocess_map[arm['preprocessor']])

    # Create random features
    features = kernel_approximation.RBFSampler(gamma=arm['gamma'],
                                               n_components=n_units,
                                               random_state=1)
    train_features = features.fit_transform(self.data['X_train'])
    val_features = features.transform(self.data['X_val'])
    test_features = features.transform(self.data['X_test'])
    approx_time = (time.time() - start_time) / 60.0
    print('approximating kernel took %r' % approx_time)

    clf = linear_model.RidgeClassifier(alpha=1.0 / (arm['C'] * n_units),
                                       solver='lsqr', copy_X=False)
    clf.fit(train_features, self.data['y_train'])
    print('fitting model took %r'
          % ((time.time() - start_time) / 60.0 - approx_time))

    # Validate this hyperparameter configuration on the full validation data
    # y_loss = 1 - clf.score(self.data['X_train'], self.data['y_train'])
    y_loss = 1
    val_acc = clf.score(val_features, self.data['y_val'])
    test_acc = clf.score(test_features, self.data['y_test'])

    del self.data
    del train_features, val_features, test_features
    gc.collect()
    return y_loss, val_acc, test_acc
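This last example approximates a kernel SVM: RBFSampler draws random Fourier features and RidgeClassifier fits a linear model on them, with alpha = 1 / (C * n_units) translating an SVM-style C into a ridge penalty. A condensed, runnable sketch of the same pattern (gamma, C, and n_components values are illustrative):

# Random Fourier features + linear ridge classifier, the pattern used above.
from sklearn.datasets import make_classification
from sklearn.kernel_approximation import RBFSampler
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import make_pipeline

X, y = make_classification(n_samples=300, n_features=20, random_state=1)
C, n_components = 1.0, 100  # illustrative values

pipe = make_pipeline(
    RBFSampler(gamma=0.1, n_components=n_components, random_state=1),
    RidgeClassifier(alpha=1.0 / (C * n_components), solver='lsqr'),
)
pipe.fit(X, y)
print(pipe.score(X, y))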