Linux vmi2545633.contaboserver.net 6.1.0-32-amd64 #1 SMP PREEMPT_DYNAMIC Debian 6.1.129-1 (2025-03-06) x86_64
Apache/2.4.62 (Debian)
Server IP : 127.0.0.1 & Your IP : 127.0.0.1
Domains :
Can't Read [ /etc/named.conf ]
User : www-data
Terminal
Auto Root
Create File
Create Folder
Localroot Suggester
Backdoor Destroyer
Readme
/
usr /
local /
lib /
python3.11 /
dist-packages /
sklearn /
Delete
Unzip
Name
Size
Permission
Date
Action
__check_build
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
__pycache__
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
_build_utils
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
_loss
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
cluster
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
compose
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
covariance
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
cross_decomposition
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
datasets
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
decomposition
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
ensemble
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
experimental
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
externals
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
feature_extraction
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
feature_selection
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
frozen
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
gaussian_process
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
impute
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
inspection
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
linear_model
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
manifold
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
metrics
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
mixture
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
model_selection
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
neighbors
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
neural_network
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
preprocessing
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
semi_supervised
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
svm
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
tests
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
tree
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
utils
[ DIR ]
drwxr-xr-x
2025-04-08 17:00
__init__.py
4.53
KB
-rw-r--r--
2025-04-08 17:00
_built_with_meson.py
0
B
-rw-r--r--
2025-04-08 17:00
_config.py
13.25
KB
-rw-r--r--
2025-04-08 17:00
_distributor_init.py
424
B
-rw-r--r--
2025-04-08 17:00
_isotonic.cpython-311-x86_64-linux-gnu.so
318.26
KB
-rwxr-xr-x
2025-04-08 17:00
_isotonic.pyx
3.62
KB
-rw-r--r--
2025-04-08 17:00
_min_dependencies.py
2.77
KB
-rw-r--r--
2025-04-08 17:00
base.py
47.58
KB
-rw-r--r--
2025-04-08 17:00
calibration.py
49.37
KB
-rw-r--r--
2025-04-08 17:00
conftest.py
12.28
KB
-rw-r--r--
2025-04-08 17:00
discriminant_analysis.py
39.54
KB
-rw-r--r--
2025-04-08 17:00
dummy.py
23.86
KB
-rw-r--r--
2025-04-08 17:00
exceptions.py
7.52
KB
-rw-r--r--
2025-04-08 17:00
isotonic.py
16.99
KB
-rw-r--r--
2025-04-08 17:00
kernel_approximation.py
38.76
KB
-rw-r--r--
2025-04-08 17:00
kernel_ridge.py
9
KB
-rw-r--r--
2025-04-08 17:00
meson.build
8.76
KB
-rw-r--r--
2025-04-08 17:00
multiclass.py
43.21
KB
-rw-r--r--
2025-04-08 17:00
multioutput.py
42.64
KB
-rw-r--r--
2025-04-08 17:00
naive_bayes.py
54.6
KB
-rw-r--r--
2025-04-08 17:00
pipeline.py
82.63
KB
-rw-r--r--
2025-04-08 17:00
random_projection.py
27.69
KB
-rw-r--r--
2025-04-08 17:00
Save
Rename
# Author: Nelle Varoquaux, Andrew Tulloch, Antony Lee

# Uses the pool adjacent violators algorithm (PAVA), with the
# enhancement of searching for the longest decreasing subsequence to
# pool at each step.

import numpy as np

from cython cimport floating


def _inplace_contiguous_isotonic_regression(floating[::1] y, floating[::1] w):
    """Solve weighted isotonic regression in place via PAVA.

    On exit ``y`` holds the non-decreasing fit and ``w`` holds the pooled
    block weights (only the entries at block-start indices are meaningful
    for ``w`` after the run).  Both buffers are modified in place; nothing
    is returned.

    Parameters (contiguous memoryviews of the same length):
    y : target values, overwritten with the isotonic fit.
    w : per-sample weights, overwritten with pooled block weights.
    """
    cdef:
        Py_ssize_t n = y.shape[0], i, k
        floating prev_y, sum_wy, sum_w
        Py_ssize_t[::1] target = np.arange(n, dtype=np.intp)

    # target describes a list of blocks.  At any time, if [i..j] (inclusive) is
    # an active block, then target[i] := j and target[j] := i.

    # For "active" indices (block starts):
    # w[i] := sum{w_orig[j], j=[i..target[i]]}
    # y[i] := sum{y_orig[j]*w_orig[j], j=[i..target[i]]} / w[i]

    # The GIL is released: the loop below touches only the typed
    # memoryviews, no Python objects.
    with nogil:
        i = 0
        while i < n:
            k = target[i] + 1  # index of the block following block i
            if k == n:
                # Reached the last block: the sequence is monotone, done.
                break
            if y[i] < y[k]:
                # No violation between block i and block k; advance.
                i = k
                continue
            # Violation found (y[i] >= y[k]): start pooling from block i.
            sum_wy = w[i] * y[i]
            sum_w = w[i]
            while True:
                # We are within a decreasing subsequence.
                prev_y = y[k]
                sum_wy += w[k] * y[k]
                sum_w += w[k]
                k = target[k] + 1
                if k == n or prev_y < y[k]:
                    # Non-singleton decreasing subsequence is finished,
                    # update first entry.
                    y[i] = sum_wy / sum_w  # pooled weighted mean of the block
                    w[i] = sum_w
                    target[i] = k - 1
                    target[k - 1] = i
                    if i > 0:
                        # Backtrack if we can.  This makes the algorithm
                        # single-pass and ensures O(n) complexity.
                        i = target[i - 1]
                    # Otherwise, restart from the same point.
                    break
    # Reconstruct the solution: propagate each block's pooled value to
    # every index inside the block.
    i = 0
    while i < n:
        k = target[i] + 1
        y[i + 1 : k] = y[i]
        i = k


def _make_unique(const floating[::1] X,
                 const floating[::1] y,
                 const floating[::1] sample_weights):
    """Average targets for duplicate X, drop duplicates.

    Aggregates duplicate X values into a single X value where
    the target y is a (sample_weighted) average of the individual
    targets.

    Assumes that X is ordered, so that all duplicates follow each other.

    Returns a 3-tuple of ndarrays ``(x_unique, y_averaged, weights_summed)``,
    each of length equal to the number of distinct groups found.
    """
    # Upper bound on the number of groups; the eps-based grouping below can
    # merge near-equal values, so only the first i+1 slots may be filled.
    unique_values = len(np.unique(X))

    # Map the Cython fused type to the matching NumPy dtype for the outputs.
    if floating is float:
        dtype = np.float32
    else:
        dtype = np.float64

    cdef floating[::1] y_out = np.empty(unique_values, dtype=dtype)
    cdef floating[::1] x_out = np.empty_like(y_out)
    cdef floating[::1] weights_out = np.empty_like(y_out)

    cdef floating current_x = X[0]
    cdef floating current_y = 0       # running weighted sum of y in the group
    cdef floating current_weight = 0  # running sum of weights in the group
    cdef int i = 0                    # index of the group being accumulated
    cdef int j
    cdef floating x
    cdef int n_samples = len(X)
    # Values closer than the dtype's resolution are treated as duplicates.
    cdef floating eps = np.finfo(dtype).resolution

    for j in range(n_samples):
        x = X[j]
        if x - current_x >= eps:
            # next unique value: flush the accumulated group ...
            x_out[i] = current_x
            weights_out[i] = current_weight
            y_out[i] = current_y / current_weight  # weighted mean of y
            i += 1
            # ... and start a new group at x.
            current_x = x
            current_weight = sample_weights[j]
            current_y = y[j] * sample_weights[j]
        else:
            # Duplicate (within eps) of the current group: accumulate.
            current_weight += sample_weights[j]
            current_y += y[j] * sample_weights[j]

    # Flush the final group (the loop never emits the last one).
    x_out[i] = current_x
    weights_out[i] = current_weight
    y_out[i] = current_y / current_weight
    return(
        np.asarray(x_out[:i+1]),
        np.asarray(y_out[:i+1]),
        np.asarray(weights_out[:i+1]),
    )