Permalink
Cannot retrieve contributors at this time
Name already in use
A tag already exists with the provided branch name. Many Git commands accept both tag and branch names, so creating this branch may cause unexpected behavior. Are you sure you want to create this branch?
ipd_extended/id_deterioration.py
Go to fileThis commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
158 lines (130 sloc)
5.51 KB
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import time | |
import data_generator as dg | |
import pandas as pd | |
import math | |
import interaction_distance as id | |
import footprint as f | |
import re | |
import matplotlib.pyplot as plt | |
import main | |
from correlation_measures.binning import Binning | |
import numpy as np | |
import merging as m | |
import subspace_mining as sm | |
import constants as cst | |
import operator | |
import functools | |
def plot(curr_x, points, scatter=False, threshold=None, scaled_x=True, vert_lines=None, dims=None):
    """Draw one ID-curve (or scatter) panel into the next subplot slot.

    Args:
        curr_x: x-coordinates (bin right bounds); ignored when ``scaled_x`` is False,
            in which case 0..len(points)-1 is used instead.
        points: ID values to plot.
        scatter: if True, scatter-plot instead of line-plot.
        threshold: optional horizontal threshold line to draw across the axes.
        scaled_x: whether ``curr_x`` carries real dimension values.
        vert_lines: optional x-positions for vertical marker lines (only drawn
            when ``scaled_x`` is True, since they are in data coordinates).
        dims: optional dimension count shown in the panel title.

    Side effects: advances the module-global ``plot_id`` subplot counter.
    """
    global plot_id
    curr_x = [i for i in range(len(points))] if not scaled_x else curr_x
    ax = plt.subplot(plot_rows, plot_cols, plot_id)
    if scatter:
        plt.scatter(curr_x, points, s=0.5)
    else:
        # ax.set_ylim([-0.01, 0.12])
        # len(points) == len(curr_x) - 1: IDs are computed between adjacent bins.
        plt.plot(curr_x[:-1], points)
    plt.xlabel("dimension values")
    plt.ylabel("ID values")
    if dims is not None:
        plt.title(str(dims) + " dimensions")
    if threshold is not None:
        # BUG FIX: axhline's xmin/xmax arguments are axis fractions in [0, 1],
        # not data coordinates. The original passed min(curr_x)/max(curr_x)
        # (data values), which mis-clipped or over-extended the line.
        # Omitting them spans the full axis width, which is the intent.
        plt.axhline(threshold, color='r')
    if scaled_x and vert_lines is not None:
        for line in vert_lines:
            plt.axvline(line, color='r')
    plot_id += 1
def plot_data_2d(data):
    """Scatter columns 0 vs. 1 of *data*, coloured by the class label in column 2.

    Label 0 is drawn green, label 1 red. Uses the current module-global
    subplot slot (``plot_id``) without advancing it.
    """
    axes = plt.subplot(plot_rows, plot_cols, plot_id)
    frame = pd.DataFrame(data)
    label_masks = {
        'g': frame[2] == 0,
        'r': frame[2] == 1,
    }
    for colour, mask in label_masks.items():
        axes.scatter(frame[0][mask], frame[1][mask], s=1, c=colour)
    plt.xlabel("dim 0")
    plt.ylabel("dim 1")
def plot_data_3d(data, cols, colored=False):
    """3-D scatter of three columns of *data* in the next subplot slot.

    Args:
        data: tabular data convertible to a DataFrame.
        cols: the three column keys to plot when ``colored`` is False.
        colored: if True, plot columns 0/1/2 coloured by the class label
            found in column 3 (labels 0..4 map to b/r/k/g/m).

    Side effects: advances the module-global ``plot_id`` subplot counter.
    """
    frame = pd.DataFrame(data)
    global plot_id
    ax = plt.subplot(plot_rows, plot_cols, plot_id, projection='3d')
    plot_id += 1
    # ax.set_xlim(-3, 3)
    # ax.set_ylim(-3, 3)
    # ax.set_zlim(-3, 3)
    if colored:
        # colour per class label in column 3
        colour_for_label = {'b': 0, 'r': 1, 'k': 2, 'g': 3, 'm': 4}
        for colour, label in colour_for_label.items():
            mask = frame[3] == label
            ax.scatter(frame[0][mask], frame[1][mask], frame[2][mask], c=colour, s=1)
    else:
        # single-colour plot of the requested columns
        ax.scatter(frame[cols[0]], frame[cols[1]], frame[cols[2]], c='k', s=1)
    ax.set_xlabel('X0')
    ax.set_ylabel('X1')
    ax.set_zlabel('X2')
def run_for_subspace(binning, curr, curr_dim_maxes, curr_data):
    """Compute interaction distances (IDs) for one subspace and plot them.

    Args:
        binning: Binning object over the full data for dimension ``curr``.
        curr: index of the dimension being discretized.
        curr_dim_maxes: per-dimension maxima of the subspace columns.
        curr_data: the data restricted to the current subspace's columns.

    Reads the module-global ``data`` to map bin bounds back to raw values.
    """
    bin_map = binning.equal_frequency_binning_by_rank()
    dist_bins = bin_map.cat.categories
    # For each equal-frequency bin, parse the right bound (a rank) out of the
    # interval's string label, then look up the original value of dimension
    # `curr` for the row holding that rank.
    # FIX: the pattern is now a raw string — the original used plain quotes,
    # so `\d` / `\.` were invalid string escapes (DeprecationWarning, and a
    # SyntaxWarning on modern CPython).
    bins_right_bounds = [data.loc[binning.rank_data[binning.rank_data[curr] == math.floor(
        float(re.search(r', (-*\d+\.*\d*e*[+-]*\d*)', dist_bins[i]).group(1)))].index.tolist()[0], curr] for i in
                         range(len(dist_bins))]
    scaled_x = True
    t = time.time()
    IDs = id.compute_IDs(bin_map, curr, curr_data, curr_dim_maxes)
    print("ID runtime", time.time() - t)
    plot(bins_right_bounds, IDs, scaled_x=scaled_x, dims=len(curr_dim_maxes))
def compute_disc(IDs, binning, dist_bins, bps, threshold):
    """Merge micro-bins into macro intervals via dynamic programming.

    Derives an ID threshold from *IDs* and *threshold*, runs the dynamic
    merging, picks the minimum-cost discretization, and maps it back to
    macro intervals via ``main.get_discretized_points``.

    Reads module globals ``curr``, ``data`` and ``init_bins_count``.
    Returns the macro intervals of the best discretization.
    """
    id_threshold = id.compute_ID_threshold(IDs, threshold)
    costs, candidates = m.dynamic_merging(id_threshold, IDs, init_bins_count, bps)
    best = np.argmin(costs[-1])
    chosen = candidates[-1][best]
    macro_intervals, _macro_points = main.get_discretized_points(
        curr, data, chosen, dist_bins, binning.rank_data)
    return macro_intervals
def compute_disc_minhash(footprints, binning, dist_bins, footprint_diffs=None):
    """Merge micro-bins into macro intervals using MinHash footprints.

    Same shape as ``compute_disc`` but drives the dynamic programming with
    precomputed footprints (and optional pairwise footprint differences)
    instead of raw IDs.

    Reads module globals ``curr``, ``data`` and ``init_bins_count``.
    Returns the macro intervals of the best discretization.
    """
    # ID_threshold = id.compute_ID_threshold(curr_points, IDs, idts)
    # t = time.time()
    costs, candidates = m.dynamic_merging_footprints(footprints, init_bins_count, footprint_diffs)
    # print('merging', time.time() - t)
    best = np.argmin(costs[-1])
    chosen = candidates[-1][best]
    macro_intervals, _macro_points = main.get_discretized_points(
        curr, data, chosen, dist_bins, binning.rank_data)
    return macro_intervals
# ---------------------------------------------------------------------------
# Experiment driver (module-level script).
# Generates synthetic cube data, normalizes it, then plots IDs for subspaces
# of growing dimensionality. The globals bound here (data, curr, plot_rows,
# plot_cols, plot_id, init_bins_count) are read by the functions above.
# ---------------------------------------------------------------------------
cube_rows = 5000
# Alternative generators kept for experimentation:
# data_gen = dg.produce_xor_generator(3, 4, 'bla', distribution='uniform', rows=cube_rows, offset=(0, 0))
# data_gen = dg.produce_random_generator(20, 'bla', rows=cube_rows)
data_gen = dg.produce_cube_generator(3, 100, 1, 1, 'bla', cube_rows, "uniform")
subspaces = data_gen.subspaces
print('subspaces', subspaces)
build = data_gen.build()
data = pd.DataFrame(build[0])
# print('disc', build[1])
# subspace_map = main.get_map_from_subspace_set(subspaces)
# data = pd.read_csv("new_cubes/cubes_n1000_r4_i1_c1.csv", delimiter=';', header=None, na_values='?')
# print('data maxes', data.max(0))
# print('data mins', data.min(0))
# Last column holds the class labels; strip it before normalization.
class_labels = data.pop(data.shape[1] - 1)
# Min-max scale every column into [-NORMALIZATION_RADIUS, +NORMALIZATION_RADIUS];
# constant columns are mapped to -1 (can't be scaled — zero range).
data = data.apply(lambda x: 2 * cst.NORMALIZATION_RADIUS * (x - x.min()) / (
    x.max() - x.min()) - cst.NORMALIZATION_RADIUS if x.max() != x.min() else pd.Series(-np.ones(x.shape)))
dim_maxes = data.max(0)
init_bins_count = int(math.ceil(math.sqrt(data.shape[0])))  # ceil in original ipd...
print('init_bins_count', init_bins_count)
# Subplot grid layout and running slot counter used by the plot helpers.
plot_rows = 1
plot_cols = 3
plot_id = 1
# Dimension whose discretization is examined.
curr = 0
binning = Binning(data, curr, init_bins_count)
# One panel per subspace of 3, 4 and 6 dimensions.
for curr_subspace in [[i for i in range(3)], [i for i in range(4)], [i for i in range(6)]]:
    # for curr_subspace in [[0, 1, 2]]:
    # for curr_subspace in [[0, 1, 2, 3]]:
    print('curr_subspace', curr, curr_subspace)
    curr_data = data.copy().loc[:, curr_subspace]
    curr_dim_maxes = dim_maxes[curr_subspace]
    run_for_subspace(binning, curr, curr_dim_maxes, curr_data)
plt.show()