Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Jahanvi rajput patch 1 #44

Open
wants to merge 58 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
58 commits
Select commit Hold shift + click to select a range
6936688
setCover.py
JahanviRajput Jan 7, 2024
a266b9d
setCover.py
JahanviRajput Jan 7, 2024
78f51b0
Create NaiveGreedyOptimizer.py
JahanviRajput Jan 16, 2024
47f571e
Delete cpp/optimizers/NaiveGreedy.py
JahanviRajput Jan 16, 2024
8a2603d
Create SetFunction.py
JahanviRajput Jan 16, 2024
95e7cd2
Create LazierThanLazyGreedyOptimizer.py
JahanviRajput Jan 16, 2024
0715bb1
Create LazyGreedyOptimizer.py
JahanviRajput Jan 16, 2024
3820a32
Create NaiveGreedyOptimizer.py
JahanviRajput Jan 16, 2024
5dfbdf4
Create StochasticGreedyOptimizer.py
JahanviRajput Jan 16, 2024
75bfd10
Create SetCover.py
JahanviRajput Jan 16, 2024
f9b07b9
Delete cpp/SetFunction.py
JahanviRajput Jan 17, 2024
40178a3
Create SetFunction.py
JahanviRajput Jan 17, 2024
4386d15
Create SetCover.py
JahanviRajput Jan 17, 2024
b5659d8
Delete cpp/submod/SetCover.py
JahanviRajput Jan 17, 2024
820c78f
Create LazierThanLazyGreedyOptimizer.py
JahanviRajput Jan 17, 2024
63c9e06
Create LazyGreedyOptimizer.py
JahanviRajput Jan 17, 2024
ba15c2f
Create NaiveGreedyOptimizer.py
JahanviRajput Jan 17, 2024
1259666
Create StochasticGreedyOptimizer.py
JahanviRajput Jan 17, 2024
99f9870
Update SetFunction.py
JahanviRajput Jan 17, 2024
eba36f7
Delete pytorch/optimizer/StochasticGreedyOptimizer.py
JahanviRajput Jan 17, 2024
eef8085
Create StochasticGreedyOptimizer
JahanviRajput Jan 17, 2024
cb235d9
Create __init__.py
JahanviRajput Jan 17, 2024
92be253
Delete cpp/optimizers/LazierThanLazyGreedyOptimizer.py
JahanviRajput Jan 17, 2024
d2a9aba
Delete cpp/optimizers/LazyGreedyOptimizer.py
JahanviRajput Jan 17, 2024
50ec126
Delete cpp/optimizers/NaiveGreedyOptimizer.py
JahanviRajput Jan 17, 2024
4276916
Delete cpp/optimizers/StochasticGreedyOptimizer.py
JahanviRajput Jan 17, 2024
9c997e7
Create ProbabilisticSetCover.py
JahanviRajput Jan 17, 2024
52a30c5
Update SetCover.py
JahanviRajput Jan 17, 2024
036d04b
Create __init__.py
JahanviRajput Jan 17, 2024
1d7d014
Update ProbabilisticSetCover.py
JahanviRajput Jan 17, 2024
694f358
Update __init__.py
JahanviRajput Jan 17, 2024
698f1f6
Rename StochasticGreedyOptimizer to StochasticGreedyOptimize.pyr
JahanviRajput Jan 19, 2024
00f7f9f
Rename StochasticGreedyOptimize.pyr to StochasticGreedyOptimize.py
JahanviRajput Jan 19, 2024
f268159
Update setCover.py
JahanviRajput Jan 19, 2024
4bc9555
Update setCover.py
JahanviRajput Jan 23, 2024
2a27493
Update setup.py
JahanviRajput Jan 23, 2024
aeaaaef
Fixed dependency tree for submodlib GPU implementation
amajee11us Jan 23, 2024
57397c7
Added fix for null object issue
amajee11us Jan 24, 2024
ed70d00
Create GraphCut.py
JahanviRajput Jan 30, 2024
52828be
Update __init__.py
JahanviRajput Jan 30, 2024
f8765d9
Update GraphCut.py
JahanviRajput Jan 30, 2024
72e82c9
pytorch version of helper.py
JahanviRajput Jan 31, 2024
7cc626e
dense mode of with helper functions GraphCut.py
JahanviRajput Feb 4, 2024
766e432
Required functions of helper.py
JahanviRajput Feb 4, 2024
1efe9bf
Function of dense mode only FacilityLocation.py
JahanviRajput Feb 4, 2024
7fac287
Function with all modes implemented DisparityMin.py
JahanviRajput Feb 6, 2024
1cac380
All modes are implemented DisparitySum.py
JahanviRajput Feb 6, 2024
9464fb8
Dense mode is implemented GraphCut.py
JahanviRajput Feb 6, 2024
fbbd7ba
Dense mode is implemented GraphCut.py
JahanviRajput Feb 6, 2024
0fb08d6
Function with all modes implemented DisparityMin.py
JahanviRajput Feb 6, 2024
a026e5b
Function with all modes implemented DisparitySum.py
JahanviRajput Feb 6, 2024
078194a
Function of dense mode only FacilityLocation.py
JahanviRajput Feb 6, 2024
5265d84
Update SetCover.py
JahanviRajput Feb 6, 2024
6cf311b
Update __init__.py
JahanviRajput Feb 6, 2024
de6ae41
Dense mode is done LogDeterminant.py
JahanviRajput Feb 6, 2024
6e9eee5
Create_kernel_sklearn updated for batchwise calculation on cuda
JahanviRajput Feb 12, 2024
4e2eab6
Cuda facilityLocation.py
JahanviRajput Feb 13, 2024
b5a0f2a
cuda facilityLocation.py
JahanviRajput Feb 13, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
66 changes: 66 additions & 0 deletions pytorch/SetFunction.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
from typing import Set, List, Tuple
import numpy as np
import torch
import torch.nn as nn
import numpy as np
import random
from pytorch.optimizer.LazierThanLazyGreedyOptimizer import LazierThanLazyGreedyOptimizer
from pytorch.optimizer.LazyGreedyOptimizer import LazyGreedyOptimizer
from pytorch.optimizer.NaiveGreedyOptimizer import NaiveGreedyOptimizer
from pytorch.optimizer.StochasticGreedyOptimizer import StochasticGreedyOptimizer


class SetFunction(nn.Module):
    """Abstract base class for submodular set functions.

    Concrete subclasses (SetCover, GraphCut, FacilityLocation, ...) must
    override the evaluation/memoization hooks below.  The base class only
    provides `maximize`, which dispatches to one of the greedy optimizers
    by name.
    """

    def __init__(self):
        # Bug fix: nn.Module.__init__ must run before subclasses register
        # parameters/buffers on self; the original `pass` skipped it.
        super().__init__()

    def evaluate(self, X: Set[int]) -> float:
        """Return f(X), the value of the set function on X.

        Bug fix: the original delegated to itself (`return self.evaluate(X)`),
        which recursed infinitely whenever a subclass failed to override.
        """
        raise NotImplementedError("Subclasses must implement evaluate()")

    def evaluate_with_memoization(self, X: Set[int]) -> float:
        """Return f(X) using the cached (memoized) statistics."""
        raise NotImplementedError("Subclasses must implement evaluate_with_memoization()")

    def marginal_gain(self, X: Set[int], item: int) -> float:
        """Return f(X + {item}) - f(X)."""
        raise NotImplementedError("Subclasses must implement marginal_gain()")

    def marginal_gain_with_memoization(self, X: Set[int], item: int, enable_checks: bool = True) -> float:
        """Return the marginal gain of `item` w.r.t. X using memoized state."""
        raise NotImplementedError("Subclasses must implement marginal_gain_with_memoization()")

    def update_memoization(self, X: Set[int], item: int) -> None:
        """Update the cached statistics after `item` is added to X."""
        raise NotImplementedError("Subclasses must implement update_memoization()")

    def get_effective_ground_set(self) -> Set[int]:
        """Return the ground set over which maximization runs."""
        raise NotImplementedError("Subclasses must implement get_effective_ground_set()")

    def maximize(self, optimizer: str, budget: float, stopIfZeroGain: bool, stopIfNegativeGain: bool, verbose: bool,
                 costs: List[float] = None, cost_sensitive_greedy: bool = False, show_progress: bool = False, epsilon: float = 0.0) -> List[Tuple[int, float]]:
        """Maximize this set function under a cardinality budget.

        Parameters
        ----------
        optimizer : name of the optimizer ("NaiveGreedy", "LazyGreedy",
            "StochasticGreedy" or "LazierThanLazyGreedy").
        budget : maximum number of elements to select.
        stopIfZeroGain / stopIfNegativeGain : early-stopping flags forwarded
            to the optimizer.
        costs, cost_sensitive_greedy, show_progress, epsilon : forwarded
            optimizer options (epsilon is currently unused here).

        Returns
        -------
        List of (element, marginal_gain) tuples in selection order, or an
        empty list when the optimizer name is unknown.
        """
        optimizer = self._get_optimizer(optimizer)
        if optimizer:
            # Bug fix: the original passed stopIfZeroGain twice, so the
            # optimizer never received the stop-if-negative-gain flag.
            return optimizer.maximize(self, budget, stopIfZeroGain, stopIfNegativeGain, verbose, show_progress, costs, cost_sensitive_greedy)
        else:
            print("Invalid Optimizer")
            return []

    def _get_optimizer(self, optimizer_name: str):
        """Map an optimizer name to a fresh optimizer instance (None if unknown)."""
        if optimizer_name == "NaiveGreedy":
            return NaiveGreedyOptimizer()
        elif optimizer_name == "LazyGreedy":
            return LazyGreedyOptimizer()
        elif optimizer_name == "StochasticGreedy":
            return StochasticGreedyOptimizer()
        elif optimizer_name == "LazierThanLazyGreedy":
            return LazierThanLazyGreedyOptimizer()
        else:
            return None

    def cluster_init(self, n: int, k_dense: List[List[float]], ground: Set[int],
                     partial: bool, lambda_: float) -> None:
        """Initialize clustered/partial-function state (subclass hook)."""
        raise NotImplementedError("Subclasses must implement cluster_init()")

    def set_memoization(self, X: Set[int]) -> None:
        """Reset memoized statistics as if X were the current selection."""
        raise NotImplementedError("Subclasses must implement set_memoization()")

    def clear_memoization(self) -> None:
        """Clear all memoized statistics."""
        raise NotImplementedError("Subclasses must implement clear_memoization()")
5 changes: 5 additions & 0 deletions pytorch/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# /pytorch/__init__.py
from .SetFunction import SetFunction

from .optimizer import *
from .submod import *
120 changes: 120 additions & 0 deletions pytorch/optimizer/LazierThanLazyGreedyOptimizer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
import random
import math

class LazierThanLazyGreedyOptimizer:
    """Stochastic ("lazier than lazy") greedy maximizer.

    Each round samples a random subset of the remaining ground set and adds
    the best element of that sample, using upper-bound (lazy) pruning on the
    sorted marginal gains.
    """

    def __init__(self):
        pass

    @staticmethod
    def equals(val1, val2, eps):
        """Return True when val1 and val2 differ by less than eps."""
        return abs(val1 - val2) < eps

    @staticmethod
    def print_sorted_set(sorted_set):
        """Debug helper: print an iterable of (value, element) pairs."""
        print("[", end="")
        for val, elem in sorted_set:
            print(f"({val}, {elem}), ", end="")
        print("]")

    def maximize(self, f_obj, budget, stop_if_zero_gain=False, stop_if_negative_gain=False,
                 epsilon=0.1, verbose=False, show_progress=False, costs=None, cost_sensitive_greedy=False):
        """Greedily select up to `budget` elements from f_obj's ground set.

        Parameters
        ----------
        f_obj : set-function object exposing get_effective_ground_set,
            clear_memoization, marginal_gain_with_memoization and
            update_memoization.
        budget : maximum number of elements to select.
        stop_if_zero_gain / stop_if_negative_gain : early-stopping flags.
        epsilon : sampling accuracy parameter (currently overridden below).
        costs / cost_sensitive_greedy : accepted for interface parity with
            the other optimizers; not used here.

        Returns
        -------
        list of (element, marginal_gain) tuples in selection order.
        """
        greedy_vector = []
        greedy_set = set()
        # Bug fix: the original called list.reserve()/set.reserve() when costs
        # was None; those methods do not exist in Python and raised
        # AttributeError on the default call path.

        rem_budget = budget
        remaining_set = set(f_obj.get_effective_ground_set())
        n = len(remaining_set)
        # NOTE(review): the epsilon parameter is shadowed here, so callers
        # cannot tune the sample size. Kept to preserve existing behavior —
        # TODO confirm and honor the parameter instead.
        epsilon = 0.05
        random_set_size = int((n / budget) * math.log(1 / epsilon))

        if verbose:
            print(f"Epsilon = {epsilon}")
            print(f"Random set size = {random_set_size}")
            print("Ground set:")
            print(remaining_set)
            print(f"Num elements in ground set = {len(remaining_set)}")
            print("Starting the LazierThanLazy greedy algorithm")
            print("Initial greedy set:")
            print(greedy_set)

        f_obj.clear_memoization()

        i = 0
        step = 1
        display_next = step
        percent = 0
        N = rem_budget
        iter_count = 0

        while rem_budget > 0:
            # Reset per iteration so a round with no selectable candidate
            # cannot reuse a stale best_id from a previous round.
            best_id = None
            best_val = None

            random_set = set()
            # Bug fix: clamp the sample size — without this the sampling loop
            # below can never terminate once fewer than random_set_size
            # elements remain in the ground set.
            sample_size = min(random_set_size, len(remaining_set))
            while len(random_set) < sample_size:
                elem = random.randint(0, n - 1)
                if elem in remaining_set and elem not in random_set:
                    random_set.add(elem)

            if verbose:
                print(f"Iteration {i}")
                print(f"Random set = {random_set}")
                print("Now running lazy greedy on the random set")

            # Compute gains only for the elements in the remaining set.
            gains = [(f_obj.marginal_gain_with_memoization(greedy_set, elem, False), elem)
                     for elem in remaining_set]
            # Sort once, descending by gain (ties broken by element id); the
            # "next best upper bound" lookup must index this sorted order.
            sorted_gains = sorted(gains, key=lambda x: (-x[0], x[1]))

            for j, (val, elem) in enumerate(sorted_gains):
                if elem in random_set and elem not in greedy_set:
                    if verbose:
                        print(f"Checking {elem}...")
                    candidate_id = elem
                    new_candidate_bound = f_obj.marginal_gain_with_memoization(greedy_set, candidate_id, False)
                    if verbose:
                        print(f"Updated gain as per updated greedy set = {new_candidate_bound}")
                    # Bug fixes: (a) the original indexed the *unsorted* gains
                    # list with the sorted-order index j; (b) the ternary was
                    # unparenthesized, so the whole comparison — not the
                    # fallback bound — collapsed to float('-inf').
                    next_elem = sorted_gains[j + 1] if j + 1 < len(sorted_gains) else None
                    next_best_bound = next_elem[0] if next_elem else float('-inf')
                    if new_candidate_bound >= next_best_bound:
                        if verbose:
                            print("..better than next best upper bound, "
                                  "selecting...")
                        best_id = candidate_id
                        best_val = new_candidate_bound
                        break

            if best_id is None:
                # No selectable element (e.g. ground set exhausted): stop
                # gracefully instead of crashing on remaining_set.remove(None).
                break

            if verbose:
                print(f"Next best item to add is {best_id} and its value addition is {best_val}")

            remaining_set.remove(best_id)

            if (best_val < 0 and stop_if_negative_gain) or (self.equals(best_val, 0, 1e-5) and stop_if_zero_gain):
                break
            else:
                f_obj.update_memoization(greedy_set, best_id)
                greedy_set.add(best_id)
                greedy_vector.append((best_id, best_val))
                rem_budget -= 1

                if verbose:
                    print(f"Added element {best_id} and the gain is {best_val}")
                    print("Updated greedy set:", greedy_set)

                if show_progress:
                    percent = int(((iter_count + 1.0) / N) * 100)
                    if percent >= display_next:
                        print("\r", "[" + "|" * (percent // 5) + " " * (100 // 5 - percent // 5) + "]", end="")
                        print(f" {percent}% [Iteration {iter_count + 1} of {N}]", end="")
                        display_next += step
                    iter_count += 1

            i += 1

        return greedy_vector
97 changes: 97 additions & 0 deletions pytorch/optimizer/LazyGreedyOptimizer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
import torch
import heapq

class LazyGreedyOptimizer:
    """Lazy greedy maximizer.

    Keeps stale upper bounds on marginal gains in a max-heap and only
    re-evaluates the top element each round; by submodularity a re-evaluated
    gain that still beats the next-best bound is the true argmax.
    """

    def __init__(self):
        pass

    @staticmethod
    def equals(val1, val2, eps):
        """Return True when val1 and val2 differ by less than eps."""
        return abs(val1 - val2) < eps

    def maximize(self, f_obj, budget, stop_if_zero_gain, stop_if_negative_gain,
                 verbose, show_progress, costs, cost_sensitive_greedy):
        """Greedily select up to `budget` elements from f_obj's ground set.

        When cost_sensitive_greedy is true, initial bounds are gains divided
        by per-element `costs`. Returns a list of (element, marginal_gain)
        tuples in selection order.
        """
        greedy_vector = []
        greedy_set = set()

        rem_budget = budget
        ground_set = f_obj.get_effective_ground_set()

        if verbose:
            print("Ground set:")
            print(ground_set)
            print(f"Num elements in groundset = {len(ground_set)}")
            print("Costs:")
            print(costs)
            print(f"Cost sensitive greedy: {cost_sensitive_greedy}")
            print("Starting the lazy greedy algorithm")
            print("Initial greedy set:")
            print(greedy_set)

        f_obj.clear_memoization()

        # Python's heapq is a min-heap, so gains are stored negated.
        max_heap = []

        if cost_sensitive_greedy:
            for elem in ground_set:
                gain = f_obj.marginal_gain_with_memoization(greedy_set, elem, False) / costs[elem]
                heapq.heappush(max_heap, (-gain, elem))
        else:
            for elem in ground_set:
                gain = f_obj.marginal_gain_with_memoization(greedy_set, elem, False)
                heapq.heappush(max_heap, (-gain, elem))

        if verbose:
            print("Max heap constructed")

        step = 1
        display_next = step
        percent = 0
        N = rem_budget
        iter_count = 0  # renamed from `iter`, which shadowed the builtin

        while rem_budget > 0 and max_heap:
            current_max = heapq.heappop(max_heap)
            current_max_gain, current_max_elem = -current_max[0], current_max[1]

            if verbose:
                print(f"currentMax element: {current_max_elem} and its upper bound: {current_max_gain}")

            new_max_bound = f_obj.marginal_gain_with_memoization(greedy_set, current_max_elem, False)

            if verbose:
                print(f"newMaxBound: {new_max_bound}")

            # Bug fix: guard the empty-heap case — when the last element is
            # popped (budget >= ground-set size) the original indexed
            # max_heap[0] and raised IndexError.
            if not max_heap or new_max_bound >= -max_heap[0][0]:
                if (new_max_bound < 0 and stop_if_negative_gain) or \
                        (self.equals(new_max_bound, 0, 1e-5) and stop_if_zero_gain):
                    break
                else:
                    f_obj.update_memoization(greedy_set, current_max_elem)
                    greedy_set.add(current_max_elem)
                    greedy_vector.append((current_max_elem, new_max_bound))
                    rem_budget -= 1

                    if verbose:
                        print(f"Added element {current_max_elem} and the gain is {new_max_bound}")
                        print("Updated greedySet:", greedy_set)

                    if show_progress:
                        percent = int(((iter_count + 1.0) / N) * 100)

                        if percent >= display_next:
                            print(f"\r[{'|' * (percent // 5)}{' ' * (100 // 5 - percent // 5)}]",
                                  end=f" {percent}% [Iteration {iter_count + 1} of {N}]")
                            display_next += step

                        iter_count += 1
            else:
                # Stale bound: push back with the refreshed gain and retry.
                heapq.heappush(max_heap, (-new_max_bound, current_max_elem))

        return greedy_vector
90 changes: 90 additions & 0 deletions pytorch/optimizer/NaiveGreedyOptimizer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
import torch
import random
from typing import List, Tuple, Set

class NaiveGreedyOptimizer:
    """Plain greedy maximizer: each round scans the full ground set and adds
    the element with the largest marginal gain."""

    def __init__(self):
        pass

    @staticmethod
    def equals(val1, val2, eps):
        """Return True when val1 and val2 differ by less than eps."""
        return abs(val1 - val2) < eps

    def maximize(
        self, f_obj, budget, stop_if_zero_gain, stopIfNegativeGain, verbose, show_progress, costs, cost_sensitive_greedy
    ):
        """Greedily select up to `budget` elements from f_obj's ground set.

        `costs` and `cost_sensitive_greedy` are accepted for interface parity
        with the other optimizers but are not used here. Returns a list of
        (element, marginal_gain) tuples in selection order.
        """
        greedy_vector = []
        greedy_set = set()
        rem_budget = budget
        ground_set = f_obj.get_effective_ground_set()

        if verbose:
            print("Ground set:")
            print(ground_set)
            print(f"Num elements in groundset = {len(ground_set)}")
            print("Costs:")
            print(costs)
            print(f"Cost sensitive greedy: {cost_sensitive_greedy}")
            print("Starting the naive greedy algorithm")
            print("Initial greedy set:")
            print(greedy_set)

        f_obj.clear_memoization()
        step = 1
        display_next = step
        percent = 0
        N = rem_budget
        iter_count = 0

        while rem_budget > 0:
            best_id = None
            best_val = float("-inf")

            for i in ground_set:
                if i in greedy_set:
                    continue
                gain = f_obj.marginal_gain_with_memoization(greedy_set, i, False)
                if verbose:
                    print(f"Gain of {i} is {gain}")

                if gain > best_val:
                    best_id = i
                    best_val = gain

            # Bug fix: when the ground set is exhausted before the budget is
            # spent, best_id stays None and the original looped forever,
            # repeatedly adding None to the greedy set. Stop instead.
            if best_id is None:
                break

            if verbose:
                print(f"Next best item to add is {best_id} and its value addition is {best_val}")

            if (best_val < 0 and stopIfNegativeGain) or (
                self.equals(best_val, 0, 1e-5) and stop_if_zero_gain
            ):
                break
            else:
                f_obj.update_memoization(greedy_set, best_id)
                greedy_set.add(best_id)
                greedy_vector.append((best_id, best_val))
                rem_budget -= 1

                if verbose:
                    print(f"Added element {best_id} and the gain is {best_val}")
                    print(f"Updated greedy set: {greedy_set}")

                if show_progress:
                    percent = int((iter_count + 1.0) / N * 100)

                    if percent >= display_next:
                        print(
                            f"\r[{'|' * (percent // 5)}{' ' * (100 // 5 - percent // 5)}]",
                            end="",
                        )
                        print(f"{percent}% [Iteration {iter_count + 1} of {N}]", end="")
                        display_next += step

                    iter_count += 1

        return greedy_vector
Loading