diff --git a/.github/workflows/test_and_build.yaml b/.github/workflows/test_and_build.yaml index 59f4c2dc..2ccbb5da 100644 --- a/.github/workflows/test_and_build.yaml +++ b/.github/workflows/test_and_build.yaml @@ -74,7 +74,7 @@ jobs: run: | export RUBIN_SIM_DATA_DIR=~/rubin_sim_data #pytest -r a -v - pytest -r a -v --cov=rubin_scheduler --cov=tests --cov-report=xml --cov-report=term --cov-branch + pytest -r a -v --cov=rubin_scheduler --cov-report=xml --cov-report=term --cov-branch - name: Upload coverage to codecov uses: codecov/codecov-action@v2 diff --git a/rubin_scheduler/scheduler/surveys/base_survey.py b/rubin_scheduler/scheduler/surveys/base_survey.py index 76193ae3..453122dd 100644 --- a/rubin_scheduler/scheduler/surveys/base_survey.py +++ b/rubin_scheduler/scheduler/surveys/base_survey.py @@ -8,13 +8,14 @@ import pandas as pd from rubin_scheduler.scheduler.detailers import ZeroRotDetailer -from rubin_scheduler.scheduler.thomson import thetaphi2xyz, xyz2thetaphi from rubin_scheduler.scheduler.utils import ( HpInComcamFov, HpInLsstFov, comcam_tessellate, empty_observation, set_default_nside, + thetaphi2xyz, + xyz2thetaphi, ) from rubin_scheduler.site_models import _read_fields diff --git a/rubin_scheduler/scheduler/thomson/__init__.py b/rubin_scheduler/scheduler/thomson/__init__.py deleted file mode 100644 index 55c2a99b..00000000 --- a/rubin_scheduler/scheduler/thomson/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .thomson import * diff --git a/rubin_scheduler/scheduler/thomson/thomson.py b/rubin_scheduler/scheduler/thomson/thomson.py deleted file mode 100644 index d8fa07ad..00000000 --- a/rubin_scheduler/scheduler/thomson/thomson.py +++ /dev/null @@ -1,348 +0,0 @@ -__all__ = ( - "thetaphi2xyz", - "even_points", - "elec_potential", - "ang_potential", - "fib_sphere_grid", - "iterate_potential_random", - "iterate_potential_smart", - "even_points_xyz", - "elec_potential_xyz", - "xyz2thetaphi", -) - -import numpy as np -from scipy.optimize import minimize - -from rubin_scheduler.utils import _angular_separation - - -def thetaphi2xyz(theta, phi): - x = np.sin(phi) * np.cos(theta) - y = np.sin(phi) * np.sin(theta) - z = np.cos(phi) - return x, y, z - - -def xyz2thetaphi(x, y, z): - phi = np.arccos(z) - theta = np.arctan2(y, x) - return theta, phi - - -def elec_potential(x0): - """ - Compute the potential energy for electrons on a sphere - - Parameters - ---------- - x0 : array - First half of x0 or theta values, secnd half phi - - Returns - ------- - Potential energy - """ - - theta = x0[0 : int(x0.size / 2)] - phi = x0[int(x0.size / 2) :] - - x, y, z = thetaphi2xyz(theta, phi) - # Distance squared - dsq = 0.0 - - indices = np.triu_indices(x.size, k=1) - - for coord in [x, y, z]: - coord_i = np.tile(coord, (coord.size, 1)) - coord_j = coord_i.T - d = (coord_i[indices] - coord_j[indices]) ** 2 - dsq += d - - U = np.sum(1.0 / np.sqrt(dsq)) - return U - - -def potential_single(coord0, x, y, z): - """ - Find the potential contribution from a single point. 
- """ - - x0 = coord0[0] - y0 = coord0[1] - z0 = coord0[2] - # Enforce point has to be on a sphere - rsq = x0**2 + y0**2 + z0**2 - r = np.sqrt(rsq) - x0 = x0 / r - y0 = y0 / r - z0 = z0 / r - - dsq = (x - x0) ** 2 + (y - y0) ** 2 + (z - z0) ** 2 - U = np.sum(1.0 / np.sqrt(dsq)) - return U - - -def xyz2_u(x, y, z): - """ - compute the potential - """ - dsq = 0.0 - - indices = np.triu_indices(x.size, k=1) - - for coord in [x, y, z]: - coord_i = np.tile(coord, (coord.size, 1)) - coord_j = coord_i.T - dsq += (coord_i[indices] - coord_j[indices]) ** 2 - - d = np.sqrt(dsq) - U = np.sum(1.0 / d) - return U - - -def iterate_potential_smart(x0, stepfrac=0.1): - """ - Calculate the change in potential by shifting points in theta and phi directions - # wow, that sure didn't work at all. - """ - - theta = x0[0 : x0.size / 2] - phi = x0[x0.size / 2 :] - x, y, z = thetaphi2xyz(theta, phi) - u_input = xyz2_u(x, y, z) - - # Now to loop over each point, and find where it's potenital minimum would be, and move it - # half-way there. - xyz_new = np.zeros((x.size, 3), dtype=float) - mask = np.ones(x.size, dtype=bool) - for i in np.arange(x.size): - mask[i] = 0 - fit = minimize(potential_single, [x[i], y[i], z[i]], args=(x[mask], y[mask], z[mask])) - mask[i] = 1 - xyz_new[i] = fit.x / np.sqrt(np.sum(fit.x**2)) - - xyz_input = np.array((x, y, z)).T - diff = xyz_input - xyz_new - - # Move half way in x-y-z space - xyz_out = xyz_input + stepfrac * diff - # Project back onto sphere - xyz_out = xyz_out.T / np.sqrt(np.sum(xyz_out**2, axis=1)) - u_new = xyz2_u(xyz_out[0, :], xyz_out[1, :], xyz_out[2, :]) - theta, phi = xyz2thetaphi(xyz_out[0, :], xyz_out[1, :], xyz_out[2, :]) - return np.concatenate((theta, phi)), u_new - - -def iterate_potential_random(x0, stepsize=0.05): - """ - Given a bunch of theta,phi values, shift things around to minimize potential - """ - - theta = x0[0 : int(x0.size / 2)] - phi = x0[int(x0.size / 2) :] - - x, y, z = thetaphi2xyz(theta, phi) - # Distance squared - dsq = 0.0 - - indices = np.triu_indices(x.size, k=1) - - for coord in [x, y, z]: - coord_i = np.tile(coord, (coord.size, 1)) - coord_j = coord_i.T - d = (coord_i[indices] - coord_j[indices]) ** 2 - dsq += d - - d = np.sqrt(dsq) - - u_input = 1.0 / d - - # offset everything by a random ammount - x_new = x + np.random.random(theta.size) * stepsize - y_new = y + np.random.random(theta.size) * stepsize - z_new = z + np.random.random(theta.size) * stepsize - - r = (x_new**2 + y_new**2 + z_new**2) ** 0.5 - # put back on the sphere - x_new = x_new / r - y_new = y_new / r - z_new = z_new / r - - dsq_new = 0 - for coord, coord_new in zip([x, y, z], [x_new, y_new, z_new]): - coord_i_new = np.tile(coord_new, (coord_new.size, 1)) - coord_j = coord_i_new.T - d_new = (coord_i_new[indices] - coord_j[indices]) ** 2 - dsq_new += d_new - u_new = 1.0 / np.sqrt(dsq_new) - - u_diff = np.sum(u_new) - np.sum(u_input) - if u_diff > 0: - return x0, 0.0 - else: - theta, phi = xyz2thetaphi(x_new, y_new, z_new) - return np.concatenate((theta, phi)), u_diff - - -def ang_potential(x0): - """ - If distance is computed along sphere rather than through 3-space. 
- """ - theta = x0[0 : int(x0.size / 2)] - phi = np.pi / 2 - x0[int(x0.size / 2) :] - - indices = np.triu_indices(theta.size, k=1) - - theta_i = np.tile(theta, (theta.size, 1)) - theta_j = theta_i.T - phi_i = np.tile(phi, (phi.size, 1)) - phi_j = phi_i.T - d = _angular_separation(theta_i[indices], phi_i[indices], theta_j[indices], phi_j[indices]) - U = np.sum(1.0 / d) - return U - - -def fib_sphere_grid(npoints): - """ - Use a Fibonacci spiral to distribute points uniformly on a sphere. - - based on https://people.sc.fsu.edu/~jburkardt/py_src/sphere_fibonacci_grid/sphere_fibonacci_grid_points.py - - Returns theta and phi in radians - """ - - phi = (1.0 + np.sqrt(5.0)) / 2.0 - - i = np.arange(npoints, dtype=float) - i2 = 2 * i - (npoints - 1) - theta = (2.0 * np.pi * i2 / phi) % (2.0 * np.pi) - sphi = i2 / npoints - phi = np.arccos(sphi) - return theta, phi - - -def even_points(npts, use_fib_init=True, method="CG", potential_func=elec_potential, maxiter=None): - """ - Distribute npts over a sphere and minimize their potential, making them - "evenly" distributed - - Starting with the Fibonacci spiral speeds things up by ~factor of 2. - """ - - if use_fib_init: - # Start with fibonacci spiral guess - theta, phi = fib_sphere_grid(npts) - else: - # Random on a sphere - theta = np.random.rand(npts) * np.pi * 2.0 - phi = np.arccos(2.0 * np.random.rand(npts) - 1.0) - - x = np.concatenate((theta, phi)) - # XXX--need to check if this is the best minimizer - min_fit = minimize(potential_func, x, method="CG", options={"maxiter": maxiter}) - - x = min_fit.x - theta = x[0 : int(x.size / 2)] - phi = x[int(x.size / 2) :] - # Looks like I get the same energy values as https://en.wikipedia.org/wiki/Thomson_problem - return theta, phi - - -def elec_potential_xyz(x0): - x0 = x0.reshape(3, int(x0.size / 3)) - x = x0[0, :] - y = x0[1, :] - z = x0[2, :] - dsq = 0.0 - - r = np.sqrt(x**2 + y**2 + z**2) - x = x / r - y = y / r - z = z / r - indices = np.triu_indices(x.size, k=1) - - for coord in [x, y, z]: - coord_i = np.tile(coord, (coord.size, 1)) - coord_j = coord_i.T - d = (coord_i[indices] - coord_j[indices]) ** 2 - dsq += d - - U = np.sum(1.0 / np.sqrt(dsq)) - return U - - -def elec_p_xyx_loop(x0): - """do this with a brutal loop that can be numba ified""" - x0 = x0.reshape(3, int(x0.size / 3)) - x = x0[0, :] - y = x0[1, :] - z = x0[2, :] - U = 0.0 - - r = np.sqrt(x**2 + y**2 + z**2) - x = x / r - y = y / r - z = z / r - - npts = x.size - for i in range(npts - 1): - for j in range(i + 1, npts): - dsq = (x[i] - x[j]) ** 2 + (y[i] - y[j]) ** 2 + (z[i] - z[j]) ** 2 - U += 1.0 / np.sqrt(dsq) - return U - - -def x02sphere(x0): - x0 = x0.reshape(3, int(x0.size / 3)) - x = x0[0, :] - y = x0[1, :] - z = x0[2, :] - - r = np.sqrt(x**2 + y**2 + z**2) - x = x / r - y = y / r - z = z / r - - return np.concatenate((x, y, z)) - - -def even_points_xyz( - npts, - use_fib_init=True, - method="CG", - potential_func=elec_p_xyx_loop, - maxiter=None, - callback=None, - verbose=True, -): - """ - Distribute npts over a sphere and minimize their potential, making them - "evenly" distributed - - Starting with the Fibonacci spiral speeds things up by ~factor of 2. 
- """ - - if use_fib_init: - # Start with fibonacci spiral guess - theta, phi = fib_sphere_grid(npts) - else: - # Random on a sphere - theta = np.random.rand(npts) * np.pi * 2.0 - phi = np.arccos(2.0 * np.random.rand(npts) - 1.0) - - x = np.concatenate(thetaphi2xyz(theta, phi)) - - if verbose: - print("initial potential=", elec_potential_xyz(x)) - # XXX--need to check if this is the best minimizer - min_fit = minimize(potential_func, x, method="CG", options={"maxiter": maxiter}, callback=callback) - - if verbose: - print("final potential=", elec_potential_xyz(min_fit.x)) - print("niteration=", min_fit.nit) - - x = x02sphere(min_fit.x) - - # Looks like I get the same energy values as https://en.wikipedia.org/wiki/Thomson_problem - return x diff --git a/rubin_scheduler/scheduler/thomson/thomson_jit.py b/rubin_scheduler/scheduler/thomson/thomson_jit.py deleted file mode 100644 index eb2e462c..00000000 --- a/rubin_scheduler/scheduler/thomson/thomson_jit.py +++ /dev/null @@ -1,350 +0,0 @@ -__all__ = ( - "thetaphi2xyz", - "even_points", - "elec_potential", - "ang_potential", - "fib_sphere_grid", - "iterate_potential_random", - "iterate_potential_smart", - "even_points_xyz", - "elec_potential_xyz", - "xyz2thetaphi", -) - -import numpy as np -from numba import jit -from scipy.optimize import minimize - -from rubin_scheduler.utils import _angular_separation - - -def thetaphi2xyz(theta, phi): - x = np.sin(phi) * np.cos(theta) - y = np.sin(phi) * np.sin(theta) - z = np.cos(phi) - return x, y, z - - -def xyz2thetaphi(x, y, z): - phi = np.arccos(z) - theta = np.arctan2(y, x) - return theta, phi - - -def elec_potential(x0): - """ - Compute the potential energy for electrons on a sphere - - Parameters - ---------- - x0 : array - First half of x0 or theta values, secnd half phi - - Returns - ------- - Potential energy - """ - - theta = x0[0 : int(x0.size / 2)] - phi = x0[int(x0.size / 2) :] - - x, y, z = thetaphi2xyz(theta, phi) - # Distance squared - dsq = 0.0 - - indices = np.triu_indices(x.size, k=1) - - for coord in [x, y, z]: - coord_i = np.tile(coord, (coord.size, 1)) - coord_j = coord_i.T - d = (coord_i[indices] - coord_j[indices]) ** 2 - dsq += d - - U = np.sum(1.0 / np.sqrt(dsq)) - return U - - -def potential_single(coord0, x, y, z): - """ - Find the potential contribution from a single point. - """ - - x0 = coord0[0] - y0 = coord0[1] - z0 = coord0[2] - # Enforce point has to be on a sphere - rsq = x0**2 + y0**2 + z0**2 - r = np.sqrt(rsq) - x0 = x0 / r - y0 = y0 / r - z0 = z0 / r - - dsq = (x - x0) ** 2 + (y - y0) ** 2 + (z - z0) ** 2 - U = np.sum(1.0 / np.sqrt(dsq)) - return U - - -def xyz2_u(x, y, z): - """ - compute the potential - """ - dsq = 0.0 - - indices = np.triu_indices(x.size, k=1) - - for coord in [x, y, z]: - coord_i = np.tile(coord, (coord.size, 1)) - coord_j = coord_i.T - dsq += (coord_i[indices] - coord_j[indices]) ** 2 - - d = np.sqrt(dsq) - U = np.sum(1.0 / d) - return U - - -def iterate_potential_smart(x0, stepfrac=0.1): - """ - Calculate the change in potential by shifting points in theta and phi directions - # wow, that sure didn't work at all. - """ - - theta = x0[0 : x0.size / 2] - phi = x0[x0.size / 2 :] - x, y, z = thetaphi2xyz(theta, phi) - u_input = xyz2_u(x, y, z) - - # Now to loop over each point, and find where it's potenital minimum would be, and move it - # half-way there. 
- xyz_new = np.zeros((x.size, 3), dtype=float) - mask = np.ones(x.size, dtype=bool) - for i in np.arange(x.size): - mask[i] = 0 - fit = minimize(potential_single, [x[i], y[i], z[i]], args=(x[mask], y[mask], z[mask])) - mask[i] = 1 - xyz_new[i] = fit.x / np.sqrt(np.sum(fit.x**2)) - - xyz_input = np.array((x, y, z)).T - diff = xyz_input - xyz_new - - # Move half way in x-y-z space - xyz_out = xyz_input + stepfrac * diff - # Project back onto sphere - xyz_out = xyz_out.T / np.sqrt(np.sum(xyz_out**2, axis=1)) - u_new = xyz2_u(xyz_out[0, :], xyz_out[1, :], xyz_out[2, :]) - theta, phi = xyz2thetaphi(xyz_out[0, :], xyz_out[1, :], xyz_out[2, :]) - return np.concatenate((theta, phi)), u_new - - -def iterate_potential_random(x0, stepsize=0.05): - """ - Given a bunch of theta,phi values, shift things around to minimize potential - """ - - theta = x0[0 : int(x0.size / 2)] - phi = x0[int(x0.size / 2) :] - - x, y, z = thetaphi2xyz(theta, phi) - # Distance squared - dsq = 0.0 - - indices = np.triu_indices(x.size, k=1) - - for coord in [x, y, z]: - coord_i = np.tile(coord, (coord.size, 1)) - coord_j = coord_i.T - d = (coord_i[indices] - coord_j[indices]) ** 2 - dsq += d - - d = np.sqrt(dsq) - - u_input = 1.0 / d - - # offset everything by a random ammount - x_new = x + np.random.random(theta.size) * stepsize - y_new = y + np.random.random(theta.size) * stepsize - z_new = z + np.random.random(theta.size) * stepsize - - r = (x_new**2 + y_new**2 + z_new**2) ** 0.5 - # put back on the sphere - x_new = x_new / r - y_new = y_new / r - z_new = z_new / r - - dsq_new = 0 - for coord, coord_new in zip([x, y, z], [x_new, y_new, z_new]): - coord_i_new = np.tile(coord_new, (coord_new.size, 1)) - coord_j = coord_i_new.T - d_new = (coord_i_new[indices] - coord_j[indices]) ** 2 - dsq_new += d_new - u_new = 1.0 / np.sqrt(dsq_new) - - u_diff = np.sum(u_new) - np.sum(u_input) - if u_diff > 0: - return x0, 0.0 - else: - theta, phi = xyz2thetaphi(x_new, y_new, z_new) - return np.concatenate((theta, phi)), u_diff - - -def ang_potential(x0): - """ - If distance is computed along sphere rather than through 3-space. - """ - theta = x0[0 : int(x0.size / 2)] - phi = np.pi / 2 - x0[int(x0.size / 2) :] - - indices = np.triu_indices(theta.size, k=1) - - theta_i = np.tile(theta, (theta.size, 1)) - theta_j = theta_i.T - phi_i = np.tile(phi, (phi.size, 1)) - phi_j = phi_i.T - d = _angular_separation(theta_i[indices], phi_i[indices], theta_j[indices], phi_j[indices]) - U = np.sum(1.0 / d) - return U - - -def fib_sphere_grid(npoints): - """ - Use a Fibonacci spiral to distribute points uniformly on a sphere. - - based on https://people.sc.fsu.edu/~jburkardt/py_src/sphere_fibonacci_grid/sphere_fibonacci_grid_points.py - - Returns theta and phi in radians - """ - - phi = (1.0 + np.sqrt(5.0)) / 2.0 - - i = np.arange(npoints, dtype=float) - i2 = 2 * i - (npoints - 1) - theta = (2.0 * np.pi * i2 / phi) % (2.0 * np.pi) - sphi = i2 / npoints - phi = np.arccos(sphi) - return theta, phi - - -def even_points(npts, use_fib_init=True, method="CG", potential_func=elec_potential, maxiter=None): - """ - Distribute npts over a sphere and minimize their potential, making them - "evenly" distributed - - Starting with the Fibonacci spiral speeds things up by ~factor of 2. 
- """ - - if use_fib_init: - # Start with fibonacci spiral guess - theta, phi = fib_sphere_grid(npts) - else: - # Random on a sphere - theta = np.random.rand(npts) * np.pi * 2.0 - phi = np.arccos(2.0 * np.random.rand(npts) - 1.0) - - x = np.concatenate((theta, phi)) - # XXX--need to check if this is the best minimizer - min_fit = minimize(potential_func, x, method="CG", options={"maxiter": maxiter}) - - x = min_fit.x - theta = x[0 : int(x.size / 2)] - phi = x[int(x.size / 2) :] - # Looks like I get the same energy values as https://en.wikipedia.org/wiki/Thomson_problem - return theta, phi - - -def elec_potential_xyz(x0): - x0 = x0.reshape(3, int(x0.size / 3)) - x = x0[0, :] - y = x0[1, :] - z = x0[2, :] - dsq = 0.0 - - r = np.sqrt(x**2 + y**2 + z**2) - x = x / r - y = y / r - z = z / r - indices = np.triu_indices(x.size, k=1) - - for coord in [x, y, z]: - coord_i = np.tile(coord, (coord.size, 1)) - coord_j = coord_i.T - d = (coord_i[indices] - coord_j[indices]) ** 2 - dsq += d - - U = np.sum(1.0 / np.sqrt(dsq)) - return U - - -@jit() -def elec_p_xyx_loop(x0): - """do this with a brutal loop that can be numba ified""" - x0 = x0.reshape(3, int(x0.size / 3)) - x = x0[0, :] - y = x0[1, :] - z = x0[2, :] - U = 0.0 - - r = np.sqrt(x**2 + y**2 + z**2) - x = x / r - y = y / r - z = z / r - - npts = x.size - for i in range(npts - 1): - for j in range(i + 1, npts): - dsq = (x[i] - x[j]) ** 2 + (y[i] - y[j]) ** 2 + (z[i] - z[j]) ** 2 - U += 1.0 / np.sqrt(dsq) - return U - - -def x02sphere(x0): - x0 = x0.reshape(3, int(x0.size / 3)) - x = x0[0, :] - y = x0[1, :] - z = x0[2, :] - - r = np.sqrt(x**2 + y**2 + z**2) - x = x / r - y = y / r - z = z / r - - return np.concatenate((x, y, z)) - - -def even_points_xyz( - npts, - use_fib_init=True, - method="CG", - potential_func=elec_p_xyx_loop, - maxiter=None, - callback=None, - verbose=True, -): - """ - Distribute npts over a sphere and minimize their potential, making them - "evenly" distributed - - Starting with the Fibonacci spiral speeds things up by ~factor of 2. 
- """ - - if use_fib_init: - # Start with fibonacci spiral guess - theta, phi = fib_sphere_grid(npts) - else: - # Random on a sphere - theta = np.random.rand(npts) * np.pi * 2.0 - phi = np.arccos(2.0 * np.random.rand(npts) - 1.0) - - x = np.concatenate(thetaphi2xyz(theta, phi)) - - if verbose: - print("initial potential=", elec_potential_xyz(x)) - # XXX--need to check if this is the best minimizer - min_fit = minimize(potential_func, x, method="CG", options={"maxiter": maxiter}, callback=callback) - - if verbose: - print("final potential=", elec_potential_xyz(min_fit.x)) - print("niteration=", min_fit.nit) - - x = x02sphere(min_fit.x) - - # Looks like I get the same energy values as https://en.wikipedia.org/wiki/Thomson_problem - return x diff --git a/rubin_scheduler/scheduler/training/__init__.py b/rubin_scheduler/scheduler/training/__init__.py deleted file mode 100644 index f47522fb..00000000 --- a/rubin_scheduler/scheduler/training/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .de_optimizer import * diff --git a/rubin_scheduler/scheduler/training/de_optimizer.py b/rubin_scheduler/scheduler/training/de_optimizer.py deleted file mode 100644 index 6b83bbf2..00000000 --- a/rubin_scheduler/scheduler/training/de_optimizer.py +++ /dev/null @@ -1,356 +0,0 @@ -__author__ = "Elahe" - -import numpy as np - - -class DeOptimizer: - def __init__( - self, - evaluator, - population_size, - f, - cr, - max_iter, - strategy=9, - vtr=-1e99, - eps=0, - show_progress=1, - monitor_cycle=np.inf, - gray_training=False, - load_candidate_solution=False, - ): - self.show_progress = show_progress - self.evaluator = evaluator - self.population_size = population_size - self.f = f - self.cr = cr - self.max_iter = max_iter - self.strategy = strategy - - if strategy > 5: - self.st = strategy - 5 - else: - self.st = strategy - - self.monitor_cycle = monitor_cycle - self.D = evaluator.D - self.eps = eps - self.vtr = vtr - - # if population slightly changes while optimization or not - self.gray_training = gray_training - self.load_candidate_solution = load_candidate_solution - - self.optimize() - - def optimize(self): - # initialize optimization - self.initialize_optimization() - # initialize the population - if self.load_candidate_solution: - self.load_generation() - if not self.load_candidate_solution: - self.make_random_population() - - # score initial population - # self.score_population() - # update progress parameters - self.init_progress() - - # iterations - while not self.terminate(): - self.evolve() - self.update_progress() - self.print_status() - self.monitor() - if self.gray_training: - self.swap_population() - self.update_progress() - - def initialize_optimization(self): - # Initial pop features - self.population = np.zeros((self.population_size, self.D)) - self.scores = 1e99 * np.ones(self.population_size) - self.count = 0 - self.nfeval = 0 - self.after_score_pop = np.zeros((self.population_size, self.D)) - - def make_random_population(self): - for indiv in range(self.population_size): - self.population[indiv, :] = self.make_random_individual() - - def make_random_individual(self): - delta = self.evaluator.domain[:, 1] - self.evaluator.domain[:, 0] - offset = self.evaluator.domain[:, 0] - ind = np.multiply(np.random.rand(1, self.D), delta) + offset - return ind - - def score_population(self): - for indiv in range(self.population_size): - self.scores[indiv] = self.evaluator.target(self.population[indiv, :]) - self.nfeval += 1 - """change individuals for the refined values by gray training """ - 
self.after_score_pop[indiv, :] = self.evaluator.refined_individual() - self.print_ind( - indiv, - self.scores[indiv], - self.population[indiv, :], - self.after_score_pop[indiv, :], - ) - - def init_progress(self): - self.best_index = np.argmin(self.scores) - self.best_val = self.scores[self.best_index] - self.best_ind = self.population[self.best_index, :] - self.mean_val = np.mean(self.scores) - - def update_progress(self): - # bests - temp_best_index = np.argmin(self.scores) - temp_best_score = self.scores[temp_best_index] - # preserving best ever individual - if temp_best_score < self.best_val: - print(temp_best_score, self.best_val) - self.best_index = temp_best_index - self.best_val = temp_best_score - self.best_ind = self.population[self.best_index, :] - new_mean_val = np.mean(self.scores) - self.obj_prog = self.mean_val - new_mean_val - self.mean_val = new_mean_val - # algorithm parameters - self.count += 1 - - def evolve(self): - # Candidate - self.ui = self.cal_trials() - self.after_score_ui = np.zeros(np.shape(self.ui)) - # Selection - for trial_indx in range(self.population_size): - trial_score = self.score_trial(trial_indx) - - if trial_score < self.scores[trial_indx]: - self.population[trial_indx, :] = self.ui[trial_indx, :] - self.scores[trial_indx] = trial_score - self.after_score_pop[trial_indx, :] = self.after_score_ui[trial_indx, :] - - self.print_ind( - trial_indx, - self.scores[trial_indx], - self.population[trial_indx, :], - self.after_score_pop[trial_indx, :], - ) - self.save_last_generation() # most recent generation - - def score_trial(self, trial_indx): - tempval = self.evaluator.target(self.ui[trial_indx, :]) - self.nfeval += 1 - if self.gray_training: - self.after_score_ui[trial_indx, :] = self.evaluator.refined_individual() - return tempval - - def monitor(self): - if self.count != 0 and self.count % self.monitor_cycle == 0: - self.monitor_score = self.best_val - self.monitor_indiv = self.best_ind - self.monitor_mean = self.mean_val - - def swap_population(self): - # regularize refined population before swap (gene-wise swap) - for gene in range(self.D): - self_val = self.after_score_pop[:, gene] - lower_alt_val = self.evaluator.domain[gene, 0] - upper_alt_val = self.evaluator.domain[gene, 1] - lower_bound = self_val >= lower_alt_val - upper_bound = self_val <= upper_alt_val - self_val = np.where(lower_bound, self_val, lower_alt_val) - self_val = np.where(upper_bound, self_val, upper_alt_val) - # swap individuals: - self.population[:, gene] = self_val - # modify scores - self.score_population() - - def cal_trials(self): - popold = self.population - - rot = np.arange(0, self.population_size) - rotd = np.arange(0, self.D) - ind = np.random.permutation(4) - - a1 = np.random.permutation(self.population_size) - rt = (rot + ind[0]) % self.population_size - a2 = a1[rt] - rt = (rot + ind[1]) % self.population_size - a3 = a2[rt] - rt = (rot + ind[2]) % self.population_size - a4 = a3[rt] - rt = (rot + ind[3]) % self.population_size - a5 = a4[rt] - - pm1 = self.population[a1, :] - pm2 = self.population[a2, :] - pm3 = self.population[a3, :] - pm4 = self.population[a4, :] - pm5 = self.population[a5, :] - - pop_of_best_ind = np.zeros((self.population_size, self.D)) - for i in range(0, self.population_size): - pop_of_best_ind[i] = self.best_ind - - cr_decision = np.random.rand(self.population_size, self.D) < self.cr - - if self.strategy > 5: - cr_decision = np.sort(np.transpose(cr_decision)) - for i in range(0, self.population_size): - n = np.floor(np.random.rand(1) * self.D) - 
if n > 0: - rtd = (rotd + n) % self.D - rtd = rtd.astype(int) - cr_decision[:, i] = cr_decision[rtd, i] - cr_decision = np.transpose(cr_decision) - mpo = cr_decision < 0.5 - ui = 0 - if self.st == 1: - dif = self.f * (pm1 - pm2) - ui = pop_of_best_ind + dif - ui = self.regularize_candidate(ui, pop_of_best_ind) - ui = np.multiply(popold, mpo) + np.multiply(ui, cr_decision) - elif self.st == 2: - dif = self.f * (pm1 - pm2) - ui = pm3 + dif - ui = self.regularize_candidate(ui, pop_of_best_ind) - ui = np.multiply(popold, mpo) + np.multiply(ui, cr_decision) - elif self.st == 3: - dif = self.f * (pop_of_best_ind - popold + pm1 - pm2) - ui = popold + dif - ui = self.regularize_candidate(ui, pop_of_best_ind) - ui = np.multiply(popold, mpo) + np.multiply(ui, cr_decision) - elif self.st == 4: - dif = self.f * (pm1 - pm2 + pm3 - pm4) - ui = pop_of_best_ind + dif - ui = self.regularize_candidate(ui, pop_of_best_ind) - ui = np.multiply(popold, mpo) + np.multiply(ui, cr_decision) - elif self.st == 5: - dif = self.f * (pm1 - pm2 + pm3 - pm4) - ui = pm5 + dif - ui = self.regularize_candidate(ui, pop_of_best_ind) - ui = np.multiply(popold, mpo) + np.multiply(ui, cr_decision) - return ui - - def regularize_candidate(self, candidate, alternative): - lower_bound = self.evaluator.domain[:, 0] - upper_bound = self.evaluator.domain[:, 1] - candidate = np.where(candidate >= lower_bound, candidate, alternative) - regularized_cand = np.where(candidate <= upper_bound, candidate, alternative) - return regularized_cand - - def terminate(self): - # Termination 1 : By maxiter - if self.count >= self.max_iter: - self.termination = 1 - return True - """ - #Termination 2 : By Value to reach - if self.best_val < self.vtr : - self.termination = 2 - return True - #Termination 3 : By monitor cycle change in the objective - if self.monitor_obj_change < self.eps: - self.termination = 3 - return True - """ - return False - - def save_last_generation(self): - np.save("last_gen_pop", self.population) - np.save("last_gen_scr", self.scores) - - def load_generation(self): - try: - temp_population = np.load("last_gen_pop.npy") - if np.shape(temp_population)[0] != self.population_size: - print( - "Previous generation is not of the same size of new setting, DE starts with a random initialization" - ) - self.load_candidate_solution = False - elif np.shape(temp_population)[1] != self.D: - print( - "Previous solution is not of the same size of new solution, DE starts with a random initialization" - ) - self.load_candidate_solution = False - else: - self.population = temp_population - self.scores = np.load("last_gen_scr.npy") - print("Warm start: DE starts with a previously evolved population") - - except: - print("No previous generation is available, DE starts with a random initialization") - self.load_candidate_solution = False - - def print_ind(self, ind_index, score, indiv, refined_indiv): - # print("{}: Objective: {},\nCandidate: {},\nRefined candidate: {}".format(ind_index +1, score, indiv, refined_indiv)) - print("{}: Objective: {},\nCandidate: {}".format(ind_index + 1, -1.0 * score, indiv)) - with open("Output/Output.txt", "a") as text_file: - text_file.write("{}: Objective: {},\nCandidate: {}\n".format(ind_index + 1, -1.0 * score, indiv)) - text_file.close() - - def print_status(self): - print("********************************************************************************************") - print( - "iter {}:best performance: {}, best candidate no.: {}\nbest candidate: {}".format( - self.count, self.best_val * -1, self.best_index + 
1, self.best_ind - ) - ) - print("") - with open("Output/Output.txt", "a") as text_file: - text_file.write( - "********************************************************************************************" - ) - text_file.write( - "iter {}:best performance: {}, best candidate no.: {}\nbest candidate: {}\n".format( - self.count, self.best_val * -1, self.best_index + 1, self.best_ind - ) - ) - text_file.close() - - def final_print(self): - print("") - print("* Problem Specifications") - print("Date : {}".format(self.evaluator.scheduler.Date)) - print("Preferences : {}".format(self.evaluator.pref)) - print("") - print("* DE parameters") - print("F : {}".format(self.f)) - print("Cr : {}".format(self.cr)) - print("Pop Size : {}".format(self.population_size)) - print("No. of eval: {}".format(self.nfeval)) - print("No. of Iter: {}".format(self.count)) - print("Termination: {}".format(self.termination)) - print("eps : {}".format(self.eps)) - print("vtr : {}".format(self.vtr)) - print("") - with open("Output/Output.txt", "a") as text_file: - text_file.write("\n* Problem Specifications\n") - text_file.close() - text_file.write("Date : {}\n".format(self.evaluator.scheduler.Date)) - text_file.close() - text_file.write("Preferences : {}\n".format(self.evaluator.pref)) - text_file.close() - text_file.write("\n\n") - text_file.close() - text_file.write("* DE parameters\n") - text_file.write("F : {}\n".format(self.f)) - text_file.close() - text_file.write("Cr : {}\n".format(self.cr)) - text_file.close() - text_file.write("Pop Size : {}\n".format(self.population_size)) - text_file.close() - text_file.write("No. of eval: {}\n".format(self.nfeval)) - text_file.close() - text_file.write("No. of Iter: {}\n".format(self.count)) - text_file.close() - text_file.write("Termination: {}\n".format(self.termination)) - text_file.close() - text_file.write("eps : {}\n".format(self.eps)) - text_file.close() - text_file.write("vtr : {}\n".format(self.vtr)) - text_file.close() diff --git a/rubin_scheduler/scheduler/training/training.py b/rubin_scheduler/scheduler/training/training.py deleted file mode 100644 index 1a0096ef..00000000 --- a/rubin_scheduler/scheduler/training/training.py +++ /dev/null @@ -1,77 +0,0 @@ -import numpy as np - -import rubin_scheduler.scheduler as fs -import rubin_scheduler.scheduler.Training as optional -from rubin_scheduler.model_observatory import ModelObservatory - - -class BlackTraining: - def __init__(self, preferences=[1], gray_train=False, custom_period=1): - self.pref = preferences - - self.survey_length = 0.2 # days - self.surveys = [] - survey_filters = ["r"] - for f in survey_filters: - self.bfs = [] - self.bfs.append(fs.Slewtime_basis_function_cost(filtername=f)) - self.bfs.append(fs.Visit_repeat_basis_function_cost(filtername=f, survey_filters=survey_filters)) - self.bfs.append(fs.Target_map_basis_function_cost(filtername=f, survey_filters=survey_filters)) - self.bfs.append(fs.Normalized_alt_basis_function_cost(filtername=f)) - self.bfs.append(fs.Hour_angle_basis_function_cost()) - self.bfs.append(fs.Depth_percentile_basis_function_cost()) - weights = np.array([5, 2, 1, 1, 2, 1]) - self.surveys.append( - fs.Simple_greedy_survey_fields_cost(self.bfs, weights, filtername=f, block_size=10) - ) - - def de_opt( - self, - n_p, - F, - cr, - max_iter, - D, - domain, - load_candidate_solution, - gray_trianing=False, - ): - self.D = D - self.domain = domain - self.optimizer = optional.DE_optimizer( - self, - n_p, - F, - cr, - max_iter, - gray_training=gray_trianing, - 
load_candidate_solution=load_candidate_solution, - ) - - def target(self, x): - x[0] = 5 # reduce redundant solutions - for survey in self.surveys: - survey.basis_weights = x - scheduler = fs.Core_scheduler_cost(self.surveys) - observatory = ModelObservatory() - observatory, scheduler, observations = fs.sim_runner( - observatory, scheduler, survey_length=self.survey_length - ) - return -1 * fs.simple_performance_measure(observations, self.pref) - - def refined_individual(self): - return np.zeros(self.D) - - -n_p = 50 # number of candidate solutions that are supposed to explore the space of solution in each iteration, rule of thumb: ~10*D -F = 0.8 # algorithm meta parameter (mutation factor that determines the amount of change for the derivation of candidate solutions of the next iteration) -cr = 0.8 # algorithm meta parameter (crossover rate that determines the rate of mixing of previous candidates to make new candidates) -max_iter = 100 # maximum number of iterations. maximum number of function evaluations = n_p * max_iter, -domain = np.array( - [[0, 10], [0, 10], [0, 10], [0, 10], [0, 10], [0, 10]] -) # Final solution would lie in this domain -D = 6 # weights dimension - - -train = BlackTraining() -train.de_opt(n_p, F, cr, max_iter, D, domain, load_candidate_solution=False) diff --git a/rubin_scheduler/scheduler/utils/utils.py b/rubin_scheduler/scheduler/utils/utils.py index 3b4baa49..38c99acf 100644 --- a/rubin_scheduler/scheduler/utils/utils.py +++ b/rubin_scheduler/scheduler/utils/utils.py @@ -21,6 +21,8 @@ "inrange", "season_calc", "create_season_offset", + "thetaphi2xyz", + "xyz2thetaphi", ) import datetime @@ -52,6 +54,19 @@ def smallest_signed_angle(a1, a2): return result +def thetaphi2xyz(theta, phi): + x = np.sin(phi) * np.cos(theta) + y = np.sin(phi) * np.sin(theta) + z = np.cos(phi) + return x, y, z + + +def xyz2thetaphi(x, y, z): + phi = np.arccos(z) + theta = np.arctan2(y, x) + return theta, phi + + class IntRounded: """ Class to help force comparisons be made on scaled up integers,
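
Note on the relocated helpers: besides deleting the unused `scheduler.thomson` and `scheduler.training` packages, this patch moves `thetaphi2xyz` / `xyz2thetaphi` into `rubin_scheduler.scheduler.utils`, which is where `base_survey.py` now imports them from. Below is a minimal round-trip sketch of the new import path — not part of the patch, assuming this branch of `rubin_scheduler` is installed, with illustrative input values only:

```python
# Sketch only: exercises the coordinate helpers relocated to scheduler.utils
# by this patch. The conversion convention is unchanged: phi is the polar
# angle from +z (z = cos(phi)); theta is the azimuth returned by arctan2(y, x).
import numpy as np

from rubin_scheduler.scheduler.utils import thetaphi2xyz, xyz2thetaphi

# Illustrative angles (radians).
theta = np.array([0.0, np.pi / 2, np.pi])
phi = np.array([np.pi / 4, np.pi / 2, 3 * np.pi / 4])

# Project onto the unit sphere and convert back.
x, y, z = thetaphi2xyz(theta, phi)
theta_back, phi_back = xyz2thetaphi(x, y, z)

# The round trip should recover the inputs (theta compared modulo 2*pi,
# since arctan2 returns values in (-pi, pi]).
assert np.allclose(theta_back % (2 * np.pi), theta % (2 * np.pi))
assert np.allclose(phi_back, phi)
```

Because the functions are also added to `__all__` in `utils.py`, downstream code can import them exactly as `base_survey.py` does in this diff, without reaching into the deleted `thomson` module.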