# coding=utf-8
# Nicholas M Boffi
# 5/3/17
# 8.334 Final Project

import numpy as np
from union_find import *
from ising import *
from multiprocessing.dummy import Pool as ThreadPool
from multiprocessing import Pool
from os import makedirs
from os.path import exists
from time import time as wall_time


def swap_step(grid, bonds, rates, N, T, curr_step, thresh):
    """
    Description:
    ------------
    Does one 'swap' on the grid. Uses the rates for all possible transitions
    in the grid to compute which event actually occurs. Draws from an
    exponential distribution using the total rate constant to determine the
    amount of time that elapsed.

    Input:
    ------
    grid      : The grid of spin values.
    bonds     : The array of bonds.
    rates     : The linear array of transition rates.
    N         : The dimension of the N*N grid.
    T         : The temperature at which we are swapping.
    curr_step : The step in the simulation we are currently on - useful for
                diagnostics.
    thresh    : Rate threshold.

    Output:
    -------
    xx      : x coordinate of the swapped particle (or vacancy). This is the
              coordinate BEFORE the swap!
    yy      : y coordinate of the swapped particle (or vacancy). This is the
              coordinate BEFORE the swap!
    xdist   : x distance traveled - either 1, 0, or -1.
    ydist   : y distance traveled - either 1, 0, or -1.
    time    : Time elapsed for the swap.
    cluster : The cluster of the particle involved in the swap.
    """
    # Assemble the list of partial sums.
    rate_sums = np.cumsum(rates)

    # Pick the event using the rates.
    event_picker = rate_sums[-1]*np.random.uniform()
    xdist, ydist = 0, 0

    # Determine the index in the rate list of the event that occurred.
    possible = np.where(rate_sums > event_picker)[0]
    event_ind = possible[0]

    # Note that event_ind is of the form 2*(xx + N*yy) + {0, 1}, where the
    # braces indicate 0 or 1. Up rates come first, then right rates - if we
    # have a +1, the event is a right swap, and event_ind will be odd.
    # Hence, this boolean variable is valid.
    right_swap = event_ind % 2

    # Figure out the x and y coordinates of the swapped particle. The
    # formulas come from event_ind = 2*(xx + N*yy) + {0, 1}, noting that the
    # integer division by 2 cancels the +1 if it exists, and that xx < N so
    # that xx // N = 0.
    xx = (event_ind // 2) % N
    yy = (event_ind // 2) // N

    # Determine if the site swapping is a particle or a vacancy.
    particle = (grid[xx, yy] == 1)

    # Stores the cluster of the swapped particle.
    cluster = []

    # Handle the two swap cases accordingly.
    if right_swap:
        # The index of the site to the right.
        ri = (xx + 1) % N

        # Before swapping, identify the cluster of the swapped particle.
        cluster = identify_cluster(grid, N, xx, yy) if particle \
                else identify_cluster(grid, N, ri, yy)

        # And now do the swap.
        grid[xx, yy] *= -1
        grid[ri, yy] *= -1

        # Now that "grid" has changed, update the rates.
        update_rates(grid, bonds, rates, xx, yy, right_swap, N, T, thresh)

        # We do not move in y.
        ydist = 0

        # We are interested in the distance the particle involved in the
        # swap traveled, not the vacancy - hence, we can obtain that
        # distance as follows.
        xdist = 1 if particle else -1

        # And update the xx index of the particle actually involved in the swap.
        xx = ri if (not particle) else xx
    else:
        # The index of the site above.
        ui = (yy + 1) % N

        # Before swapping, identify the cluster of the swapped particle.
        cluster = identify_cluster(grid, N, xx, yy) if particle \
                else identify_cluster(grid, N, xx, ui)

        # And now actually do the swap.
        grid[xx, yy] *= -1
        grid[xx, ui] *= -1

        # Now that "grid" has changed, update the rates.
        update_rates(grid, bonds, rates, xx, yy, right_swap, N, T, thresh)

        # We do not move in x.
        xdist = 0

        # And same as the above case.
        ydist = 1 if particle else -1

        # And update the yy index of the particle actually involved in the swap.
        yy = ui if (not particle) else yy

    # And return all the data, drawing from an exponential distribution for
    # the time passed. Note that we return the coordinates and distance
    # change of the PARTICLE involved in the swap, not the lattice spot
    # whose up or right rate was chosen (which could have been a vacancy, as
    # was handled above!).
    return (xx, yy, xdist, ydist, np.random.exponential(1./rate_sums[-1]), cluster)


def identify_cluster(grid, N, xx, yy):
    """
    Description:
    ------------
    Identifies the cluster of particles that the particle located at grid
    point (xx, yy) belongs to. Uses a basic breadth-first search.

    Input:
    ------
    grid : The grid of spins.
    N    : The dimension of the N*N grid.
    xx   : The x index of the particle whose cluster we want to identify.
    yy   : The y index of the particle whose cluster we want to identify.

    Output:
    -------
    cluster : A list containing the cluster that (xx, yy) is a part of,
              stored as tuples of grid indices.
    """
    # Short-circuit if we start on a vacancy.
    if (grid[xx, yy] != 1):
        return []

    # Keeps track of the nodes we've already been to.
    visited = set([(xx, yy)])

    # Keeps track of the cluster.
    cluster = [(xx, yy)]

    # Keeps track of the sites we still need to investigate.
    # Add only the neighbors that are particles.
    prospies = []
    for curr_el in [((xx + 1) % N, yy), ((xx - 1) % N, yy),
                    (xx, (yy + 1) % N), (xx, (yy - 1) % N)]:
        if (grid[curr_el[0], curr_el[1]] == 1):
            prospies.append(curr_el)

    # Keep going until we've exhausted all possibilities.
    while (len(prospies) != 0):
        # Take out the next site in line and extract its coordinates.
        curr_prospie = prospies.pop(0)
        px = curr_prospie[0]
        py = curr_prospie[1]

        # Mark this site as visited.
        visited.add(curr_prospie)

        # If it's a particle...
        if (grid[px, py] == 1):
            # Add it to the cluster.
            cluster.append(curr_prospie)

            # Compute its neighbors.
            new_prospies = []
            for curr_el in [((px + 1) % N, py), ((px - 1) % N, py),
                            (px, (py + 1) % N), (px, (py - 1) % N)]:
                if (grid[curr_el[0], curr_el[1]] == 1):
                    new_prospies.append(curr_el)

            # Only keep those that have not already been visited.
            new_prospies = list(set(new_prospies) - visited)

            # And add them to the list of prospects, removing duplicates.
            prospies = list(set(prospies + new_prospies))

    return cluster


def hoshen_kopelman(grid, N):
    """
    Description:
    ------------
    Implements the Hoshen-Kopelman algorithm for finding all clusters on a
    lattice, using the attached UnionFind data structure implementation.

    Input:
    ------
    grid : The grid of spin states on which we want to compute the clusters.
    N    : The dimension of the N*N grid.

    Output:
    -------
    clusters : A UnionFind data structure storing the clusters on the grid.
    """
    # Declare our empty UnionFind data structure.
    clusters = UnionFind()

    # Perform a raster scan of the grid.
    for xx in range(N):
        for yy in range(N):
            # If we found a particle.
            if (grid[xx, yy] == 1):
                # Test if the sites up and to the right are particles as well.
                up = (grid[xx, (yy + 1) % N] == 1)
                right = (grid[(xx + 1) % N, yy] == 1)

                # Indices describing the neighboring points.
                up_tup = (xx, (yy + 1) % N)
                ri_tup = ((xx + 1) % N, yy)

                # New cluster.
                if not (up or right):
                    clusters.find((xx, yy))
                # Both neighbors are particles.
                elif (up and right):
                    # Join the up and right clusters, and then add the new
                    # point to the cluster.
                    clusters.union(up_tup, ri_tup)
                    clusters.union(up_tup, (xx, yy))
                # Just the up neighbor forms a cluster.
                elif up:
                    clusters.union((xx, yy), up_tup)
                # Just the right neighbor forms a cluster.
                else:
                    clusters.union((xx, yy), ri_tup)

    return clusters


def uf_to_np(grid, clusters, N):
    """
    Description:
    ------------
    Takes a UnionFind data structure of clusters computed using the
    Hoshen-Kopelman algorithm and converts it to an N*N NumPy array of
    cluster labels.

    Input:
    ------
    grid     : The grid of spins.
    clusters : A UnionFind data structure containing cluster information on
               the grid.
    N        : Size of the N*N grid.

    Output:
    -------
    clust_arr   : A NumPy array of cluster labels, as described above.
    clust_sizes : A dictionary mapping each cluster label to its size.
    """
    # Declare an empty labeling array.
    clust_arr = np.zeros((N, N))

    # And an empty size dictionary.
    clust_sizes = {}

    # Raster scan the grid.
    for xx in range(N):
        for yy in range(N):
            # If we are on a particle.
            if (grid[xx, yy] == 1):
                # Look up its representative.
                rep = clusters.find((xx, yy))

                # Map the representative (x, y) to the unique label x + N*y.
                label = rep[0] + rep[1]*N
                clust_arr[xx, yy] = label

                # And update or create the cluster size.
                if (label in clust_sizes):
                    clust_sizes[label] += 1
                else:
                    clust_sizes[label] = 1
            # Label all vacancies with a -1.
            else:
                clust_arr[xx, yy] = -1

    return clust_arr, clust_sizes


def compute_rates(grid, bonds, N, T, thresh):
    """
    Description:
    ------------
    Computes the transition rates for all possible states in the model.

    Input:
    ------
    grid   : Matrix of spins on the grid.
    bonds  : Matrix of bond energies between spins.
    N      : Size of the N*N grid.
    T      : Temperature at which we are performing the swaps.
    thresh : Rate threshold.

    Output:
    -------
    rates : A linear array of all transition rates. We only store the up
            rate and the right rate for each particle (these are equivalent
            to the down rate of the particle above and the left rate of the
            particle to the right, so with periodic boundary conditions we
            have all of them). The array has length 2*N*N and proceeds in
            natural ordering, with the up rate and then the right rate for
            each element.
    """
    # Allocate space for an up rate and a right rate for each spot in the grid.
    rates = np.zeros(N*N*2)

    # Fill up the rates array.
    for yy in range(N):
        for xx in range(N):
            # Up rate, then right rate.
            rates[2*(xx + N*yy)], rates[2*(xx + N*yy) + 1] = \
                    compute_ur_rate(xx, yy, grid, bonds, N, T)

            # Threshold: softly suppress any rate above thresh.
            if (rates[2*(xx + N*yy)] > thresh):
                rates[2*(xx + N*yy)] /= 10**(np.log(rates[2*(xx + N*yy)]/thresh))
            if (rates[2*(xx + N*yy) + 1] > thresh):
                rates[2*(xx + N*yy) + 1] /= 10**(np.log(rates[2*(xx + N*yy) + 1]/thresh))

    # Output the rates that are not particle-particle or vacancy-vacancy
    # (which are zero) for diagnostics.
    real_rates = rates[rates != 0]
    np.savetxt("rates.txt", sorted(real_rates))
    print 'Saved rates.'

    return rates


def compute_ur_rate(xx, yy, grid, bonds, N, T):
    """
    Description:
    ------------
    Computes the "up" and the "right" transition rate for the particle
    located at grid point (xx, yy).

    Input:
    ------
    xx    : x coordinate of the particle.
    yy    : y coordinate of the particle.
    grid  : Matrix of spins on the grid.
    bonds : Matrix of bond energies between spins.
    N     : Size of the N*N grid.
    T     : Temperature at which we are performing the swaps.

    Output:
    -------
    up_rate    : The rate to exchange with the spin above.
    right_rate : The rate to exchange with the spin to the right.
    """
    # Figure out the neighboring spin indices:
    # right, left, up, down indices respectively.
    ri = (xx + 1) % N
    li = (xx - 1) % N
    ui = (yy + 1) % N
    di = (yy - 1) % N

    # Determine the energy change for exchanging with the spin above or the
    # spin to the right.
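    # A sketch of the algebra used below: let s1 = grid[xx, yy], let s2 be
    # the neighboring spin, and let e1, e2 be their local environments (the
    # sum of bond energy times spin over each spin's neighbors), EXCLUDING
    # the bond the two spins share, which is invariant under the exchange.
    # Swapping two opposite spins then changes the energy by
    #
    #     dE = s1*(e1 - e2) + s2*(e2 - e1) = (s1 - s2)*(e1 - e2),
    #
    # and the transition rate computed below is the Arrhenius-like factor
    # exp(-dE/T).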
    # First handle the above case. We figure out the indices for the
    # neighbors of our neighbor.
    nri = ri
    nli = li
    nui = (yy + 2) % N

    # Compute the local environment of the spin at (xx, yy) - i.e., the bond
    # energies times the neighboring spin values. Note that grid[xx, yy]*env
    # basically gives the value of -beta*H for this spin.
    base_env = grid[ri, yy]*bonds[ri, yy] + grid[li, yy]*bonds[li, yy] \
             + grid[xx, ui]*bonds[xx, ui] + grid[xx, di]*bonds[xx, di]

    # Only switch with opposite states.
    if (grid[xx, ui] == grid[xx, yy]):
        up_rate = 0
    else:
        # Compute the energies. Note that we really need to compute the
        # local environments of the two spins, excluding the bond that they
        # share (so, three terms for each spin). The resulting energy change
        # is then the sum of each spin times its own environment minus its
        # neighbor's environment.
        up_env = grid[nri, ui]*bonds[nri, ui] + grid[nli, ui]*bonds[nli, ui] \
               + grid[xx, nui]*bonds[xx, nui]
        env = base_env - grid[xx, ui]*bonds[xx, ui]
        up_rate = np.exp(-(grid[xx, yy]*(env - up_env) + grid[xx, ui]*(up_env - env))/T)

    # Now, the right case.
    nri = (xx + 2) % N
    nui = ui
    ndi = di

    # Only switch with opposite states.
    if (grid[ri, yy] == grid[xx, yy]):
        right_rate = 0
    else:
        # Same as the up-rate block.
        right_env = grid[ri, nui]*bonds[ri, nui] + grid[ri, ndi]*bonds[ri, ndi] \
                  + grid[nri, yy]*bonds[nri, yy]
        env = base_env - grid[ri, yy]*bonds[ri, yy]
        right_rate = np.exp(-(grid[xx, yy]*(env - right_env) + grid[ri, yy]*(right_env - env))/T)

    # Return the rates.
    return (up_rate, right_rate)


def update_rates(grid, bonds, rates, xx, yy, right_swap, N, T, thresh):
    """
    Description:
    ------------
    Updates the list of rates for all possible transitions on the grid after
    a swap has occurred.

    Input:
    -----
    grid       : The grid of spins.
    bonds      : The array of bond energies.
    rates      : The current, out-of-date rate list after a swap at (xx, yy)
                 occurred.
    xx         : The x index of the particle (or vacancy) that swapped.
    yy         : The y index of the particle (or vacancy) that swapped.
    right_swap : A boolean indicating whether the swap was to the right
                 (True) or up (False).
    N          : Number of lattice points in the N*N grid.
    T          : Temperature at which we need to compute the rates.
    thresh     : Rate threshold.
    """
    # Loop over all the neighbors whose rates need to be adjusted.
    for (nxx, nyy) in get_nnn(grid, xx, yy, right_swap, N):
        # Compute and store the updated rates.
        rates[2*(nxx + N*nyy)], rates[2*(nxx + N*nyy) + 1] = \
                compute_ur_rate(nxx, nyy, grid, bonds, N, T)

        # Threshold: softly suppress any rate above thresh.
        if (rates[2*(nxx + N*nyy)] > thresh):
            rates[2*(nxx + N*nyy)] /= 10**(np.log(rates[2*(nxx + N*nyy)]/thresh))
        if (rates[2*(nxx + N*nyy) + 1] > thresh):
            rates[2*(nxx + N*nyy) + 1] /= 10**(np.log(rates[2*(nxx + N*nyy) + 1]/thresh))

    # Save the rates for diagnostics.
    real_rates = rates[rates != 0]
    np.savetxt("rates.txt", sorted(real_rates))


def get_nnn(grid, xx, yy, right_swap, N):
    """
    Description:
    ------------
    Returns a list of nearest and next-nearest neighbors for the particle
    located at (xx, yy), as well as the neighbor particle it just swapped
    with. Does so manually, because the enumeration is simple in the square
    lattice case, and probably even simpler than using loops over the two
    coordinates.

    Input:
    -----
    grid       : The grid of spins.
    xx         : The x index of the particle or vacancy involved in the swap.
    yy         : The y index of the particle or vacancy involved in the swap.
    right_swap : Whether or not the particle (or vacancy) swapped to the right.
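    N          : The size of the N*N grid.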
    Output:
    ------
    (no name) : A list of tuples containing the x and y coordinates of all
                the nearest and next-nearest neighbors whose rates need to
                be updated to continue with the simulation.
    """
    if right_swap:
        return [(xx, yy),
                ((xx + 1) % N, yy),
                ((xx + 2) % N, yy),
                ((xx + 3) % N, yy),
                ((xx - 1) % N, yy),
                ((xx - 2) % N, yy),
                (xx, (yy + 1) % N),
                (xx, (yy + 2) % N),
                (xx, (yy - 1) % N),
                (xx, (yy - 2) % N),
                ((xx + 1) % N, (yy + 1) % N),
                ((xx + 1) % N, (yy + 2) % N),
                ((xx + 1) % N, (yy - 1) % N),
                ((xx + 1) % N, (yy - 2) % N),
                ((xx + 2) % N, (yy + 1) % N),
                ((xx + 2) % N, (yy - 1) % N),
                ((xx - 1) % N, (yy + 1) % N),
                ((xx - 1) % N, (yy - 1) % N)]
    else:
        return [(xx, yy),
                ((xx + 1) % N, yy),
                ((xx + 2) % N, yy),
                ((xx - 1) % N, yy),
                ((xx - 2) % N, yy),
                (xx, (yy - 1) % N),
                ((xx - 1) % N, (yy - 1) % N),
                ((xx + 1) % N, (yy - 1) % N),
                (xx, (yy - 2) % N),
                (xx, (yy + 1) % N),
                ((xx + 1) % N, (yy + 1) % N),
                ((xx + 2) % N, (yy + 1) % N),
                ((xx - 1) % N, (yy + 1) % N),
                ((xx - 2) % N, (yy + 1) % N),
                (xx, (yy + 2) % N),
                ((xx - 1) % N, (yy + 2) % N),
                ((xx + 1) % N, (yy + 2) % N),
                (xx, (yy + 3) % N)]


def output_grid_swap(xswap, yswap, grid):
    """
    Description:
    ------------
    Debugging function. Prints out the grid, using a different label for the
    particle located at (xswap, yswap).

    Input:
    ------
    xswap : x index of the particle to label differently.
    yswap : y index of the particle to label differently.
    grid  : The grid to output.
    """
    # Copy the grid so we don't mess it up.
    tmp = grid.copy()

    # Set the tracer particle to 99 so we can see it.
    tmp[xswap, yswap] = 99

    # Replace all the vacancies with 0's for ease of visualization.
    tmp[tmp == -1] = 0

    print tmp


def output_grid_tracers(tracers, num_tracers, grid):
    """
    Description:
    ------------
    Debugging function. Prints out the grid, using a different label for all
    of the tracer particles.

    Input:
    ------
    tracers     : List of tracer coordinates.
    num_tracers : Total number of tracers.
    grid        : The grid to output.
    """
    # Copy the grid so we don't mess it up.
    tmp = grid.copy()

    # Loop over all the tracers, modifying their representation.
    for curr_tracer in range(num_tracers):
        tmp[tracers[curr_tracer, 0], tracers[curr_tracer, 1]] = 99

    # Replace holes with 0's for ease of visualization.
    tmp[tmp == -1] = 0

    print tmp
    print


def diffuse_tracers(grid, bonds, N, num_steps, T, n_tracers, thresh, output=False):
    """
    Description:
    ------------
    Follows a number of random tracer particles as they diffuse through the
    grid using Gillespie dynamics with a standard kinetic Monte Carlo
    implementation.

    Input:
    ------
    grid      : The grid of spin values.
    bonds     : The bond energies in the grid.
    N         : The dimension of the N*N grid.
    num_steps : The number of simulation timesteps to perform (there is one
                exchange per simulation timestep).
    T         : The temperature at which we compute the diffusive dynamics.
    n_tracers : The number of tracer particles to follow.
    thresh    : Rate threshold.
    output    : If True, print the grid after every swap (for debugging).
    """
    # Declare time and distance arrays.
    times = np.zeros(num_steps)
    time = 0

    # First index: tracer particle.
    # Second index: current x or y distance.
    dists = np.zeros((n_tracers, 2))

    # First index: timestep.
    # Second index: tracer particle.
    # i.e., the squared distance trajectory is stored columnwise for each
    # tracer particle in this matrix. (64-bit integers so that long runs
    # cannot overflow.)
    dists_sq = np.zeros((num_steps, n_tracers), dtype=np.int64)

    # Create a list of tracer particles.
    # First index: tracer particle.
    # Second index: current x and y coordinates in the grid.
    tracers = np.zeros((n_tracers, 2), dtype=np.int16)

    # Fill up the list of tracer particles.
    for curr_tracer in range(n_tracers):
        # Pick a random tracer particle.
        tracer_x, tracer_y = pick_particle(grid, N)

        # Add it to the list.
        tracers[curr_tracer, 0] = tracer_x
        tracers[curr_tracer, 1] = tracer_y

    # Compute the rates in the simulation.
    rates = compute_rates(grid, bonds, N, T, thresh)

    # Loop over all the steps, tracking the wall time per sweep.
    real_times = []
    real_times.append(wall_time())
    for curr_step in range(num_steps):
        # Print some diagnostic information.
        if (curr_step % (N*N) == 0):
            real_times.append(wall_time())
            print 'Currently on step', curr_step // (N*N), 'per particle. Took:', real_times[-1] - real_times[-2], 'seconds.'

        # Swap a particle.
        swap_x, swap_y, dx, dy, dt, cluster = swap_step(grid, bonds, rates, N, T, curr_step, thresh)

        # By default, every tracer's squared distance stays the same this
        # step; if a tracer moved, its entry is overwritten below.
        if (curr_step > 0):
            for curr_tracer in range(n_tracers):
                dists_sq[curr_step, curr_tracer] = dists_sq[curr_step - 1, curr_tracer]

        # Check if we swapped a particle on the boundary of the same cluster
        # that contains a tracer particle. To do this, we use set
        # intersection, and first map the rows of the n_tracers*2 NumPy
        # array to a set of n_tracers tuples.
        tracers_in_cluster = list(set(map(tuple, tracers)).intersection(set(cluster)))

        # We've got a tracer in the cluster.
        if (len(tracers_in_cluster) > 0):
            # Generate a uniform random integer to select which particle in
            # the cluster actually swapped.
            ind = np.random.randint(len(cluster))

            # Check if we picked a tracer particle.
            if (ind < len(tracers_in_cluster)):
                # Get the tracer particle's grid indices.
                tswap_x, tswap_y = tracers_in_cluster[ind]

                # Figure out the tracer particle's index in the linear tracers array.
                tracer_ind = np.where(np.all(np.equal(tracers, [tswap_x, tswap_y]), axis=1))[0][0]

                # Update the tracer's distance, using the shortest (signed,
                # minimum-image) displacement from the tracer to the swap
                # site - wrap around the boundary, or go directly.
                dx_img = swap_x - tswap_x
                if (dx_img > N // 2):
                    dx_img -= N
                elif (dx_img < -(N // 2)):
                    dx_img += N

                dy_img = swap_y - tswap_y
                if (dy_img > N // 2):
                    dy_img -= N
                elif (dy_img < -(N // 2)):
                    dy_img += N

                dists[tracer_ind, 0] += dx_img + dx
                dists[tracer_ind, 1] += dy_img + dy

                # Also update its squared distance.
                dists_sq[curr_step, tracer_ind] = dists[tracer_ind, 0]**2 + dists[tracer_ind, 1]**2

                # And its position. Note that it moved from the (swap_x, swap_y) spot!
                tracers[tracer_ind, 0] = (swap_x + dx) % N
                tracers[tracer_ind, 1] = (swap_y + dy) % N

        # Update the time.
        time += dt

        # Update the running total.
        times[curr_step] = time

        # Output the grid for visualization.
        if (output):
            output_grid_tracers(tracers, n_tracers, grid)

    # And save the time and squared distance arrays for post-processing.
    np.save('dist', dists_sq)
    np.save('time', times)


def diffuse_tracers_lt_samp(grid, bonds, N, num_steps, lt_max, num_times, T,
                            thresh, n_tracers, save_fold, save_str, output=False):
    """
    Description:
    ------------
    Follows a number of random tracer particles as they diffuse through the
    grid using Gillespie dynamics with a standard kinetic Monte Carlo
    implementation.
    Samples the dynamics at equally spaced intervals in log time, for easier
    data post-processing.

    Input:
    ------
    grid      : The grid of spin values.
    bonds     : The bond energies in the grid.
    N         : The dimension of the N*N grid.
    num_steps : The number of simulation timesteps to perform (there is one
                exchange per simulation timestep).
    lt_max    : The maximum log time.
    num_times : The number of points in time.
    T         : The temperature at which we compute the diffusive dynamics.
    thresh    : The threshold applied to rates computed in the simulation.
    n_tracers : The number of tracer particles to follow.
    save_fold : Folder to save the output data in.
    save_str  : String to append to the output when saving.
    output    : If True, print the grid after every swap (for debugging).
    """
    # Declare the equally spaced log time array.
    log_times = np.linspace(0, lt_max, num_times)

    # Keep track of the current time, for comparison to log_times.
    time = 0

    # Keeps track of where we are in the log_times array.
    lt_ind = 0

    # First index: tracer particle.
    # Second index: current x or y distance.
    dists = np.zeros((n_tracers, 2))

    # First index: timestep.
    # Second index: tracer particle.
    # i.e., the squared distance trajectory is stored columnwise for each
    # tracer particle in this matrix. (64-bit integers so that long runs
    # cannot overflow.)
    dists_sq = np.zeros((num_times, n_tracers), dtype=np.int64)

    # Create a list of tracer particles.
    # First index: tracer particle.
    # Second index: current x and y coordinates in the grid.
    tracers = np.zeros((n_tracers, 2), dtype=np.int16)

    # Fill up the list of tracer particles.
    for curr_tracer in range(n_tracers):
        # Pick a random tracer particle.
        tracer_x, tracer_y = pick_particle(grid, N)

        # Add it to the list.
        tracers[curr_tracer, 0] = tracer_x
        tracers[curr_tracer, 1] = tracer_y

    # Compute the rates in the simulation.
    rates = compute_rates(grid, bonds, N, T, thresh)

    # Keeps track of whether or not we need to store the data.
    store_data = False

    # Keep track of wall time per frame.
    real_times = [wall_time()]

    # Loop over all the steps.
    for curr_step in range(num_steps):
        # Print some diagnostic information, and periodically save the output.
        if (curr_step % (N*N) == 0):
            real_times.append(wall_time())
            print 'Currently on step', curr_step // (N*N), 'per particle on processor', save_str, '. Took:', real_times[-1] - real_times[-2], 'seconds.'
            np.save(save_fold + '/dist' + str(save_str), dists_sq)
            np.save(save_fold + '/log_time' + str(save_str), log_times)

        # Swap a particle.
        swap_x, swap_y, dx, dy, dt, cluster = swap_step(grid, bonds, rates, N, T, curr_step, thresh)

        # Update the time.
        time += dt

        # Do the time comparison (guarding against running off the end of
        # the log_times array).
        if (lt_ind < num_times and np.log(time) > log_times[lt_ind]):
            store_data = True

        # Check if we swapped a particle on the boundary of the same cluster
        # that contains a tracer particle. To do this, we use set
        # intersection, and first map the rows of the n_tracers x 2 NumPy
        # array to a set of n_tracers tuples.
        tracers_in_cluster = list(set(map(tuple, tracers)).intersection(set(cluster)))

        # We've got a tracer in the cluster.
        if (len(tracers_in_cluster) > 0):
            # Generate a uniform random integer to select which particle in
            # the cluster actually swapped.
            ind = np.random.randint(len(cluster))

            # Check if we picked a tracer particle.
            if (ind < len(tracers_in_cluster)):
                # Get the tracer particle's grid indices.
                tswap_x, tswap_y = tracers_in_cluster[ind]

                # Figure out the tracer particle's index in the linear tracers array.
                tracer_ind = np.where(np.all(np.equal(tracers, [tswap_x, tswap_y]), axis=1))[0][0]

                # Update the tracer's x and y distances. We need the shortest
                # (signed, minimum-image) displacement from the tracer to the
                # swap site - wrap around the boundary, or go directly.
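                # (A sketch of the minimum-image convention assumed here: on
                # a periodic lattice of size N, a raw coordinate difference d
                # is mapped into (-N/2, N/2] by adding or subtracting N, so
                # the tracer is credited with the shorter of the two paths
                # around the torus. For example, with N = 25, swap_x = 24 and
                # tswap_x = 0 give d = 24, which wraps to d = -1.)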
                dx_img = swap_x - tswap_x
                if (dx_img > N // 2):
                    dx_img -= N
                elif (dx_img < -(N // 2)):
                    dx_img += N

                dy_img = swap_y - tswap_y
                if (dy_img > N // 2):
                    dy_img -= N
                elif (dy_img < -(N // 2)):
                    dy_img += N

                dists[tracer_ind, 0] += dx_img + dx
                dists[tracer_ind, 1] += dy_img + dy

                # And its position. Note that it moved from the (swap_x, swap_y) spot!
                tracers[tracer_ind, 0] = (swap_x + dx) % N
                tracers[tracer_ind, 1] = (swap_y + dy) % N

                # If necessary, update all the stored R^2 values.
                if store_data:
                    # Figure out the latest stored time we have passed.
                    last_ind = np.where(np.log(time) > log_times)[0][-1]

                    # Fill in the in-between times for the moved tracer.
                    for curr_ind in range(lt_ind, last_ind):
                        dists_sq[curr_ind, tracer_ind] = dists_sq[lt_ind - 1, tracer_ind]

                    # And store the most up-to-date data.
                    dists_sq[last_ind, tracer_ind] = dists[tracer_ind, 0]**2 + dists[tracer_ind, 1]**2

                    # Handle the tracers that were not updated.
                    for curr_tracer in range(n_tracers):
                        if (curr_tracer != tracer_ind):
                            for curr_ind in range(lt_ind, last_ind + 1):
                                dists_sq[curr_ind, curr_tracer] = dists_sq[lt_ind - 1, curr_tracer]

                    # Make sure we don't store again at an incorrect time by accident.
                    store_data = False

                    # And finally, advance the log-time counter.
                    lt_ind = last_ind + 1

            # Didn't pick any of the involved tracers - keep all the stored
            # distances the same, if we need to store.
            else:
                if store_data:
                    # Figure out the latest stored time we have passed.
                    last_ind = np.where(np.log(time) > log_times)[0][-1]

                    # Loop over all the tracers and keep their distances the same.
                    for curr_tracer in range(n_tracers):
                        for curr_ind in range(lt_ind, last_ind + 1):
                            dists_sq[curr_ind, curr_tracer] = dists_sq[lt_ind - 1, curr_tracer]

                    store_data = False

                    # And finally, advance the log-time counter.
                    lt_ind = last_ind + 1

        # No tracer in the cluster - keep all the stored distances the same.
        else:
            if store_data:
                # Figure out the latest stored time we have passed.
                last_ind = np.where(np.log(time) > log_times)[0][-1]

                # Loop over all the tracers and keep their distances the same.
                for curr_tracer in range(n_tracers):
                    for curr_ind in range(lt_ind, last_ind + 1):
                        dists_sq[curr_ind, curr_tracer] = dists_sq[lt_ind - 1, curr_tracer]

                store_data = False

                # And finally, advance the log-time counter.
                lt_ind = last_ind + 1

        # Output the grid for visualization.
        if (output):
            output_grid_tracers(tracers, n_tracers, grid)

    # Determine the final log time we actually got to.
    tf_ind = np.where(np.log(time) < log_times)[0][0]

    # Save the output, even past the final tf_ind, so that disorder
    # averaging is simpler.
    np.save(save_fold + '/dist' + str(save_str), dists_sq)
    np.save(save_fold + '/log_time' + str(save_str), log_times)

    # Return the grid so we can play around with the 'atomistic' approach.
    return grid


def pick_particle(grid, N):
    """
    Description:
    ------------
    Simply picks a random particle from the grid.

    Input:
    ------
    grid : The grid of spins.
    N    : The size of the N*N grid.

    Output:
    -------
    swap_x : The x index of the chosen particle.
    swap_y : The y index of the chosen particle.
    """
    # Pick a random site.
    swap_x = np.random.randint(0, N)
    swap_y = np.random.randint(0, N)

    # Make sure it's a particle.
    while (grid[swap_x, swap_y] != 1):
        swap_x = np.random.randint(0, N)
        swap_y = np.random.randint(0, N)

    return swap_x, swap_y


def unwrapper(x):
    """
    Description
    -----------
    The map/Pool functionality provided by the Python multiprocessing
    library requires the function being mapped to take only one argument.
    Hence, we pass all arguments as a tuple, and use this function to unpack
    them and pass them to diffuse_tracers_lt_samp.
    """
    diffuse_tracers_lt_samp(*x)


# Needed for parallelization.
def unwrapper_Tmax(x):
    """
    Description
    -----------
    The map/Pool functionality provided by the Python multiprocessing
    library requires the function being mapped to take only one argument.
    Hence, we pass all arguments as a tuple, and use this function to unpack
    them and pass them to diffuse_tracers_lt_samp.

    The difference between this function and the above is that this one is
    used for the 'atomistic' approach - we first diffuse at a higher
    temperature for a bit, and then take that equilibrated grid and record
    the data from the diffusive dynamics on that modified grid.
    """
    # Maximum temperature that the glass was exposed to.
    Tm = .5

    # Diffusion temperature.
    T_diff = x[6]

    # Rate threshold for the diffusion temperature.
    rate_thresh = x[7]

    # Rate threshold for the maximum temperature, chosen so that the energy
    # threshold is identical.
    mod_rate_thresh = rate_thresh**(Tm/T_diff)

    # Do some diffusion dynamics for a bit at Tm. We arbitrarily use 1/10th
    # of the simulation time over which we record data.
    post_grid = diffuse_tracers_lt_samp(x[0], x[1], x[2], x[3] // 10, x[4], x[5],
                                        Tm, mod_rate_thresh, x[8], x[9], x[10])

    # Pick up all the original input except for the grid.
    y = x[1:]

    # Run the simulation after the initial diffusive equilibration, and only
    # save these results.
    diffuse_tracers_lt_samp(post_grid, *y)


if __name__ == '__main__':
    # Define the parameters.
    # Tm = 1
    width = 5.
    T = .5
    conc = .75
    N = 25
    ferro = True
    fold = 'tmp5_w5_N25_Ethresh'
    logt_max = 700
    nt = 7000
    nsims = 20
    thresh = 1.e-3

    # Number of sweeps - each sweep is N*N swap steps.
    num_steps = 100
    num_tracers = int((conc - .1)*N*N)

    # Create the output folder for the data.
    if (not exists(fold)):
        makedirs(fold)

    # Create a pool of processes to do many runs at the same time.
    pool = Pool()
    conditions = [(instantiate_grid(N, conc), draw_bonds(N, width, (not ferro)),
                   N, num_steps*N*N, logt_max, nt, T, thresh, num_tracers,
                   fold, str(ii)) for ii in range(nsims)]

    # Do the parallel simulations.
    rslts = pool.map(unwrapper_Tmax, conditions)
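
# ----------------------------------------------------------------------------
# A minimal serial sketch of how a single run can be launched without the
# process pool (illustrative only - instantiate_grid and draw_bonds come from
# the ising module imported above, and the parameter values mirror the
# defaults in __main__):
#
#     grid  = instantiate_grid(N, conc)           # random spin configuration
#     bonds = draw_bonds(N, width, not ferro)     # random bond energies
#     diffuse_tracers_lt_samp(grid, bonds, N, num_steps*N*N, logt_max, nt,
#                             T, thresh, num_tracers, fold, '0')
#
# The pool.map call above is equivalent to running nsims independent copies
# of this snippet, one per worker process, each with its own disorder draw.
# ----------------------------------------------------------------------------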