Swarm-NG  1.1
log_rvs.hpp
/*************************************************************************
 * Copyright (C) 2011 by Eric Ford and the Swarm-NG Development Team    *
 *                                                                       *
 * This program is free software; you can redistribute it and/or modify *
 * it under the terms of the GNU General Public License as published by *
 * the Free Software Foundation; either version 3 of the License.       *
 *                                                                       *
 * This program is distributed in the hope that it will be useful,      *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of       *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        *
 * GNU General Public License for more details.                         *
 *                                                                       *
 * You should have received a copy of the GNU General Public License    *
 * along with this program; if not, write to the                        *
 * Free Software Foundation, Inc.,                                      *
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.             *
 ************************************************************************/

#pragma once

#include <iostream>
#include <fstream>
#include <sstream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/device_malloc.h>
#include <thrust/device_free.h>
#include <thrust/system_error.h>
#include <thrust/sort.h>

namespace swarm {
  namespace monitors {

#if __CUDA_ARCH__ >= 200

struct log_rvs_params {
    double tol, dt;        // logging tolerance and (fixed) integrator time step
    int next_time_idx;     // index of the next observation time to check
    int nbod, num_times;   // number of bodies and of observation times
    thrust::device_ptr<double> rv_times_dev;  // sorted observation times (device memory)

    __host__ log_rvs_params(const config &cfg)
    {
        num_times = 0;
        next_time_idx = 0;
        // other parameters
        tol = cfg.optional("log_rvs_tol", 2.e-8);
        // WARNING: assumes Hermite Fixed w/ nbod bodies
        nbod = cfg.require<int>("nbod");
        dt = cfg.require("time_step", 0.0);  // use hermite's timestep

        // Read observation times to be uploaded into a device array
        std::string filename;
        filename = cfg.optional("rv_filename", std::string("rv.in"));
        std::ifstream rv_file(filename.c_str());
        if(!rv_file.good())
        {
            std::cerr << "# Can't open >" << filename << "<.\n";
            return;
        }

        thrust::host_vector<double> rv_times_host;
        std::string line;
        while(std::getline(rv_file, line))
        {
            std::stringstream line_stream(line);
            double time;
            if(!(line_stream >> time)) continue;  // skip blank or malformed lines
            try { rv_times_host.push_back(time); }
            catch(const thrust::system_error &e)
            { std::cerr << "Error push_back: " << e.what() << std::endl; }
        }
        rv_file.close();
        std::cerr << "# Read " << rv_times_host.size() << " RV observation times from " << filename << "\n";

        // Sort the times so the monitor can step through them monotonically
        try
        { thrust::sort(rv_times_host.begin(), rv_times_host.end()); }
        catch(const thrust::system_error &e)
        { std::cerr << "Error sort: " << e.what() << std::endl; }

        // Allocate & upload sorted list of times onto device
        try
        {
            num_times = rv_times_host.size();
            rv_times_dev = thrust::device_malloc<double>(num_times);
        }
        catch(const thrust::system_error &e)
        { std::cerr << "Error: device_malloc: " << e.what() << std::endl; }

        try
        { thrust::copy(rv_times_host.begin(), rv_times_host.end(), rv_times_dev); }
        catch(const thrust::system_error &e)
        { std::cerr << "Error copy: " << e.what() << std::endl; }
    }

    // WARNING: This is not called automatically by the destructor,
    // because there is no easy way to make that happen from the
    // host-side destructor only.
    __host__ void deallocate_device_data()
    {
        thrust::device_free(rv_times_dev);
    }

};
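
// Illustrative example (not part of the original source): the observation-
// times file read above is plain text with one time per line, in the same
// units as the integration time. A hypothetical "rv.in" might contain:
//
//     10.0
//     12.5
//     25.0
//
// and a matching configuration could include (all values made up):
//
//     nbod=3
//     time_step=0.001
//     log_rvs_tol=2.e-8
//     rv_filename=rv.in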

template<class log_t>
class log_rvs {
  public:
    typedef log_rvs_params params;

  private:
    params _params;
    bool condition_met;

    ensemble::SystemRef& _sys;
    // double _next_log_time;
    log_t& _log;

  public:
    template<class T>
    static GENERIC int thread_per_system(T compile_time_param) {
        return 1;
    }

    template<class T>
    static GENERIC int shmem_per_system(T compile_time_param) {
        return 0;
    }

    GPUAPI bool is_deactivate_on() { return false; }
    GPUAPI bool is_log_on() { return _params.tol != 0.; }
    GPUAPI bool is_verbose_on() { return false; }
    GPUAPI bool is_any_on() { return is_deactivate_on() || is_log_on() || is_verbose_on(); }
    GPUAPI bool is_condition_met() { return condition_met; }
    GPUAPI bool need_to_log_system()
    { return (is_log_on() && is_condition_met()); }
    GPUAPI bool need_to_deactivate()
    { return (is_deactivate_on() && is_condition_met()); }

    GPUAPI void log_system() { log::system(_log, _sys); }

    GPUAPI void operator () (const int thread_in_system)
    {
        if(pass_one(thread_in_system))
            pass_two(thread_in_system);
    }

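    // Monitors are invoked in two passes (see operator() above): pass_one()
    // decides whether anything needs to be done for this system during the
    // current step; pass_two() then performs the logging and reports whether
    // the condition was met.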
    GPUAPI bool pass_one(const int thread_in_system)
    {
        condition_met = false;
        double t = _sys.time();
        if(_params.next_time_idx >= _params.num_times) return false;
        // skip over any observation times that have already passed
        while(_params.rv_times_dev[_params.next_time_idx] < t)
        {
            _params.next_time_idx++;
            if(_params.next_time_idx >= _params.num_times) return false;
        }
        double log_time = _params.rv_times_dev[_params.next_time_idx];
        // log only if the next observation time falls within the current step
        if((log_time >= t) && (log_time < t + _params.dt))
            return true;
        else
            return false;
    }
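
    // Worked example (illustrative numbers, not from the source): with sorted
    // observation times {10.0, 20.0}, next_time_idx = 0, t = 9.5 and dt = 1.0,
    // the while loop does not advance, and log_time = 10.0 satisfies both
    // 10.0 >= 9.5 and 10.0 < 10.5, so pass_one() returns true and pass_two()
    // will extrapolate the stellar velocity to t = 10.0 and log it.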


    template<int nbod>
    GPUAPI double calc_star_vz(const int& thread_in_system, const double& dt)
    {
        extern __shared__ char shared_mem[];
        // NOTE: the typedef for par_t is hidden in this listing; the line
        // below is an assumed reconstruction of the compile-time parameter
        // type carrying nbod.
        typedef swarm::compile_time_params_t<nbod> par_t;
        typedef swarm::gpu::bppt::GravitationAccJerk<par_t> calcForces_t;
        typedef typename calcForces_t::shared_data grav_t;
        // locate this system's slot in the shared-memory workspace used by
        // the force calculation
        calcForces_t calcForces(_sys, *( (grav_t*) (&shared_mem[
              (swarm::gpu::bppt::sysid_in_block() % SHMEM_CHUNK_SIZE) * sizeof(double)
            + (swarm::gpu::bppt::sysid_in_block() / SHMEM_CHUNK_SIZE)
              * SHMEM_CHUNK_SIZE * (nbod*(nbod-1)*3*sizeof(double)) ]) ) );

        double acc, jerk;
#if USING_INTEGRATOR_THAT_OVER_WRITES_SHARED_DATA || 1
        const int bid = swarm::gpu::bppt::thread_body_idx(nbod);
        const int cid = swarm::gpu::bppt::thread_component_idx(nbod);
        double pos = _sys[bid][cid].pos(), vel = _sys[bid][cid].vel();
#if 1
        calcForces(thread_in_system, bid, cid, pos, vel, acc, jerk);
#else
        __syncthreads();
        if(thread_in_system < calcForces_t::pair_count)
            calcForces.calc_pair(thread_in_system);
        __syncthreads();
#endif
#endif
        if(thread_in_system == 0)
        {
            // sum the acceleration & jerk on body 0 (the star), component 2 (z)
            calcForces.sum(0, 2, acc, jerk);
            double star_vz = _sys[0][2].vel() + dt*(acc + 0.5*dt*jerk);
            return star_vz;
        }
        else
        { return 0.; }
    }
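
    // The expression above is the second-order Taylor extrapolation of the
    // star's z velocity, v(t+dt) = v(t) + a*dt + (1/2)*j*dt^2, where a and j
    // are the acceleration and jerk at time t, factored as v + dt*(a + 0.5*dt*j).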

    GPUAPI int pass_two(const int thread_in_system)
    {
        if(is_any_on())
        {
            double t_log = _params.rv_times_dev[_params.next_time_idx];
            double dt = t_log - _sys.time();
            const int nbod = _params.nbod;
            double star_vz = 0.;  // initialized in case nbod is unsupported below
            if((nbod == 3) && (nbod < MAX_NBODIES))
                star_vz = calc_star_vz<3>(thread_in_system, dt);
#if 0
            else if((nbod == 4) && (nbod < MAX_NBODIES))
                star_vz = calc_star_vz<4>(thread_in_system, dt);
            else if((nbod == 5) && (nbod < MAX_NBODIES))
                star_vz = calc_star_vz<5>(thread_in_system, dt);
            else if((nbod == 6) && (nbod < MAX_NBODIES))
                star_vz = calc_star_vz<6>(thread_in_system, dt);
            else if((nbod == 7) && (nbod < MAX_NBODIES))
                star_vz = calc_star_vz<7>(thread_in_system, dt);
            else if((nbod == 8) && (nbod < MAX_NBODIES))
                star_vz = calc_star_vz<8>(thread_in_system, dt);
#endif
            if(thread_in_system == 0)
            {
                log::event(_log, log::EVT_RV_OBS, t_log, _sys.id(), 0, star_vz);
            }
            _params.next_time_idx++;
            condition_met = true;
        }
        return condition_met;
    }

    GPUAPI log_rvs(const params& p, ensemble::SystemRef& s, log_t& l)
        : _params(p), _sys(s), _log(l) {}

};

#endif

  } // end namespace monitors
} // end namespace swarm
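
The constructor above uses a standard host-side Thrust idiom: accumulate values in a thrust::host_vector, sort them, allocate raw device storage with thrust::device_malloc, copy the data across with thrust::copy, and release it later with thrust::device_free. The standalone sketch below demonstrates the same idiom outside of Swarm-NG; the file name and its contents are made-up examples, and the error handling is condensed into a single try/catch.

#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <thrust/copy.h>
#include <thrust/device_free.h>
#include <thrust/device_malloc.h>
#include <thrust/device_ptr.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/system_error.h>

int main()
{
    // Read one observation time per line (hypothetical file name).
    std::ifstream in("rv.in");
    thrust::host_vector<double> times_host;
    std::string line;
    while(std::getline(in, line))
    {
        std::stringstream ss(line);
        double t;
        if(ss >> t) times_host.push_back(t);  // skip blank or malformed lines
    }
    if(times_host.empty()) return 1;

    try
    {
        // Sort on the host, then upload into raw device storage.
        thrust::sort(times_host.begin(), times_host.end());
        thrust::device_ptr<double> times_dev =
            thrust::device_malloc<double>(times_host.size());
        thrust::copy(times_host.begin(), times_host.end(), times_dev);

        // device_ptr supports host-side element access (one transfer each).
        std::cout << "# earliest time: " << times_dev[0] << "\n";

        // Memory from device_malloc is not freed automatically.
        thrust::device_free(times_dev);
    }
    catch(const thrust::system_error &e)
    { std::cerr << "Thrust error: " << e.what() << std::endl; }

    return 0;
}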