#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "TH/generic/THTensorRandom.cpp"
#else

#include <cmath>

#include <TH/THGenerator.hpp>

void THTensor_(random)(THTensor *self, THGenerator *_generator)
{
  std::lock_guard<std::mutex> lock(_generator->mutex);
#if defined(TH_REAL_IS_BYTE)
  TH_TENSOR_APPLY(scalar_t, self, *self_data = (uint8_t)(THRandom_random(_generator) % (UINT8_MAX + 1)););
#elif defined(TH_REAL_IS_CHAR)
  TH_TENSOR_APPLY(scalar_t, self, *self_data = (int8_t)(THRandom_random(_generator) % (INT8_MAX + 1)););
#elif defined(TH_REAL_IS_SHORT)
  TH_TENSOR_APPLY(scalar_t, self, *self_data = (int16_t)(THRandom_random(_generator) % (INT16_MAX + 1)););
#elif defined(TH_REAL_IS_INT)
  TH_TENSOR_APPLY(scalar_t, self, *self_data = (int32_t)(THRandom_random(_generator) % (INT32_MAX + 1UL)););
#elif defined(TH_REAL_IS_LONG)
  TH_TENSOR_APPLY(scalar_t, self, *self_data = (uint64_t)(THRandom_random64(_generator) % (LONG_MAX + 1ULL)););
#elif defined(TH_REAL_IS_FLOAT)
  TH_TENSOR_APPLY(scalar_t, self, *self_data = (float)(THRandom_random(_generator) % ((1ULL << FLT_MANT_DIG) + 1)););
#elif defined(TH_REAL_IS_DOUBLE)
  TH_TENSOR_APPLY(scalar_t, self, *self_data = (double)(THRandom_random64(_generator) % ((1ULL << DBL_MANT_DIG) + 1)););
#elif defined(TH_REAL_IS_BOOL)
  TH_TENSOR_APPLY(scalar_t, self, *self_data = (bool)(THRandom_random(_generator) % 2););
#else
#error "Unknown type"
#endif
}

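/* clampedRandom: fills `self` with integers drawn uniformly from [min, max).
   The draw is `rand % range + min`; when the range needs more than 32 bits
   (long/float/double tensors), the 64-bit generator path is used instead.
   Note that plain modulo reduction carries a small bias whenever `range`
   does not evenly divide the generator's output range. */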
void THTensor_(clampedRandom)(THTensor *self, THGenerator *_generator, int64_t min, int64_t max) {
  std::lock_guard<std::mutex> lock(_generator->mutex);
  THArgCheck(max > min, 2, "max must be greater than min, but got: min = %lld, max = %lld", min, max);
  uint64_t range = max - min;
#if defined(TH_REAL_IS_LONG) || defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
  if (range >= 1ULL << 32) {
    TH_TENSOR_APPLY(scalar_t, self, *self_data = static_cast<scalar_t>(static_cast<int64_t>((THRandom_random64(_generator) % range) + min));)
    return;
  }
#endif
  TH_TENSOR_APPLY(scalar_t, self, *self_data = static_cast<scalar_t>(static_cast<int64_t>((THRandom_random(_generator) % range) + min));)
}

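/* cappedRandom: convenience wrapper that samples uniformly from [0, max). */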
void THTensor_(cappedRandom)(THTensor *self, THGenerator *_generator, int64_t max) {
  THArgCheck(max > 0, 1, "max must be positive, but got: max = %lld", max);
  THTensor_(clampedRandom)(self, _generator, 0, max);
}

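/* geometric: fills `self` with geometric(p) samples, i.e. the index of the
   first success in a run of independent Bernoulli(p) trials. */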
void THTensor_(geometric)(THTensor *self, THGenerator *_generator, double p)
{
  std::lock_guard<std::mutex> lock(_generator->mutex);
  TH_TENSOR_APPLY(scalar_t, self, *self_data = (scalar_t)THRandom_geometric(_generator, p););
}

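/* The continuous-distribution samplers below are compiled only for float and
   double tensor types. uniform draws from the half-open interval [a, b);
   the float specialization calls THRandom_uniformFloat so the sample is
   produced at single precision rather than round-tripped through double. */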
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)

#if defined(TH_REAL_IS_FLOAT)
#define TH_REAL_MIN FLT_MIN
#elif defined(TH_REAL_IS_DOUBLE)
#define TH_REAL_MIN DBL_MIN
#endif

void THTensor_(uniform)(THTensor *self, THGenerator *_generator, double a, double b)
{
  std::lock_guard<std::mutex> lock(_generator->mutex);
#if defined(TH_REAL_IS_FLOAT)
  TH_TENSOR_APPLY(scalar_t, self, *self_data =
    (scalar_t)THRandom_uniformFloat(_generator, (scalar_t)a, (scalar_t)b););
#else
  TH_TENSOR_APPLY(scalar_t, self, *self_data =
    (scalar_t)THRandom_uniform(_generator, a, b););
#endif
}

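/* normal: fills `self` with N(mean, stddev^2) samples. Contiguous tensors
   with at least 16 elements take the vectorized THVector_(normal_fill) fast
   path; everything else falls back to the element-wise sampler. */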
void THTensor_(normal)(THTensor *self, THGenerator *_generator, double mean, double stddev)
{
  std::lock_guard<std::mutex> lock(_generator->mutex);
  const int64_t size = THTensor_(numel)(self);
  if (size >= 16 && THTensor_(isContiguous)(self)) {
    THVector_(normal_fill)(THStorage_(data)(THTensor_getStoragePtr(self)) + self->storage_offset(), size, _generator, mean, stddev);
  } else {
    TH_TENSOR_APPLY(scalar_t, self, *self_data = (scalar_t)THRandom_normal(_generator, mean, stddev););
  }
}

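/* The next three helpers build per-element normal distributions out of the
   sampler above plus elementwise ops:
     normal_means:         self = means + N(0, stddev)
     normal_stddevs:       self = mean + stddevs * N(0, 1)
     normal_means_stddevs: self = means + stddevs * N(0, 1)  */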
void THTensor_(normal_means)(THTensor *self, THGenerator *gen, THTensor *means, double stddev)
{
  THTensor_(resizeAs)(self, means);
  THTensor_(normal)(self, gen, 0, stddev);
  THTensor_(cadd)(self, self, 1, means);
}

void THTensor_(normal_stddevs)(THTensor *self, THGenerator *gen, double mean, THTensor *stddevs)
{
  THTensor_(resizeAs)(self, stddevs);
  THTensor_(normal)(self, gen, 0, 1);
  THTensor_(cmul)(self, self, stddevs);
  THTensor_(add)(self, self, mean);
}

void THTensor_(normal_means_stddevs)(THTensor *self, THGenerator *gen, THTensor *means, THTensor *stddevs)
{
  THTensor_(resizeAs)(self, means);
  THTensor_(normal)(self, gen, 0, 1);
  THTensor_(cmul)(self, self, stddevs);
  THTensor_(cadd)(self, self, 1, means);
}

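/* exponential: fills `self` with Exp(lambda) samples, classically obtained
   by inverse transform: -log(1 - u) / lambda for u uniform in [0, 1). */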
void THTensor_(exponential)(THTensor *self, THGenerator *_generator, double lambda)
{
  std::lock_guard<std::mutex> lock(_generator->mutex);
  TH_TENSOR_APPLY(scalar_t, self, *self_data = (scalar_t)THRandom_exponential(_generator, lambda););
}

#undef TH_REAL_MIN

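/* cauchy: samples from a Cauchy distribution centered at `median` with scale
   `sigma`; inverse transform gives median + sigma * tan(pi * (u - 0.5)). */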
void THTensor_(cauchy)(THTensor *self, THGenerator *_generator, double median, double sigma)
{
  std::lock_guard<std::mutex> lock(_generator->mutex);
  TH_TENSOR_APPLY(scalar_t, self, *self_data = (scalar_t)THRandom_cauchy(_generator, median, sigma););
}

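/* logNormal: fills `self` with log-normal samples, i.e. the exponential of a
   normally distributed variable; mean and stdv are forwarded to
   THRandom_logNormal. */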
void THTensor_(logNormal)(THTensor *self, THGenerator *_generator, double mean, double stdv)
{
  std::lock_guard<std::mutex> lock(_generator->mutex);
  TH_TENSOR_APPLY(scalar_t, self, *self_data = (scalar_t)THRandom_logNormal(_generator, mean, stdv););
}

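/* multinomialAliasSetup: builds the probability table q and alias table J
   for the alias method (Vose's variant). Each of the `inputsize` bins holds
   a binary mixture: with probability q[i] a draw landing in bin i yields i
   itself, otherwise it yields the alias J[i]. Setup pairs bins whose scaled
   mass inputsize*p[i] is below 1 ("smaller") with bins above 1 ("larger"),
   donating the larger bin's excess mass until every bin is filled. After
   setup, each draw costs O(1).

   Worked example (illustrative, not from the original source): for
   p = {0.5, 0.25, 0.25} the scaled masses are {1.5, 0.75, 0.75}; pairing
   leaves q = {1.0, 0.75, 0.75} with J = {0, 0, 0}, so the chance of drawing
   category 0 is 1/3 + (1/3)(0.25) + (1/3)(0.25) = 0.5, as required. */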
void THTensor_(multinomialAliasSetup)(THTensor *probs, THLongTensor *J, THTensor *q)
{
  int64_t inputsize = THTensor_(nElement)(probs);
  int64_t i = 0;
  THLongTensor *smaller = THLongTensor_newWithSize1d(inputsize);
  THLongTensor *larger = THLongTensor_newWithSize1d(inputsize);
  int64_t small_c = 0;
  int64_t large_c = 0;
  THLongTensor_resize1d(J, inputsize);
  THTensor_(resize1d)(q, inputsize);
  scalar_t *q_data = q->data<scalar_t>();

  /* Split bins into the "smaller" (scaled mass < 1) and "larger" worklists. */
  for (i = 0; i < inputsize; i++) {
    THLongTensor_fastSet1d(J, i, 0L);
    scalar_t val = THTensor_(fastGet1d)(probs, i);
    THTensor_(fastSet1d)(q, i, inputsize*val);
    if (inputsize * val < 1.0) {
      THLongTensor_fastSet1d(smaller, small_c, i);
      small_c += 1;
    } else {
      THLongTensor_fastSet1d(larger, large_c, i);
      large_c += 1;
    }
  }

  /* Pair small bins with large ones: the small bin keeps its own mass and
     takes the large bin as its alias; the donated mass is deducted from the
     large bin, which may itself become small. */
  int64_t large, small;
  while (small_c > 0 && large_c > 0) {
    large = THLongTensor_fastGet1d(larger, large_c-1);
    small = THLongTensor_fastGet1d(smaller, small_c-1);

    THLongTensor_fastSet1d(J, small, large);
    q_data[large * q->stride(0)] -= 1.0 - THTensor_(fastGet1d)(q, small);

    if (q_data[large * q->stride(0)] < 1.0) {
      THLongTensor_fastSet1d(smaller, small_c-1, large);
      large_c -= 1;
    } else {
      THLongTensor_fastSet1d(larger, large_c-1, large);
      small_c -= 1;
    }
  }

  scalar_t q_min = THTensor_(fastGet1d)(q, inputsize-1);
  scalar_t q_max = q_min;
  for (i = 0; i < inputsize; i++) {
    scalar_t q_temp = THTensor_(fastGet1d)(q, i);
    if (q_temp < q_min)
      q_min = q_temp;
    else if (q_temp > q_max)
      q_max = q_temp;
  }
  THArgCheckWithCleanup((q_min > 0),
    THCleanup(THLongTensor_free(smaller); THLongTensor_free(larger);), 2,
    "q_min must be greater than 0");

  if (q_max > 1) {
    for (i = 0; i < inputsize; i++)
      q_data[i*q->stride(0)] /= q_max;
  }
  /* A bin whose kept mass still exceeds 1 never received an alias; clamp its
     kept-probability to 1 so its unset J entry is never consulted. */
  for (i = 0; i < inputsize; i++) {
    if (THTensor_(fastGet1d)(q, i) > 1.0)
      THTensor_(fastSet1d)(q, i, 1.0);
  }
  THLongTensor_free(smaller);
  THLongTensor_free(larger);
}

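/* multinomialAliasDraw: O(1)-per-sample drawing against the (q, J) tables
   from multinomialAliasSetup: pick a bin uniformly at random, then flip a
   Bernoulli(q[bin]) coin to keep the bin or fall through to its alias
   J[bin]; _mask folds that choice into one branch-free expression. */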
void THTensor_(multinomialAliasDraw)(THLongTensor *self, THGenerator *_generator, THLongTensor *J, THTensor *q)
{
  std::lock_guard<std::mutex> lock(_generator->mutex);
  int64_t K = THLongTensor_nElement(J);
  int64_t output_nelem = THLongTensor_nElement(self);
  int64_t i = 0, _mask = 0;
  scalar_t _q;
  int64_t rand_ind, sample_idx, J_sample;

  for (i = 0; i < output_nelem; i++) {
    /* pick a bin uniformly from [0, K) */
    rand_ind = THRandom_uniform(_generator, 0, K);
    /* probability of keeping the bin rather than its alias */
    _q = THTensor_(fastGet1d)(q, rand_ind);
    _mask = THRandom_bernoulli(_generator, _q);
    J_sample = THLongTensor_fastGet1d(J, rand_ind);
    /* keep rand_ind when the trial succeeds, otherwise take the alias */
    sample_idx = J_sample*(1 - _mask) + (rand_ind + 1L) * _mask;
    THLongTensor_fastSet1d(self, i, sample_idx - 1L);
  }
}

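/* multinomial: draws n_sample category indices per row of prob_dist. For
   each row it builds a normalized cumulative distribution in double
   precision, then inverts a uniform sample with binary search (inverse-CDF
   sampling). Without replacement, the drawn category's mass is removed and
   the cumulative distribution renormalized before the next draw. */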
void THTensor_(multinomial)(THLongTensor *self, THGenerator *_generator, THTensor *prob_dist, int n_sample, int with_replacement)
{
  std::lock_guard<std::mutex> lock(_generator->mutex);
  int64_t start_dim = THTensor_(nDimensionLegacyAll)(prob_dist);
  int64_t n_dist;
  int64_t n_categories;
  THDoubleTensor* cum_dist;
  int64_t i, j, k;

  if (start_dim == 1) {
    THTensor_(unsqueeze1d)(prob_dist, prob_dist, 0);
  }

  n_dist = THTensor_(size)(prob_dist, 0);
  n_categories = THTensor_(size)(prob_dist, 1);

  THArgCheckWithCleanup(n_sample > 0,
    THCleanup(if (start_dim == 1) THTensor_(squeeze1d)(prob_dist, prob_dist, 0);),
    2,
    "cannot sample n_sample <= 0 samples");

  if (!with_replacement) {
    THArgCheckWithCleanup((!with_replacement) && (n_sample <= n_categories),
      THCleanup(if (start_dim == 1) THTensor_(squeeze1d)(prob_dist, prob_dist, 0);),
      2,
      "cannot sample n_sample > prob_dist.size(1) samples without replacement");
  }

  /* cumulative probability distribution vector */
  cum_dist = THDoubleTensor_newWithSize1d(n_categories);

  /* will contain multinomial samples (category indices) */
  THLongTensor_resize2d(self, n_dist, n_sample);

  auto prod_dist_storage = THTensor_getStoragePtr(prob_dist);
  auto cum_dist_storage = THTensor_getStoragePtr(cum_dist);
  auto self_storage = THTensor_getStoragePtr(self);

  auto prod_dist_offset = prob_dist->storage_offset();
  auto prod_dist_stride_0 = prob_dist->stride(0);
  auto prod_dist_stride_1 = prob_dist->stride(1);

  auto cum_dist_offset = cum_dist->storage_offset();
  auto cum_dist_stride_0 = cum_dist->stride(0);

  auto self_dist_offset = self->storage_offset();
  auto self_dist_stride_0 = self->stride(0);
  auto self_dist_stride_1 = self->stride(1);

  for (i = 0; i < n_dist; i++) {
    /* Get the cumulative distribution from the probability distribution. */
    double sum = 0;
    double val;
    int n_zeros = 0;
    for (j = 0; j < n_categories; j++) {
      val = THStorage_(get)( \
        prod_dist_storage, \
        prod_dist_offset+i*prod_dist_stride_0+j*prod_dist_stride_1 \
      );
      THArgCheckWithCleanup((val >= 0),
        THCleanup(THDoubleTensor_free(cum_dist); if (start_dim == 1) THTensor_(squeeze1d)(prob_dist, prob_dist, 0);),
        2,
        "invalid multinomial distribution (encountering probability entry < 0)");
      THArgCheckWithCleanup((std::isfinite(val)),
        THCleanup(THDoubleTensor_free(cum_dist); if (start_dim == 1) THTensor_(squeeze1d)(prob_dist, prob_dist, 0);),
        2,
        "invalid multinomial distribution (encountering probability entry = infinity or NaN)");
      sum += val;
      if (val == 0) {
        n_zeros += 1;
      }
      THDoubleStorage_set( \
        cum_dist_storage, \
        cum_dist_offset+j*cum_dist_stride_0, \
        sum \
      );
    }
    THArgCheckWithCleanup((sum > 0),
      THCleanup(THDoubleTensor_free(cum_dist); if (start_dim == 1) THTensor_(squeeze1d)(prob_dist, prob_dist, 0);),
      2,
      "invalid multinomial distribution (sum of probabilities <= 0)");
    THArgCheckWithCleanup((with_replacement || (n_categories - n_zeros >= n_sample)),
      THCleanup(THDoubleTensor_free(cum_dist); if (start_dim == 1) THTensor_(squeeze1d)(prob_dist, prob_dist, 0);),
      2,
      "invalid multinomial distribution (with replacement=False, not enough non-zero categories to sample from)");

    /* Normalize the cumulative distribution so that its last value is 1,
       i.e. do not assume the original prob_dist row sums to one. */
    if ((sum > 0) || ((sum < 1.00001) && (sum > 0.99999))) {
      for (j = 0; j < n_categories; j++) {
        THDoubleTensor_data(cum_dist)[j*cum_dist_stride_0] /= sum;
      }
    }

    for (j = 0; j < n_sample; j++) {
      /* sample a probability mass from a uniform distribution */
      double uniform_sample = THRandom_uniform(_generator, 0, 1);
      /* Binary-search for the slot in which the mass falls, i.e.
         cum_dist[row][slot-1] < uniform_sample <= cum_dist[row][slot]. */
      int left_pointer = 0;
      int right_pointer = n_categories;
      int mid_pointer;
      double cum_prob;
      int sample_idx;
      /* Make sure the last cumulative-distribution bucket sums to 1. */
      THDoubleTensor_data(cum_dist)[(n_categories-1)*cum_dist_stride_0] = 1;

      while (right_pointer - left_pointer > 0) {
        mid_pointer = left_pointer + (right_pointer - left_pointer) / 2;
        cum_prob = THDoubleStorage_get( \
          cum_dist_storage, \
          cum_dist_offset+mid_pointer*cum_dist_stride_0 \
        );
        if (cum_prob < uniform_sample) {
          left_pointer = mid_pointer + 1;
        } else {
          right_pointer = mid_pointer;
        }
      }
      sample_idx = left_pointer;

      /* store in the result tensor */
      THLongStorage_set( \
        self_storage, \
        self_dist_offset+i*self_dist_stride_0+j*self_dist_stride_1, \
        sample_idx \
      );

      /* Once a sample is drawn it cannot be drawn again (sampling without
         replacement): remove its mass and renormalize the remainder. */
      if (!with_replacement && j < n_sample - 1) {
        double diff;
        double new_val = 0;
        double renorm_sum;

        if (sample_idx != 0) {
          new_val = THDoubleStorage_get( \
            cum_dist_storage, \
            cum_dist_offset+(sample_idx-1)*cum_dist_stride_0 \
          );
        }
        /* marginal cumulative mass (i.e. original probability) of the sample */
        diff = THDoubleStorage_get( \
          cum_dist_storage, \
          cum_dist_offset+sample_idx*cum_dist_stride_0 \
        ) - new_val;
        /* the marginals no longer sum to one */
        renorm_sum = 1.0 - diff;
        for (k = 0; k < n_categories; k++) {
          new_val = THDoubleStorage_get( \
            cum_dist_storage, \
            cum_dist_offset+k*cum_dist_stride_0 \
          );
          if (k >= sample_idx) {
            /* remove the sampled mass from all later cumulative values */
            new_val -= diff;
          }
          /* rescale so the marginals sum to one again */
          new_val /= renorm_sum;
          THDoubleStorage_set( \
            cum_dist_storage, \
            cum_dist_offset+k*cum_dist_stride_0, \
            new_val \
          );
        }
      }
    }
  }

  THDoubleTensor_free(cum_dist);

  if (start_dim == 1) {
    THLongTensor_resize1d(self, n_sample);
    THTensor_(squeeze1d)(prob_dist, prob_dist, 0);
  }
}

#endif /* floating point types only */

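/* getRNGState / setRNGState: serialize the generator state to and from a
   byte tensor so RNG state can be saved and restored. Compiled only for
   byte tensors; both directions check size and contiguity, and restoring
   additionally validates that the bytes form a valid THGeneratorState. */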
#if defined(TH_REAL_IS_BYTE)
void THTensor_(getRNGState)(THGenerator *_generator, THTensor *self)
{
  std::lock_guard<std::mutex> lock(_generator->mutex);
  static const size_t size = sizeof(THGeneratorState);
  THGeneratorState *rng_state;
  THTensor_(resize1d)(self, size);
  THArgCheck(THTensor_(nElement)(self) == size, 1, "RNG state is wrong size");
  THArgCheck(THTensor_(isContiguous)(self), 1, "RNG state needs to be contiguous");
  rng_state = (THGeneratorState *)self->data<scalar_t>();
  THGeneratorState_copy(rng_state, &_generator->gen_state);
}

void THTensor_(setRNGState)(THGenerator *_generator, THTensor *self)
{
  std::lock_guard<std::mutex> lock(_generator->mutex);
  static const size_t size = sizeof(THGeneratorState);
  THGeneratorState *rng_state;
  THArgCheck(THTensor_(nElement)(self) == size, 1, "RNG state is wrong size");
  THArgCheck(THTensor_(isContiguous)(self), 1, "RNG state needs to be contiguous");
  rng_state = (THGeneratorState *)self->data<scalar_t>();
  THArgCheck(THGeneratorState_isValid(rng_state), 1, "Invalid RNG state");
  THGeneratorState_copy(&_generator->gen_state, rng_state);
}
#endif
#endif