https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/generated_random_variables.py#L83-L94
def _build_custom_rv(distribution, sample_shape, value, name):
  """"""
  # Program transformations (e.g., `make_log_joint_fn`) assume that
  # the traced constructor has `name` and `value` kwargs, enabling
  # them to override the value of an RV according to its name.
  # User-defined RVs inherit their name from the provided
  # distribution; this helper method exposes the name as a dummy kwarg
  # so that it's visible to program transformations.
  del name  # unused
  return RandomVariable(distribution=distribution,
                        sample_shape=sample_shape,
                        value=value)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/assert_util.py#L44-L73
def assert_finite(x, data=None, summarize=None, message=None, name=None):
  """ """
  with tf.compat.v2.name_scope(name or 'assert_finite'):
    x_ = tf.get_static_value(x)
    if x_ is not None:
      if ~np.all(np.isfinite(x_)):
        raise ValueError(message)
      return x
    assertion = tf.compat.v1.assert_equal(
        tf.math.is_finite(x), tf.ones_like(x, tf.bool),
        data=data, summarize=summarize, message=message)
    with tf.control_dependencies([assertion]):
      return tf.identity(x)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/langevin.py#L925-L929
def _maybe_broadcast_volatility(volatility_parts, state_parts):
  """"""
  return [v + tf.zeros_like(sp, dtype=sp.dtype.base_dtype)
          for v, sp in zip(volatility_parts, state_parts)]
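A minimal usage sketch of the helper above (assuming TensorFlow is available and the function is called, as in the MALA kernel, with one volatility tensor per state part). Each volatility part is broadcast to the shape and dtype of the matching state part by adding zeros of that shape:

import tensorflow as tf

state_parts = [tf.zeros([3, 2]), tf.zeros([4])]
volatility_parts = [tf.constant(0.5), tf.constant([1., 2., 3., 4.])]

# Result shapes are (3, 2) and (4,), matching the state parts.
broadcast = _maybe_broadcast_volatility(volatility_parts, state_parts)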
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/_vendor/slugify/slugify.py#L74-L175
def slugify(text, entities=True, decimal=True, hexadecimal=True, max_length=0, word_boundary=False, separator=DEFAULT_SEPARATOR, save_order=False, stopwords=(), regex_pattern=None, lowercase=True, replacements=()): """ """ # user-specific replacements if replacements: for old, new in replacements: text = text.replace(old, new) # ensure text is unicode if not isinstance(text, _unicode_type): text = _unicode(text, 'utf-8', 'ignore') # replace quotes with dashes - pre-process text = QUOTE_PATTERN.sub(DEFAULT_SEPARATOR, text) # decode unicode text = unidecode.unidecode(text) # ensure text is still in unicode if not isinstance(text, _unicode_type): text = _unicode(text, 'utf-8', 'ignore') # character entity reference if entities: text = CHAR_ENTITY_PATTERN.sub(lambda m: unichr(name2codepoint[m.group(1)]), text) # decimal character reference if decimal: try: text = DECIMAL_PATTERN.sub(lambda m: unichr(int(m.group(1))), text) except Exception: pass # hexadecimal character reference if hexadecimal: try: text = HEX_PATTERN.sub(lambda m: unichr(int(m.group(1), 16)), text) except Exception: pass # translate text = unicodedata.normalize('NFKD', text) # make the text lowercase (optional) if lowercase: text = text.lower() # remove generated quotes -- post-process text = QUOTE_PATTERN.sub('', text) # cleanup numbers text = NUMBERS_PATTERN.sub('', text) # replace all other unwanted characters if lowercase: pattern = regex_pattern or ALLOWED_CHARS_PATTERN else: pattern = regex_pattern or ALLOWED_CHARS_PATTERN_WITH_UPPERCASE text = re.sub(pattern, DEFAULT_SEPARATOR, text) # remove redundant text = DUPLICATE_DASH_PATTERN.sub(DEFAULT_SEPARATOR, text).strip(DEFAULT_SEPARATOR) # remove stopwords if stopwords: if lowercase: stopwords_lower = [s.lower() for s in stopwords] words = [w for w in text.split(DEFAULT_SEPARATOR) if w not in stopwords_lower] else: words = [w for w in text.split(DEFAULT_SEPARATOR) if w not in stopwords] text = DEFAULT_SEPARATOR.join(words) # finalize user-specific replacements if replacements: for old, new in replacements: text = text.replace(old, new) # smart truncate if requested if max_length > 0: text = smart_truncate(text, max_length, word_boundary, DEFAULT_SEPARATOR, save_order) if separator != DEFAULT_SEPARATOR: text = text.replace(DEFAULT_SEPARATOR, separator) return text
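Illustrative calls of the vendored slugify helper above. The expected outputs follow the upstream python-slugify documentation and are shown as a sketch, not verified against this exact vendored copy:

slugify("This is a test ---")
# 'this-is-a-test'

slugify("This is a test ---", separator="_")
# 'this_is_a_test'

slugify("one two three", stopwords=["two"])
# 'one-three'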
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L875-L886
def _largest_integer_by_dtype(dt):
  """"""
  if not _is_known_dtype(dt):
    raise TypeError("Unrecognized dtype: {}".format(dt.name))
  if dt.is_floating:
    return int(2**(np.finfo(dt.as_numpy_dtype).nmant + 1))
  if dt.is_integer:
    return np.iinfo(dt.as_numpy_dtype).max
  if dt.base_dtype == tf.bool:
    return int(1)
  # We actually can't land here but keep the case for completeness.
  raise TypeError("Unrecognized dtype: {}".format(dt.name))
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/disentangled_vae.py#L358-L391
def call(self, inputs):
  """ """
  # We explicitly broadcast f to the same shape as z other than the final
  # dimension, because `tf.concat` can't automatically do this.
  dynamic, static = inputs
  timesteps = tf.shape(input=dynamic)[-2]
  static = static[..., tf.newaxis, :] + tf.zeros([timesteps, 1])
  latents = tf.concat([dynamic, static], axis=-1)  # (sample, N, T, latents)

  out = self.dense(latents)
  out = tf.reshape(out, (-1, 1, 1, self.hidden_size))
  out = self.conv_transpose1(out)
  out = self.conv_transpose2(out)
  out = self.conv_transpose3(out)
  out = self.conv_transpose4(out)  # (sample*N*T, h, w, c)
  expanded_shape = tf.concat(
      (tf.shape(input=latents)[:-1], tf.shape(input=out)[1:]), axis=0)
  out = tf.reshape(out, expanded_shape)  # (sample, N, T, h, w, c)

  return tfd.Independent(
      distribution=tfd.Normal(loc=out, scale=1.),
      reinterpreted_batch_ndims=3,  # wrap (h, w, c)
      name="decoded_image")
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L256-L267
def get_product(self, location, product_id, project_id=None, retry=None,
                timeout=None, metadata=None):
    """ """
    client = self.get_conn()
    name = ProductSearchClient.product_path(project_id, location, product_id)
    self.log.info('Retrieving Product: %s', name)
    response = client.get_product(name=name, retry=retry, timeout=timeout, metadata=metadata)
    self.log.info('Product retrieved.')
    self.log.debug('Product retrieved:\n%s', response)
    return MessageToDict(response)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/mysql_to_gcs.py#L210-L253
def _write_local_schema_file(self, cursor): """ """ schema_str = None schema_file_mime_type = 'application/json' tmp_schema_file_handle = NamedTemporaryFile(delete=True) if self.schema is not None and isinstance(self.schema, string_types): schema_str = self.schema.encode('utf-8') elif self.schema is not None and isinstance(self.schema, list): schema_str = json.dumps(self.schema).encode('utf-8') else: schema = [] for field in cursor.description: # See PEP 249 for details about the description tuple. field_name = field[0] field_type = self.type_map(field[1]) # Always allow TIMESTAMP to be nullable. MySQLdb returns None types # for required fields because some MySQL timestamps can't be # represented by Python's datetime (e.g. 0000-00-00 00:00:00). if field[6] or field_type == 'TIMESTAMP': field_mode = 'NULLABLE' else: field_mode = 'REQUIRED' schema.append({ 'name': field_name, 'type': field_type, 'mode': field_mode, }) schema_str = json.dumps(schema, sort_keys=True).encode('utf-8') tmp_schema_file_handle.write(schema_str) self.log.info('Using schema for %s: %s', self.schema_filename, schema_str) schema_file_to_upload = { 'file_name': self.schema_filename, 'file_handle': tmp_schema_file_handle, 'file_mime_type': schema_file_mime_type } return schema_file_to_upload
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/vq_vae.py#L259-L301
def add_ema_control_dependencies(vector_quantizer, one_hot_assignments, codes, commitment_loss, decay): """ """ # Use an exponential moving average to update the codebook. updated_ema_count = moving_averages.assign_moving_average( vector_quantizer.ema_count, tf.reduce_sum(input_tensor=one_hot_assignments, axis=[0, 1]), decay, zero_debias=False) updated_ema_means = moving_averages.assign_moving_average( vector_quantizer.ema_means, tf.reduce_sum( input_tensor=tf.expand_dims(codes, 2) * tf.expand_dims(one_hot_assignments, 3), axis=[0, 1]), decay, zero_debias=False) # Add small value to avoid dividing by zero. perturbed_ema_count = updated_ema_count + 1e-5 with tf.control_dependencies([commitment_loss]): update_means = tf.compat.v1.assign( vector_quantizer.codebook, updated_ema_means / perturbed_ema_count[..., tf.newaxis]) with tf.control_dependencies([update_means]): return tf.identity(commitment_loss)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1002-L1098
def embed_check_integer_casting_closed(x, target_dtype, assert_nonnegative=True, assert_positive=False, name="embed_check_casting_closed"): """ """ with tf.name_scope(name): x = tf.convert_to_tensor(value=x, name="x") if (not _is_integer_like_by_dtype(x.dtype) and not dtype_util.is_floating(x.dtype)): raise TypeError("{}.dtype must be floating- or " "integer-type.".format(dtype_util.name(x.dtype))) if (not _is_integer_like_by_dtype(target_dtype) and not dtype_util.is_floating(target_dtype)): raise TypeError("target_dtype ({}) must be floating- or " "integer-type.".format(dtype_util.name(target_dtype))) if (not _is_integer_like_by_dtype(x.dtype) and not _is_integer_like_by_dtype(target_dtype)): raise TypeError("At least one of {}.dtype ({}) and target_dtype ({}) " "must be integer-type.".format( x, dtype_util.name(x.dtype), dtype_util.name(target_dtype))) assertions = [] if assert_positive: assertions += [ assert_util.assert_positive(x, message="Elements must be positive."), ] elif assert_nonnegative: assertions += [ assert_util.assert_non_negative( x, message="Elements must be non-negative."), ] if dtype_util.is_floating(x.dtype): # Being here means _is_integer_like_by_dtype(target_dtype) = True. # Since this check implies the magnitude check below, we need only it. assertions += [ assert_integer_form( x, int_dtype=target_dtype, message="Elements must be {}-equivalent.".format( dtype_util.name(target_dtype))), ] else: if (_largest_integer_by_dtype(x.dtype) > _largest_integer_by_dtype(target_dtype)): # Cast may lose integer precision. assertions += [ assert_util.assert_less_equal( x, _largest_integer_by_dtype(target_dtype), message=("Elements cannot exceed {}.".format( _largest_integer_by_dtype(target_dtype)))), ] if (not assert_nonnegative and (_smallest_integer_by_dtype( x.dtype) < _smallest_integer_by_dtype(target_dtype))): assertions += [ assert_util.assert_greater_equal( x, _smallest_integer_by_dtype(target_dtype), message=("Elements cannot be smaller than {}.".format( _smallest_integer_by_dtype(target_dtype)))), ] if not assertions: return x return with_dependencies(assertions, x)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/dense_variational.py#L215-L254
def get_config(self): """ """ config = { 'units': self.units, 'activation': (tf.keras.activations.serialize(self.activation) if self.activation else None), 'activity_regularizer': tf.keras.initializers.serialize(self.activity_regularizer), } function_keys = [ 'kernel_posterior_fn', 'kernel_posterior_tensor_fn', 'kernel_prior_fn', 'kernel_divergence_fn', 'bias_posterior_fn', 'bias_posterior_tensor_fn', 'bias_prior_fn', 'bias_divergence_fn', ] for function_key in function_keys: function = getattr(self, function_key) if function is None: function_name = None function_type = None else: function_name, function_type = tfp_layers_util.serialize_function( function) config[function_key] = function_name config[function_key + '_type'] = function_type base_config = super(_DenseVariational, self).get_config() return dict(list(base_config.items()) + list(config.items()))
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/api/common/experimental/mark_tasks.py#L211-L239
def set_dag_run_state_to_success(dag, execution_date, commit=False, session=None):
    """ """
    res = []

    if not dag or not execution_date:
        return res

    # Mark the dag run to success.
    if commit:
        _set_dag_run_state(dag.dag_id, execution_date, State.SUCCESS, session)

    # Mark all task instances of the dag run to success.
    for task in dag.tasks:
        task.dag = dag
        new_state = set_state(task=task, execution_date=execution_date,
                              state=State.SUCCESS, commit=commit)
        res.extend(new_state)

    return res
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/affine.py#L34-L37
def _as_tensor(x, name, dtype):
  """"""
  return None if x is None else tf.convert_to_tensor(
      value=x, name=name, dtype=dtype)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/uniform.py#L144-L147
def range(self, name="range"):
  """"""
  with self._name_scope(name):
    return self.high - self.low
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/vector_diffeomixture.py#L853-L879
def interpolate_loc(grid, loc):
  """"""
  if len(loc) != 2:
    raise NotImplementedError("Currently only bimixtures are supported; "
                              "len(scale)={} is not 2.".format(len(loc)))
  deg = tf.compat.dimension_value(
      tensorshape_util.with_rank_at_least(grid.shape, 1)[-1])
  if deg is None:
    raise ValueError("Num quadrature grid points must be known prior "
                     "to graph execution.")
  with tf.name_scope("interpolate_loc"):
    if loc is None or loc[0] is None and loc[1] is None:
      return [None]*deg
    # shape: [B, 1, k, deg]
    w = grid[..., tf.newaxis, :, :]
    loc = [
        x[..., tf.newaxis]  # shape: [B, e, 1]
        if x is not None else None
        for x in loc
    ]
    if loc[0] is None:
      x = w[..., 1, :] * loc[1]                   # shape: [B, e, deg]
    elif loc[1] is None:
      x = w[..., 0, :] * loc[0]                   # shape: [B, e, deg]
    else:
      delta = loc[0] - loc[1]
      x = w[..., 0, :] * delta + loc[1]           # shape: [B, e, deg]
    return [x[..., k] for k in range(deg)]
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/spark_sql_hook.py#L136-L159
def run_query(self, cmd="", **kwargs):
    """ """
    spark_sql_cmd = self._prepare_command(cmd)

    self._sp = subprocess.Popen(spark_sql_cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                **kwargs)

    for line in iter(self._sp.stdout.readline, ''):
        self.log.info(line)

    returncode = self._sp.wait()

    if returncode:
        raise AirflowException(
            "Cannot execute {} on {}. Process exit code: {}.".format(
                cmd, self._conn.host, returncode
            )
        )
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/opsgenie_alert_operator.py#L107-L124
def _build_opsgenie_payload(self):
    """ """
    payload = {}

    for key in [
        "message", "alias", "description", "responders",
        "visibleTo", "actions", "tags", "details", "entity",
        "source", "priority", "user", "note"
    ]:
        val = getattr(self, key)
        if val:
            payload[key] = val
    return payload
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/awsbatch_operator.py#L117-L145
def _wait_for_task_ended(self):
    """ """
    try:
        waiter = self.client.get_waiter('job_execution_complete')
        waiter.config.max_attempts = sys.maxsize  # timeout is managed by airflow
        waiter.wait(jobs=[self.jobId])
    except ValueError:
        # If waiter not available use expo
        retry = True
        retries = 0

        while retries < self.max_retries and retry:
            self.log.info('AWS Batch retry in the next %s seconds', retries)
            response = self.client.describe_jobs(
                jobs=[self.jobId]
            )
            if response['jobs'][-1]['status'] in ['SUCCEEDED', 'FAILED']:
                retry = False

            sleep(1 + pow(retries * 0.1, 2))
            retries += 1
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/transforms.py#L624-L664
def get_params(img, scale, ratio):
    """ """
    area = img.size[0] * img.size[1]

    for attempt in range(10):
        target_area = random.uniform(*scale) * area
        log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
        aspect_ratio = math.exp(random.uniform(*log_ratio))

        w = int(round(math.sqrt(target_area * aspect_ratio)))
        h = int(round(math.sqrt(target_area / aspect_ratio)))

        if w <= img.size[0] and h <= img.size[1]:
            i = random.randint(0, img.size[1] - h)
            j = random.randint(0, img.size[0] - w)
            return i, j, h, w

    # Fallback to central crop
    in_ratio = img.size[0] / img.size[1]
    if (in_ratio < min(ratio)):
        w = img.size[0]
        h = w / min(ratio)
    elif (in_ratio > max(ratio)):
        h = img.size[1]
        w = h * max(ratio)
    else:  # whole image
        w = img.size[0]
        h = img.size[1]
    i = (img.size[1] - h) // 2
    j = (img.size[0] - w) // 2
    return i, j, h, w
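A usage sketch for the crop-parameter sampler above. In torchvision this is a static method of RandomResizedCrop; here it is shown standalone, and the blank PIL image is only a placeholder:

import math
import random
from PIL import Image

img = Image.new("RGB", (640, 480))
i, j, h, w = get_params(img, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.))
# PIL's crop box is (left, upper, right, lower).
patch = img.crop((j, i, j + w, i + h))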
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/prefer_static.py#L41-L56
def _prefer_static(original_fn, static_fn):
  """"""
  original_spec = tf_inspect.getfullargspec(original_fn)
  static_spec = tf_inspect.getfullargspec(static_fn)
  if original_spec != static_spec:
    raise ValueError(
        'Arg specs do not match: original={}, static={}, fn={}'.format(
            original_spec, static_spec, original_fn))
  @decorator.decorator
  def wrap(wrapped_fn, *args, **kwargs):
    del wrapped_fn
    [args_, kwargs_], all_static = _maybe_get_static_args([args, kwargs])
    if all_static:
      return static_fn(*args_, **kwargs_)
    return original_fn(*args, **kwargs)
  return wrap(original_fn)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/mvn_linear_operator.py#L269-L340
def _kl_brute_force(a, b, name=None): """ """ def squared_frobenius_norm(x): """Helper to make KL calculation slightly more readable.""" # http://mathworld.wolfram.com/FrobeniusNorm.html # The gradient of KL[p,q] is not defined when p==q. The culprit is # tf.norm, i.e., we cannot use the commented out code. # return tf.square(tf.norm(x, ord="fro", axis=[-2, -1])) return tf.reduce_sum(input_tensor=tf.square(x), axis=[-2, -1]) # TODO(b/35041439): See also b/35040945. Remove this function once LinOp # supports something like: # A.inverse().solve(B).norm(order='fro', axis=[-1, -2]) def is_diagonal(x): """Helper to identify if `LinearOperator` has only a diagonal component.""" return (isinstance(x, tf.linalg.LinearOperatorIdentity) or isinstance(x, tf.linalg.LinearOperatorScaledIdentity) or isinstance(x, tf.linalg.LinearOperatorDiag)) with tf.name_scope(name or "kl_mvn"): # Calculation is based on: # http://stats.stackexchange.com/questions/60680/kl-divergence-between-two-multivariate-gaussians # and, # https://en.wikipedia.org/wiki/Matrix_norm#Frobenius_norm # i.e., # If Ca = AA', Cb = BB', then # tr[inv(Cb) Ca] = tr[inv(B)' inv(B) A A'] # = tr[inv(B) A A' inv(B)'] # = tr[(inv(B) A) (inv(B) A)'] # = sum_{ij} (inv(B) A)_{ij}**2 # = ||inv(B) A||_F**2 # where ||.||_F is the Frobenius norm and the second equality follows from # the cyclic permutation property. if is_diagonal(a.scale) and is_diagonal(b.scale): # Using `stddev` because it handles expansion of Identity cases. b_inv_a = (a.stddev() / b.stddev())[..., tf.newaxis] else: b_inv_a = b.scale.solve(a.scale.to_dense()) kl_div = ( b.scale.log_abs_determinant() - a.scale.log_abs_determinant() + 0.5 * (-tf.cast(a.scale.domain_dimension_tensor(), a.dtype) + squared_frobenius_norm(b_inv_a) + squared_frobenius_norm( b.scale.solve((b.mean() - a.mean())[..., tf.newaxis])))) tensorshape_util.set_shape( kl_div, tf.broadcast_static_shape(a.batch_shape, b.batch_shape)) return kl_div
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/S3_hook.py#L279-L293
def check_for_wildcard_key(self,
                           wildcard_key, bucket_name=None, delimiter=''):
    """ """
    return self.get_wildcard_key(wildcard_key=wildcard_key,
                                 bucket_name=bucket_name,
                                 delimiter=delimiter) is not None
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/ftp_hook.py#L102-L117
def describe_directory(self, path):
    """ """
    conn = self.get_conn()
    conn.cwd(path)
    try:
        # only works in Python 3
        files = dict(conn.mlsd())
    except AttributeError:
        files = dict(mlsd(conn))
    return files
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/bayesian_neural_network.py#L124-L158
def plot_heldout_prediction(input_vals, probs, fname, n=10, title=""): """ """ fig = figure.Figure(figsize=(9, 3*n)) canvas = backend_agg.FigureCanvasAgg(fig) for i in range(n): ax = fig.add_subplot(n, 3, 3*i + 1) ax.imshow(input_vals[i, :].reshape(IMAGE_SHAPE[:-1]), interpolation="None") ax = fig.add_subplot(n, 3, 3*i + 2) for prob_sample in probs: sns.barplot(np.arange(10), prob_sample[i, :], alpha=0.1, ax=ax) ax.set_ylim([0, 1]) ax.set_title("posterior samples") ax = fig.add_subplot(n, 3, 3*i + 3) sns.barplot(np.arange(10), np.mean(probs[:, i, :], axis=0), ax=ax) ax.set_ylim([0, 1]) ax.set_title("predictive probs") fig.suptitle(title) fig.tight_layout() canvas.print_figure(fname, format="png") print("saved {}".format(fname))
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/dirichlet.py#L300-L315
def _maybe_assert_valid_concentration(self, concentration, validate_args):
  """"""
  if not validate_args:
    return concentration
  return distribution_util.with_dependencies([
      assert_util.assert_positive(
          concentration,
          message="Concentration parameter must be positive."),
      assert_util.assert_rank_at_least(
          concentration, 1,
          message="Concentration parameter must have >=1 dimensions."),
      assert_util.assert_less(
          1, tf.shape(input=concentration)[-1],
          message="Concentration parameter must have event_size >= 2."),
  ], concentration)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/internal/util.py#L201-L252
def empirical_statistics(observed_time_series): """ """ with tf.compat.v1.name_scope( 'empirical_statistics', values=[observed_time_series]): [ observed_time_series, mask ] = canonicalize_observed_time_series_with_mask(observed_time_series) squeezed_series = observed_time_series[..., 0] if mask is None: observed_mean, observed_variance = tf.nn.moments( x=squeezed_series, axes=-1) observed_initial = squeezed_series[..., 0] else: broadcast_mask = tf.broadcast_to(tf.cast(mask, tf.bool), tf.shape(input=squeezed_series)) observed_mean, observed_variance = ( missing_values_util.moments_of_masked_time_series( squeezed_series, broadcast_mask=broadcast_mask)) try: observed_initial = ( missing_values_util.initial_value_of_masked_time_series( squeezed_series, broadcast_mask=broadcast_mask)) except NotImplementedError: tf.compat.v1.logging.warn( 'Cannot compute initial values for a masked time series' 'with dynamic shape; using the mean instead. This will' 'affect heuristic priors and may change the results of' 'inference.') observed_initial = observed_mean observed_stddev = tf.sqrt(observed_variance) observed_initial_centered = observed_initial - observed_mean return observed_mean, observed_stddev, observed_initial_centered
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/imap_hook.py#L236-L264
def get_attachments_by_name(self, name, check_regex, find_first=False):
    """ """
    attachments = []

    for part in self.mail.walk():
        mail_part = MailPart(part)
        if mail_part.is_attachment():
            found_attachment = mail_part.has_matching_name(name) if check_regex \
                else mail_part.has_equal_name(name)
            if found_attachment:
                file_name, file_payload = mail_part.get_file()
                self.log.info('Found attachment: {}'.format(file_name))
                attachments.append((file_name, file_payload))
                if find_first:
                    break

    return attachments
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/tensorshape_util.py#L285-L303
def with_rank_at_least(x, rank):  # pylint: disable=redefined-outer-name
  """ """
  return type(x)(tf.TensorShape(x).with_rank_at_least(rank))
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/sina.py#L54-L64
def sina_download_by_vkey(vkey, title=None, output_dir='.', merge=True, info_only=False):
    """ """
    url = 'http://video.sina.com/v/flvideo/%s_0.flv' % vkey
    type, ext, size = url_info(url)
    print_info(site_info, title, 'flv', size)
    if not info_only:
        download_urls([url], title, 'flv', size, output_dir=output_dir, merge=merge)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/triangular.py#L33-L38
def _broadcast_to(tensor_to_broadcast, target_tensors):
  """"""
  output = tensor_to_broadcast
  for tensor in target_tensors:
    output += tf.zeros_like(tensor)
  return output
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1158-L1195
def build_backward_pass_step(get_transition_matrix_for_timestep):
  """ """

  def backward_pass_step(state, filtered_parameters):
    """Run a single step of backward smoothing."""

    (filtered_mean, filtered_cov,
     predicted_mean, predicted_cov) = filtered_parameters
    transition_matrix = get_transition_matrix_for_timestep(state.timestep)

    next_posterior_mean = state.backward_mean
    next_posterior_cov = state.backward_cov

    posterior_mean, posterior_cov = backward_smoothing_update(
        filtered_mean, filtered_cov,
        predicted_mean, predicted_cov,
        next_posterior_mean, next_posterior_cov,
        transition_matrix)

    return BackwardPassState(backward_mean=posterior_mean,
                             backward_cov=posterior_cov,
                             timestep=state.timestep - 1)

  return backward_pass_step
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/special_math.py#L383-L388
def _log_ndtr_lower(x, series_order):
  """"""
  x_2 = tf.square(x)
  # Log of the term multiplying (1 + sum)
  log_scale = -0.5 * x_2 - tf.math.log(-x) - 0.5 * np.log(2. * np.pi)
  return log_scale + tf.math.log(_log_ndtr_asymptotic_series(x, series_order))
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/sample_stats.py#L39-L209
def auto_correlation(x,
                     axis=-1,
                     max_lags=None,
                     center=True,
                     normalize=True,
                     name='auto_correlation'):
  """ """
  # Implementation details:
  # Extend length N / 2 1-D array x to length N by zero padding onto the end.
  # Then, set
  #   F[x]_k := sum_n x_n exp{-i 2 pi k n / N }.
  # It is not hard to see that
  #   F[x]_k Conj(F[x]_k) = F[R]_k, where
  #   R_m := sum_n x_n Conj(x_{(n - m) mod N}).
  # One can also check that R_m / (N / 2 - m) is an unbiased estimate of RXX[m].
  # Since F[x] is the DFT of x, this leads us to a zero-padding and FFT/IFFT
  # based version of estimating RXX.
  # Note that this is a special case of the Wiener-Khinchin Theorem.
  with tf.compat.v1.name_scope(name, values=[x]):
    x = tf.convert_to_tensor(value=x, name='x')

    # Rotate dimensions of x in order to put axis at the rightmost dim.
    # FFT op requires this.
    rank = util.prefer_static_rank(x)
    if axis < 0:
      axis = rank + axis
    shift = rank - 1 - axis
    # Suppose x.shape[axis] = T, so there are T 'time' steps.
    #   ==> x_rotated.shape = B + [T],
    # where B is x_rotated's batch shape.
    x_rotated = util.rotate_transpose(x, shift)

    if center:
      x_rotated -= tf.reduce_mean(
          input_tensor=x_rotated, axis=-1, keepdims=True)

    # x_len = N / 2 from above explanation.  The length of x along axis.
    # Get a value for x_len that works in all cases.
    x_len = util.prefer_static_shape(x_rotated)[-1]

    # TODO(langmore) Investigate whether this zero padding helps or hurts.  At
    # the moment is necessary so that all FFT implementations work.
    # Zero pad to the next power of 2 greater than 2 * x_len, which equals
    # 2**(ceil(Log_2(2 * x_len))).  Note: Log_2(X) = Log_e(X) / Log_e(2).
    x_len_float64 = tf.cast(x_len, np.float64)
    target_length = tf.pow(
        np.float64(2.), tf.math.ceil(
            tf.math.log(x_len_float64 * 2) / np.log(2.)))
    pad_length = tf.cast(target_length - x_len_float64, np.int32)

    # We should have:
    # x_rotated_pad.shape = x_rotated.shape[:-1] + [T + pad_length]
    #                     = B + [T + pad_length]
    x_rotated_pad = util.pad(x_rotated, axis=-1, back=True, count=pad_length)

    dtype = x.dtype
    if not dtype.is_complex:
      if not dtype.is_floating:
        raise TypeError('Argument x must have either float or complex dtype'
                        ' found: {}'.format(dtype))
      x_rotated_pad = tf.complex(x_rotated_pad,
                                 dtype.real_dtype.as_numpy_dtype(0.))

    # Autocorrelation is IFFT of power-spectral density (up to some scaling).
    fft_x_rotated_pad = tf.signal.fft(x_rotated_pad)
    spectral_density = fft_x_rotated_pad * tf.math.conj(fft_x_rotated_pad)
    # shifted_product is R[m] from above detailed explanation.
    # It is the inner product sum_n X[n] * Conj(X[n - m]).
    shifted_product = tf.signal.ifft(spectral_density)

    # Cast back to real-valued if x was real to begin with.
    shifted_product = tf.cast(shifted_product, dtype)

    # Figure out if we can deduce the final static shape, and set max_lags.
    # Use x_rotated as a reference, because it has the time dimension in the far
    # right, and was created before we performed all sorts of crazy shape
    # manipulations.
    know_static_shape = True
    if not x_rotated.shape.is_fully_defined():
      know_static_shape = False
    if max_lags is None:
      max_lags = x_len - 1
    else:
      max_lags = tf.convert_to_tensor(value=max_lags, name='max_lags')
      max_lags_ = tf.get_static_value(max_lags)
      if max_lags_ is None or not know_static_shape:
        know_static_shape = False
        max_lags = tf.minimum(x_len - 1, max_lags)
      else:
        max_lags = min(x_len - 1, max_lags_)

    # Chop off the padding.
    # We allow users to provide a huge max_lags, but cut it off here.
    # shifted_product_chopped.shape = x_rotated.shape[:-1] + [max_lags]
    shifted_product_chopped = shifted_product[..., :max_lags + 1]

    # If possible, set shape.
    if know_static_shape:
      chopped_shape = x_rotated.shape.as_list()
      chopped_shape[-1] = min(x_len, max_lags + 1)
      shifted_product_chopped.set_shape(chopped_shape)

    # Recall R[m] is a sum of N / 2 - m nonzero terms x[n] Conj(x[n - m]).  The
    # other terms were zeros arising only due to zero padding.
    # `denominator = (N / 2 - m)` (defined below) is the proper term to
    # divide by to make this an unbiased estimate of the expectation
    # E[X[n] Conj(X[n - m])].
    x_len = tf.cast(x_len, dtype.real_dtype)
    max_lags = tf.cast(max_lags, dtype.real_dtype)
    denominator = x_len - tf.range(0., max_lags + 1.)
    denominator = tf.cast(denominator, dtype)
    shifted_product_rotated = shifted_product_chopped / denominator

    if normalize:
      shifted_product_rotated /= shifted_product_rotated[..., :1]

    # Transpose dimensions back to those of x.
    return util.rotate_transpose(shifted_product_rotated, -shift)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/postgres_to_gcs_operator.py#L166-L192
def _write_local_schema_file(self, cursor):
    """ """
    schema = []
    for field in cursor.description:
        # See PEP 249 for details about the description tuple.
        field_name = field[0]
        field_type = self.type_map(field[1])
        field_mode = 'REPEATED' if field[1] in (1009, 1005, 1007, 1016) else 'NULLABLE'
        schema.append({
            'name': field_name,
            'type': field_type,
            'mode': field_mode,
        })

    self.log.info('Using schema for %s: %s', self.schema_filename, schema)
    tmp_schema_file_handle = NamedTemporaryFile(delete=True)
    s = json.dumps(schema, sort_keys=True).encode('utf-8')
    tmp_schema_file_handle.write(s)
    return {self.schema_filename: tmp_schema_file_handle}
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L527-L548
def safe_search_detection(
    self, image, max_results=None, retry=None, timeout=None, additional_properties=None
):
    """ """
    client = self.annotator_client

    self.log.info("Detecting safe search")

    if additional_properties is None:
        additional_properties = {}

    response = client.safe_search_detection(
        image=image, max_results=max_results, retry=retry, timeout=timeout, **additional_properties
    )
    response = MessageToDict(response)
    self._check_for_error(response)

    self.log.info("Safe search detection finished")
    return response
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/decorators.py#L29-L58
def action_logging(f):
    """ """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        with create_session() as session:
            if g.user.is_anonymous:
                user = 'anonymous'
            else:
                user = g.user.username

            log = Log(
                event=f.__name__,
                task_instance=None,
                owner=user,
                extra=str(list(request.args.items())),
                task_id=request.args.get('task_id'),
                dag_id=request.args.get('dag_id'))

            if 'execution_date' in request.args:
                log.execution_date = pendulum.parse(
                    request.args.get('execution_date'))

            session.add(log)

        return f(*args, **kwargs)

    return wrapper
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/_vendor/nvd3/NVD3Chart.py#L385-L405
def buildcontainer(self):
    """"""
    if self.container:
        return

    # Create SVG div with style
    if self.width:
        if self.width[-1] != '%':
            self.style += 'width:%spx;' % self.width
        else:
            self.style += 'width:%s;' % self.width
    if self.height:
        if self.height[-1] != '%':
            self.style += 'height:%spx;' % self.height
        else:
            self.style += 'height:%s;' % self.height
    if self.style:
        self.style = 'style="%s"' % self.style

    self.container = self.containerheader + \
        '<div id="%s"><svg %s></svg></div>\n' % (self.name, self.style)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1198-L1271
def rotate_transpose(x, shift, name="rotate_transpose"): """ """ with tf.name_scope(name): x = tf.convert_to_tensor(value=x, name="x") shift = tf.convert_to_tensor(value=shift, name="shift") # We do not assign back to preserve constant-ness. assert_util.assert_integer(shift) shift_value_static = tf.get_static_value(shift) ndims = tensorshape_util.rank(x.shape) if ndims is not None and shift_value_static is not None: if ndims < 2: return x shift_value_static = np.sign(shift_value_static) * ( abs(shift_value_static) % ndims) if shift_value_static == 0: return x perm = np.roll(np.arange(ndims), shift_value_static) return tf.transpose(a=x, perm=perm) else: # Consider if we always had a positive shift, and some specified # direction. # When shifting left we want the new array: # last(x, n-shift) + first(x, shift) # and if shifting right then we want: # last(x, shift) + first(x, n-shift) # Observe that last(a) == slice(a, n) and first(a) == slice(0, a). # Also, we can encode direction and shift as one: direction * shift. # Combining these facts, we have: # a = cond(shift<0, -shift, n-shift) # last(x, n-a) + first(x, a) == x[a:n] + x[0:a] # Finally, we transform shift by modulo length so it can be specified # independently from the array upon which it operates (like python). ndims = tf.rank(x) shift = tf.where( tf.less(shift, 0), -shift % ndims, ndims - shift % ndims) first = tf.range(0, shift) last = tf.range(shift, ndims) perm = tf.concat([last, first], 0) return tf.transpose(a=x, perm=perm)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L608-L638
def get_partitions(
        self, schema, table_name, filter=None):
    """ """
    with self.metastore as client:
        table = client.get_table(dbname=schema, tbl_name=table_name)
        if len(table.partitionKeys) == 0:
            raise AirflowException("The table isn't partitioned")
        else:
            if filter:
                parts = client.get_partitions_by_filter(
                    db_name=schema, tbl_name=table_name,
                    filter=filter, max_parts=HiveMetastoreHook.MAX_PART_COUNT)
            else:
                parts = client.get_partitions(
                    db_name=schema, tbl_name=table_name,
                    max_parts=HiveMetastoreHook.MAX_PART_COUNT)

            pnames = [p.name for p in table.partitionKeys]
            return [dict(zip(pnames, p.values)) for p in parts]
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_container_instance_hook.py#L80-L93
def create_or_update(self, resource_group, name, container_group):
    """ """
    self.connection.container_groups.create_or_update(resource_group,
                                                      name,
                                                      container_group)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/spark_submit_hook.py#L448-L511
def _start_driver_status_tracking(self): """ """ # When your Spark Standalone cluster is not performing well # due to misconfiguration or heavy loads. # it is possible that the polling request will timeout. # Therefore we use a simple retry mechanism. missed_job_status_reports = 0 max_missed_job_status_reports = 10 # Keep polling as long as the driver is processing while self._driver_status not in ["FINISHED", "UNKNOWN", "KILLED", "FAILED", "ERROR"]: # Sleep for 1 second as we do not want to spam the cluster time.sleep(1) self.log.debug("polling status of spark driver with id {}" .format(self._driver_id)) poll_drive_status_cmd = self._build_track_driver_status_command() status_process = subprocess.Popen(poll_drive_status_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1, universal_newlines=True) self._process_spark_status_log(iter(status_process.stdout.readline, '')) returncode = status_process.wait() if returncode: if missed_job_status_reports < max_missed_job_status_reports: missed_job_status_reports = missed_job_status_reports + 1 else: raise AirflowException( "Failed to poll for the driver status {} times: returncode = {}" .format(max_missed_job_status_reports, returncode) )
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/dingding_hook.py#L76-L98
def _build_message(self):
    """ """
    if self.message_type in ['text', 'markdown']:
        data = {
            'msgtype': self.message_type,
            self.message_type: {
                'content': self.message
            } if self.message_type == 'text' else self.message,
            'at': {
                'atMobiles': self.at_mobiles,
                'isAtAll': self.at_all
            }
        }
    else:
        data = {
            'msgtype': self.message_type,
            self.message_type: self.message
        }
    return json.dumps(data)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L905-L999
def embed_check_categorical_event_shape( categorical_param, name="embed_check_categorical_event_shape"): """ """ with tf.name_scope(name): x = tf.convert_to_tensor(value=categorical_param, name="categorical_param") # The size must not exceed both of: # - The largest possible int32 (since categorical values are presumed to be # indexes into a Tensor). # - The largest possible integer exactly representable under the given # floating-point dtype (since we need to cast to/from). # # The chosen floating-point thresholds are 2**(1 + mantissa_bits). # For more details, see: # https://en.wikipedia.org/wiki/Floating-point_arithmetic#Internal_representation x_dtype = dtype_util.base_dtype(x.dtype) max_event_size = ( _largest_integer_by_dtype(x_dtype) if dtype_util.is_floating(x_dtype) else 0) if max_event_size is 0: raise TypeError("Unable to validate size of unrecognized dtype " "({}).".format(dtype_util.name(x_dtype))) try: x_shape_static = tensorshape_util.with_rank_at_least(x.shape, 1) except ValueError: raise ValueError("A categorical-distribution parameter must have " "at least 1 dimension.") event_size = tf.compat.dimension_value(x_shape_static[-1]) if event_size is not None: if event_size < 2: raise ValueError("A categorical-distribution parameter must have at " "least 2 events.") if event_size > max_event_size: raise ValueError("Number of classes exceeds `dtype` precision, i.e., " "{} implies shape ({}) cannot exceed {}.".format( dtype_util.name(x_dtype), event_size, max_event_size)) return x else: event_size = tf.shape(input=x, out_type=tf.int64, name="x_shape")[-1] return with_dependencies([ assert_util.assert_rank_at_least( x, 1, message=("A categorical-distribution parameter must have " "at least 1 dimension.")), assert_util.assert_greater_equal( tf.shape(input=x)[-1], 2, message=("A categorical-distribution parameter must have at " "least 2 events.")), assert_util.assert_less_equal( event_size, tf.convert_to_tensor(max_event_size, dtype=tf.int64), message="Number of classes exceeds `dtype` precision, " "i.e., {} dtype cannot exceed {} shape.".format( dtype_util.name(x_dtype), max_event_size)), ], x)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/nest_util.py#L185-L210
def call_fn(fn, args):
  """ """
  if expand_as_args(args):
    return fn(*args)
  elif _expand_as_kwargs(args):
    return fn(**args)
  else:
    return fn(args)
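A sketch of the intended dispatch, assuming `expand_as_args` treats plain lists and tuples as positional arguments and `_expand_as_kwargs` treats dicts as keyword arguments (that is how the helper is used elsewhere in `nest_util`; the exact predicates live in that module):

call_fn(lambda a, b: a + b, (1, 2))            # expanded as positional args -> 3
call_fn(lambda a, b: a + b, {'a': 1, 'b': 2})  # expanded as keyword args -> 3
call_fn(lambda x: x * 2, 5)                    # passed through as a single arg -> 10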
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_sql_hook.py#L163-L180
def delete_instance(self, instance, project_id=None):
    """ """
    response = self.get_conn().instances().delete(
        project=project_id,
        instance=instance,
    ).execute(num_retries=self.num_retries)
    operation_name = response["name"]
    self._wait_for_operation_to_complete(project_id=project_id,
                                         operation_name=operation_name)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L349-L360
def new(params, event_size, validate_args=False, name=None):
  """"""
  with tf.compat.v1.name_scope(name, 'MultivariateNormalTriL',
                               [params, event_size]):
    params = tf.convert_to_tensor(value=params, name='params')
    scale_tril = tfb.ScaleTriL(
        diag_shift=np.array(1e-5, params.dtype.as_numpy_dtype()),
        validate_args=validate_args)
    return tfd.MultivariateNormalTriL(
        loc=params[..., :event_size],
        scale_tril=scale_tril(params[..., event_size:]),
        validate_args=validate_args)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/emr_hook.py#L39-L57
def create_job_flow(self, job_flow_overrides):
    """ """
    if not self.emr_conn_id:
        raise AirflowException('emr_conn_id must be present to use create_job_flow')

    emr_conn = self.get_connection(self.emr_conn_id)

    config = emr_conn.extra_dejson.copy()
    config.update(job_flow_overrides)

    response = self.get_conn().run_job_flow(**config)

    return response
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L363-L367
def params_size(event_size, name=None):
  """"""
  with tf.compat.v1.name_scope(name, 'MultivariateNormalTriL_params_size',
                               [event_size]):
    return event_size + event_size * (event_size + 1) // 2
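Worked example of the parameter count above: for event_size = 3 the layer needs 3 location parameters plus 3 * 4 / 2 = 6 lower-triangular scale parameters, 9 in total.

event_size = 3
n_params = event_size + event_size * (event_size + 1) // 2  # 3 + 6 = 9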
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/bayesian_neural_network.py#L90-L121
def plot_weight_posteriors(names, qm_vals, qs_vals, fname):
  """ """
  fig = figure.Figure(figsize=(6, 3))
  canvas = backend_agg.FigureCanvasAgg(fig)

  ax = fig.add_subplot(1, 2, 1)
  for n, qm in zip(names, qm_vals):
    sns.distplot(qm.flatten(), ax=ax, label=n)
  ax.set_title("weight means")
  ax.set_xlim([-1.5, 1.5])
  ax.legend()

  ax = fig.add_subplot(1, 2, 2)
  for n, qs in zip(names, qs_vals):
    sns.distplot(qs.flatten(), ax=ax)
  ax.set_title("weight stddevs")
  ax.set_xlim([0, 1.])

  fig.tight_layout()
  canvas.print_figure(fname, format="png")
  print("saved {}".format(fname))
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/backend/numpy/math.py#L172-L180
def _max_mask_non_finite(x, axis=-1, keepdims=False, mask=0):
  """"""
  m = np.max(x, axis=_astuple(axis), keepdims=keepdims)
  needs_masking = ~np.isfinite(m)
  if needs_masking.ndim > 0:
    m[needs_masking] = mask
  elif needs_masking:
    m = mask
  return m
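A small NumPy illustration of the masking behaviour (a sketch; `_astuple` is an internal helper of the same module, so these calls assume the function is exercised inside that module):

import numpy as np

_max_mask_non_finite(np.array([1., 2., 3.]))                 # 3.0 (finite max passes through)
_max_mask_non_finite(np.array([1., np.nan, 3.]))             # 0   (non-finite max replaced by `mask`)
_max_mask_non_finite(np.array([1., np.inf, 3.]), mask=-1.)   # -1.0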
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1973-L1990
def _bq_cast(string_field, bq_type):
    """ """
    if string_field is None:
        return None
    elif bq_type == 'INTEGER':
        return int(string_field)
    elif bq_type == 'FLOAT' or bq_type == 'TIMESTAMP':
        return float(string_field)
    elif bq_type == 'BOOLEAN':
        if string_field not in ['true', 'false']:
            raise ValueError("{} must have value 'true' or 'false'".format(
                string_field))
        return string_field == 'true'
    else:
        return string_field
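Illustrative conversions performed by the helper above:

_bq_cast('42', 'INTEGER')    # 42
_bq_cast('3.14', 'FLOAT')    # 3.14
_bq_cast('true', 'BOOLEAN')  # True
_bq_cast(None, 'STRING')     # None
_bq_cast('hello', 'STRING')  # 'hello'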
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/vae.py#L228-L268
def make_decoder(activation, latent_size, output_shape, base_depth): """ """ deconv = functools.partial( tf.keras.layers.Conv2DTranspose, padding="SAME", activation=activation) conv = functools.partial( tf.keras.layers.Conv2D, padding="SAME", activation=activation) decoder_net = tf.keras.Sequential([ deconv(2 * base_depth, 7, padding="VALID"), deconv(2 * base_depth, 5), deconv(2 * base_depth, 5, 2), deconv(base_depth, 5), deconv(base_depth, 5, 2), deconv(base_depth, 5), conv(output_shape[-1], 5, activation=None), ]) def decoder(codes): original_shape = tf.shape(input=codes) # Collapse the sample and batch dimension and convert to rank-4 tensor for # use with a convolutional decoder network. codes = tf.reshape(codes, (-1, 1, 1, latent_size)) logits = decoder_net(codes) logits = tf.reshape( logits, shape=tf.concat([original_shape[:-1], output_shape], axis=0)) return tfd.Independent(tfd.Bernoulli(logits=logits), reinterpreted_batch_ndims=len(output_shape), name="image") return decoder
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/transpose.py#L291-L318
def _maybe_validate_perm(perm, validate_args, name=None): """""" with tf.name_scope(name or 'maybe_validate_perm'): assertions = [] if not dtype_util.is_integer(perm.dtype): raise TypeError('`perm` must be integer type') msg = '`perm` must be a vector.' if tensorshape_util.rank(perm.shape) is not None: if tensorshape_util.rank(perm.shape) != 1: raise ValueError( msg[:-1] + ', saw rank: {}.'.format(tensorshape_util.rank(perm.shape))) elif validate_args: assertions += [assert_util.assert_rank(perm, 1, message=msg)] perm_ = tf.get_static_value(perm) msg = '`perm` must be a valid permutation vector.' if perm_ is not None: if not np.all(np.arange(np.size(perm_)) == np.sort(perm_)): raise ValueError(msg[:-1] + ', saw: {}.'.format(perm_)) elif validate_args: assertions += [ assert_util.assert_equal( tf.sort(perm), tf.range(tf.size(input=perm)), message=msg) ] return assertions
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L537-L554
def adjust_brightness(img, brightness_factor):
    """ """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Brightness(img)
    img = enhancer.enhance(brightness_factor)
    return img
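Typical usage (a sketch with a placeholder file path): the factor is multiplicative, so 0 gives a black image, 1 returns the input unchanged, and values above 1 brighten it.

from PIL import Image

img = Image.open('example.jpg')        # placeholder path
brighter = adjust_brightness(img, 1.5)
darker = adjust_brightness(img, 0.5)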
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L165-L178
def get_product_set(
    self, location, product_set_id, project_id=None, retry=None, timeout=None, metadata=None
):
    """ """
    client = self.get_conn()
    name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
    self.log.info('Retrieving ProductSet: %s', name)
    response = client.get_product_set(name=name, retry=retry, timeout=timeout, metadata=metadata)
    self.log.info('ProductSet retrieved.')
    self.log.debug('ProductSet retrieved:\n%s', response)
    return MessageToDict(response)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/random_variable.py#L32-L44
def _operator(attr):
  """ """
  @functools.wraps(attr)
  def func(a, *args):
    return attr(a.value, *args)
  return func
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/proximal_hessian_sparse.py#L74-L105
def _sparse_or_dense_matmul_onehot(sparse_or_dense_matrix, col_index):
  """ """
  if isinstance(sparse_or_dense_matrix,
                (tf.SparseTensor, tf.compat.v1.SparseTensorValue)):
    # TODO(b/111924846): Implement better (ideally in a way that allows us to
    # eliminate the `num_rows` arg, if possible).
    num_rows = _get_shape(sparse_or_dense_matrix)[-2]
    batch_shape = _get_shape(sparse_or_dense_matrix)[:-2]
    slice_start = tf.concat([tf.zeros_like(batch_shape), [0, col_index]],
                            axis=0)
    slice_size = tf.concat([batch_shape, [num_rows, 1]], axis=0)
    # We momentarily lose static shape information in tf.sparse_slice. However
    # we regain it in the following tf.reshape.
    sparse_slice = tf.sparse.slice(sparse_or_dense_matrix,
                                   tf.cast(slice_start, tf.int64),
                                   tf.cast(slice_size, tf.int64))

    output_shape = tf.concat([batch_shape, [num_rows]], axis=0)
    return tf.reshape(tf.sparse.to_dense(sparse_slice), output_shape)
  else:
    return tf.gather(sparse_or_dense_matrix, col_index, axis=-1)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/structural_time_series.py#L134-L159
def make_state_space_model(self,
                           num_timesteps,
                           param_vals=None,
                           initial_state_prior=None,
                           initial_step=0):
  """ """
  return self._make_state_space_model(
      num_timesteps=num_timesteps,
      param_map=self._canonicalize_param_vals_as_map(param_vals),
      initial_state_prior=initial_state_prior,
      initial_step=initial_step)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/chain.py#L40-L109
def _compute_min_event_ndims(bijector_list, compute_forward=True): """ """ min_event_ndims = 0 # This is a mouthful, but what this encapsulates is that if not for rank # changing bijectors, we'd only need to compute the largest of the min # required ndims. Hence "max_min". Due to rank changing bijectors, we need to # account for synthetic rank growth / synthetic rank decrease from a rank # changing bijector. rank_changed_adjusted_max_min_event_ndims = 0 if compute_forward: bijector_list = reversed(bijector_list) for b in bijector_list: if compute_forward: current_min_event_ndims = b.forward_min_event_ndims current_inverse_min_event_ndims = b.inverse_min_event_ndims else: current_min_event_ndims = b.inverse_min_event_ndims current_inverse_min_event_ndims = b.forward_min_event_ndims # New dimensions were touched. if rank_changed_adjusted_max_min_event_ndims < current_min_event_ndims: min_event_ndims += ( current_min_event_ndims - rank_changed_adjusted_max_min_event_ndims) rank_changed_adjusted_max_min_event_ndims = max( current_min_event_ndims, rank_changed_adjusted_max_min_event_ndims) # If the number of dimensions has increased via forward, then # inverse_min_event_ndims > forward_min_event_ndims, and hence the # dimensions we computed on, have moved left (so we have operated # on additional dimensions). # Conversely, if the number of dimensions has decreased via forward, # then we have inverse_min_event_ndims < forward_min_event_ndims, # and so we will have operated on fewer right most dimensions. number_of_changed_dimensions = ( current_min_event_ndims - current_inverse_min_event_ndims) rank_changed_adjusted_max_min_event_ndims -= number_of_changed_dimensions return min_event_ndims
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/tensorshape_util.py#L192-L211
def merge_with(x, other):
  """ """
  return type(x)(tf.TensorShape(x).merge_with(other))
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/vq_vae.py#L324-L350
def visualize_training(images_val, reconstructed_images_val, random_images_val,
                       log_dir, prefix, viz_n=10):
  """ """
  save_imgs(images_val[:viz_n],
            os.path.join(log_dir, "{}_inputs.png".format(prefix)))
  save_imgs(reconstructed_images_val[:viz_n],
            os.path.join(log_dir, "{}_reconstructions.png".format(prefix)))

  if random_images_val is not None:
    save_imgs(random_images_val[:viz_n],
              os.path.join(log_dir, "{}_prior_samples.png".format(prefix)))
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/kubernetes/worker_configuration.py#L45-L131
def _get_init_containers(self): """""" # If we're using volume claims to mount the dags, no init container is needed if self.kube_config.dags_volume_claim or \ self.kube_config.dags_volume_host or self.kube_config.dags_in_image: return [] # Otherwise, define a git-sync init container init_environment = [{ 'name': 'GIT_SYNC_REPO', 'value': self.kube_config.git_repo }, { 'name': 'GIT_SYNC_BRANCH', 'value': self.kube_config.git_branch }, { 'name': 'GIT_SYNC_ROOT', 'value': self.kube_config.git_sync_root }, { 'name': 'GIT_SYNC_DEST', 'value': self.kube_config.git_sync_dest }, { 'name': 'GIT_SYNC_DEPTH', 'value': '1' }, { 'name': 'GIT_SYNC_ONE_TIME', 'value': 'true' }] if self.kube_config.git_user: init_environment.append({ 'name': 'GIT_SYNC_USERNAME', 'value': self.kube_config.git_user }) if self.kube_config.git_password: init_environment.append({ 'name': 'GIT_SYNC_PASSWORD', 'value': self.kube_config.git_password }) volume_mounts = [{ 'mountPath': self.kube_config.git_sync_root, 'name': self.dags_volume_name, 'readOnly': False }] if self.kube_config.git_ssh_key_secret_name: volume_mounts.append({ 'name': self.git_sync_ssh_secret_volume_name, 'mountPath': '/etc/git-secret/ssh', 'subPath': 'ssh' }) init_environment.extend([ { 'name': 'GIT_SSH_KEY_FILE', 'value': '/etc/git-secret/ssh' }, { 'name': 'GIT_SYNC_SSH', 'value': 'true' }]) if self.kube_config.git_ssh_known_hosts_configmap_name: volume_mounts.append({ 'name': self.git_sync_ssh_known_hosts_volume_name, 'mountPath': '/etc/git-secret/known_hosts', 'subPath': 'known_hosts' }) init_environment.extend([ { 'name': 'GIT_KNOWN_HOSTS', 'value': 'true' }, { 'name': 'GIT_SSH_KNOWN_HOSTS_FILE', 'value': '/etc/git-secret/known_hosts' } ]) else: init_environment.append({ 'name': 'GIT_KNOWN_HOSTS', 'value': 'false' }) return [{ 'name': self.kube_config.git_sync_init_container_name, 'image': self.kube_config.git_sync_container, 'securityContext': {'runAsUser': 65533}, # git-sync user 'env': init_environment, 'volumeMounts': volume_mounts }]
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_transfer_hook.py#L361-L397
def operations_contain_expected_statuses(operations, expected_statuses):
    """ """
    expected_statuses = (
        {expected_statuses} if isinstance(expected_statuses, six.string_types) else set(expected_statuses)
    )
    if len(operations) == 0:
        return False

    current_statuses = {operation[METADATA][STATUS] for operation in operations}

    if len(current_statuses - set(expected_statuses)) != len(current_statuses):
        return True

    if len(NEGATIVE_STATUSES - current_statuses) != len(NEGATIVE_STATUSES):
        raise AirflowException(
            'An unexpected operation status was encountered. Expected: {}'.format(
                ", ".join(expected_statuses)
            )
        )
    return False
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/fun_mcmc/fun_mcmc_lib.py#L160-L178
def maybe_broadcast_structure(from_structure: Any, to_structure: Any) -> Any:
  """ """
  flat_from = tf.nest.flatten(from_structure)
  flat_to = tf.nest.flatten(to_structure)
  if len(flat_from) == 1:
    flat_from *= len(flat_to)
  return tf.nest.pack_sequence_as(to_structure, flat_from)
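A sketch of the broadcast rule above: a single-element structure is repeated to match the target structure, anything else is repacked as-is.

maybe_broadcast_structure(1., [0., 0.])        # [1.0, 1.0]
maybe_broadcast_structure([1., 2.], [0., 0.])  # [1.0, 2.0]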
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L330-L524
def create_external_table(self,
                          external_project_dataset_table,
                          schema_fields,
                          source_uris,
                          source_format='CSV',
                          autodetect=False,
                          compression='NONE',
                          ignore_unknown_values=False,
                          max_bad_records=0,
                          skip_leading_rows=0,
                          field_delimiter=',',
                          quote_character=None,
                          allow_quoted_newlines=False,
                          allow_jagged_rows=False,
                          src_fmt_configs=None,
                          labels=None
                          ):
    """ """
    if src_fmt_configs is None:
        src_fmt_configs = {}
    project_id, dataset_id, external_table_id = \
        _split_tablename(table_input=external_project_dataset_table,
                         default_project_id=self.project_id,
                         var_name='external_project_dataset_table')

    # bigquery only allows certain source formats
    # we check to make sure the passed source format is valid
    # if it's not, we raise a ValueError
    # Refer to this link for more details:
    # https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.sourceFormat

    source_format = source_format.upper()
    allowed_formats = [
        "CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS",
        "DATASTORE_BACKUP", "PARQUET"
    ]
    if source_format not in allowed_formats:
        raise ValueError("{0} is not a valid source format. "
                         "Please use one of the following types: {1}"
                         .format(source_format, allowed_formats))

    compression = compression.upper()
    allowed_compressions = ['NONE', 'GZIP']
    if compression not in allowed_compressions:
        raise ValueError("{0} is not a valid compression format. "
                         "Please use one of the following types: {1}"
                         .format(compression, allowed_compressions))

    table_resource = {
        'externalDataConfiguration': {
            'autodetect': autodetect,
            'sourceFormat': source_format,
            'sourceUris': source_uris,
            'compression': compression,
            'ignoreUnknownValues': ignore_unknown_values
        },
        'tableReference': {
            'projectId': project_id,
            'datasetId': dataset_id,
            'tableId': external_table_id,
        }
    }

    if schema_fields:
        table_resource['externalDataConfiguration'].update({
            'schema': {
                'fields': schema_fields
            }
        })

    self.log.info('Creating external table: %s', external_project_dataset_table)

    if max_bad_records:
        table_resource['externalDataConfiguration']['maxBadRecords'] = max_bad_records

    # if following fields are not specified in src_fmt_configs,
    # honor the top-level params for backward-compatibility
    if 'skipLeadingRows' not in src_fmt_configs:
        src_fmt_configs['skipLeadingRows'] = skip_leading_rows
    if 'fieldDelimiter' not in src_fmt_configs:
        src_fmt_configs['fieldDelimiter'] = field_delimiter
    if 'quote_character' not in src_fmt_configs:
        src_fmt_configs['quote'] = quote_character
    if 'allowQuotedNewlines' not in src_fmt_configs:
        src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines
    if 'allowJaggedRows' not in src_fmt_configs:
        src_fmt_configs['allowJaggedRows'] = allow_jagged_rows

    src_fmt_to_param_mapping = {
        'CSV': 'csvOptions',
        'GOOGLE_SHEETS': 'googleSheetsOptions'
    }

    src_fmt_to_configs_mapping = {
        'csvOptions': [
            'allowJaggedRows', 'allowQuotedNewlines',
            'fieldDelimiter', 'skipLeadingRows',
            'quote'
        ],
        'googleSheetsOptions': ['skipLeadingRows']
    }

    if source_format in src_fmt_to_param_mapping.keys():
        valid_configs = src_fmt_to_configs_mapping[
            src_fmt_to_param_mapping[source_format]
        ]

        src_fmt_configs = {
            k: v
            for k, v in src_fmt_configs.items() if k in valid_configs
        }

        table_resource['externalDataConfiguration'][src_fmt_to_param_mapping[
            source_format]] = src_fmt_configs

    if labels:
        table_resource['labels'] = labels

    try:
        self.service.tables().insert(
            projectId=project_id,
            datasetId=dataset_id,
            body=table_resource
        ).execute(num_retries=self.num_retries)

        self.log.info('External table created successfully: %s',
                      external_project_dataset_table)
    except HttpError as err:
        raise Exception(
            'BigQuery job failed. Error was: {}'.format(err.content)
        )
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/transformed_distribution.py#L657-L666
def _maybe_rotate_dims(self, x, rotate_right=False):
  """"""
  needs_rotation_const = tf.get_static_value(self._needs_rotation)
  if needs_rotation_const is not None and not needs_rotation_const:
    return x
  ndims = prefer_static.rank(x)
  n = (ndims - self._rotate_ndims) if rotate_right else self._rotate_ndims
  perm = prefer_static.concat([
      prefer_static.range(n, ndims), prefer_static.range(0, n)], axis=0)
  return tf.transpose(a=x, perm=perm)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/nelder_mead.py#L792-L829
def _prepare_args_with_initial_vertex(objective_function,
                                      initial_vertex,
                                      step_sizes,
                                      objective_at_initial_vertex,
                                      batch_evaluate_objective):
  """"""
  dim = tf.size(input=initial_vertex)
  num_vertices = dim + 1
  unit_vectors_along_axes = tf.reshape(
      tf.eye(dim, dim, dtype=initial_vertex.dtype.base_dtype),
      tf.concat([[dim], tf.shape(input=initial_vertex)], axis=0))

  # If step_sizes does not broadcast to initial_vertex, the multiplication
  # in the second term will fail.
  simplex_face = initial_vertex + step_sizes * unit_vectors_along_axes
  simplex = tf.concat([tf.expand_dims(initial_vertex, axis=0), simplex_face],
                      axis=0)
  num_evaluations = 0
  # Evaluate the objective function at the simplex vertices.
  if objective_at_initial_vertex is None:
    objective_at_initial_vertex = objective_function(initial_vertex)
    num_evaluations += 1

  objective_at_simplex_face, num_evals = _evaluate_objective_multiple(
      objective_function, simplex_face, batch_evaluate_objective)
  num_evaluations += num_evals
  objective_at_simplex = tf.concat(
      [
          tf.expand_dims(objective_at_initial_vertex, axis=0),
          objective_at_simplex_face
      ], axis=0)

  return (dim,
          num_vertices,
          simplex,
          objective_at_simplex,
          num_evaluations)
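A minimal NumPy sketch (illustrative only; no batching, dense float inputs assumed) of how the initial simplex above is laid out: the supplied vertex plus one extra vertex per coordinate axis, each offset by the corresponding step size.

import numpy as np

initial_vertex = np.array([1.0, 2.0, 3.0])
step_sizes = np.array([0.1, 0.2, 0.3])
# One offset vertex per axis: row i moves only coordinate i.
simplex_face = initial_vertex + step_sizes * np.eye(3)
simplex = np.vstack([initial_vertex, simplex_face])
print(simplex.shape)  # (4, 3): dim + 1 vertices for a 3-dimensional problem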
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1915-L1927
def fetchall(self):
    """ """
    result = []
    while True:
        one = self.fetchone()
        if one is None:
            break
        else:
            result.append(one)
    return result
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L157-L178
def configure_s3_resources(self, config):
    """ """
    s3_operations = config.pop('S3Operations', None)

    if s3_operations is not None:
        create_bucket_ops = s3_operations.get('S3CreateBucket', [])
        upload_ops = s3_operations.get('S3Upload', [])
        for op in create_bucket_ops:
            self.s3_hook.create_bucket(bucket_name=op['Bucket'])
        for op in upload_ops:
            if op['Tar']:
                self.tar_and_s3_upload(op['Path'], op['Key'],
                                       op['Bucket'])
            else:
                self.s3_hook.load_file(op['Path'], op['Key'], op['Bucket'])
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/vector_diffeomixture.py#L899-L933
def linop_scale(w, op):
  """"""
  # We assume w > 0. (This assumption only relates to the is_* attributes.)
  with tf.name_scope("linop_scale"):
    # TODO(b/35301104): LinearOperatorComposition doesn't combine operators, so
    # special case combinations here. Once it does, this function can be
    # replaced by:
    #     return linop_composition_lib.LinearOperatorComposition([
    #         scaled_identity(w), op])
    def scaled_identity(w):
      return tf.linalg.LinearOperatorScaledIdentity(
          num_rows=op.range_dimension_tensor(),
          multiplier=w,
          is_non_singular=op.is_non_singular,
          is_self_adjoint=op.is_self_adjoint,
          is_positive_definite=op.is_positive_definite)

    if isinstance(op, tf.linalg.LinearOperatorIdentity):
      return scaled_identity(w)
    if isinstance(op, tf.linalg.LinearOperatorScaledIdentity):
      return scaled_identity(w * op.multiplier)
    if isinstance(op, tf.linalg.LinearOperatorDiag):
      return tf.linalg.LinearOperatorDiag(
          diag=w[..., tf.newaxis] * op.diag_part(),
          is_non_singular=op.is_non_singular,
          is_self_adjoint=op.is_self_adjoint,
          is_positive_definite=op.is_positive_definite)
    if isinstance(op, tf.linalg.LinearOperatorLowerTriangular):
      return tf.linalg.LinearOperatorLowerTriangular(
          tril=w[..., tf.newaxis, tf.newaxis] * op.to_dense(),
          is_non_singular=op.is_non_singular,
          is_self_adjoint=op.is_self_adjoint,
          is_positive_definite=op.is_positive_definite)
    raise NotImplementedError(
        "Unsupported Linop type ({})".format(type(op).__name__))
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/von_mises_fisher.py#L36-L73
def _bessel_ive(v, z, cache=None):
  """"""
  # TODO(b/67497980): Switch to a more numerically faithful implementation.
  z = tf.convert_to_tensor(value=z)

  wrap = lambda result: tf.debugging.check_numerics(result,
                                                    'besseli{}'.format(v))

  if float(v) >= 2:
    raise ValueError(
        'Evaluating bessel_i by recurrence becomes imprecise for large v')

  cache = cache or {}
  safe_z = tf.where(z > 0, z, tf.ones_like(z))
  if v in cache:
    return wrap(cache[v])
  if v == 0:
    cache[v] = tf.math.bessel_i0e(z)
  elif v == 1:
    cache[v] = tf.math.bessel_i1e(z)
  elif v == 0.5:
    # sinh(x)*exp(-abs(x)), sinh(x) = (e^x - e^{-x}) / 2
    sinhe = lambda x: (tf.exp(x - tf.abs(x)) - tf.exp(-x - tf.abs(x))) / 2
    cache[v] = (
        np.sqrt(2 / np.pi) * sinhe(z) *
        tf.where(z > 0, tf.math.rsqrt(safe_z), tf.ones_like(safe_z)))
  elif v == -0.5:
    # cosh(x)*exp(-abs(x)), cosh(x) = (e^x + e^{-x}) / 2
    coshe = lambda x: (tf.exp(x - tf.abs(x)) + tf.exp(-x - tf.abs(x))) / 2
    cache[v] = (
        np.sqrt(2 / np.pi) * coshe(z) *
        tf.where(z > 0, tf.math.rsqrt(safe_z), tf.ones_like(safe_z)))
  if v <= 1:
    return wrap(cache[v])
  # Recurrence relation:
  cache[v] = (_bessel_ive(v - 2, z, cache) -
              (2 * (v - 1)) * _bessel_ive(v - 1, z, cache) / z)
  return wrap(cache[v])
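A quick NumPy/SciPy sanity check (not part of the snippet above; SciPy is assumed to be available) of the recurrence it relies on, I_v(z) = I_{v-2}(z) - 2(v-1)/z * I_{v-1}(z), which also holds for the exponentially scaled ive(v, z) = I_v(z) * exp(-|z|) since the scaling factor is common to all three terms.

import numpy as np
from scipy.special import ive

z = np.linspace(0.5, 10.0, 5)
v = 1.5
# Rebuild ive(1.5, z) from the half-integer orders below it.
recurrence = ive(v - 2, z) - 2 * (v - 1) / z * ive(v - 1, z)
print(np.allclose(recurrence, ive(v, z)))  # True (up to rounding)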
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L1535-L1574
def build_kalman_mean_step(get_transition_matrix_for_timestep,
                           get_transition_noise_for_timestep,
                           get_observation_matrix_for_timestep,
                           get_observation_noise_for_timestep):
  """ """

  def mean_step(previous_means, t):
    """Single step of prior mean recursion."""
    previous_latent_mean, _ = previous_means

    latent_mean = _propagate_mean(previous_latent_mean,
                                  get_transition_matrix_for_timestep(t - 1),
                                  get_transition_noise_for_timestep(t - 1))
    observation_mean = _propagate_mean(latent_mean,
                                       get_observation_matrix_for_timestep(t),
                                       get_observation_noise_for_timestep(t))
    return (latent_mean, observation_mean)

  return mean_step
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L920-L938
def get_records(self, hql, schema='default', hive_conf=None):
    """ """
    return self.get_results(hql, schema=schema, hive_conf=hive_conf)['data']
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_natural_language_hook.py#L56-L80
def analyze_entities(self, document, encoding_type=None, retry=None, timeout=None, metadata=None):
    """ """
    client = self.get_conn()

    return client.analyze_entities(
        document=document,
        encoding_type=encoding_type,
        retry=retry,
        timeout=timeout,
        metadata=metadata
    )
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/salesforce_hook.py#L76-L93
def make_query(self, query):
    """ """
    conn = self.get_conn()

    self.log.info("Querying for all objects")
    query_results = conn.query_all(query)

    self.log.info("Received results: Total size: %s; Done: %s",
                  query_results['totalSize'], query_results['done'])

    return query_results
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/api/common/experimental/mark_tasks.py#L30-L53
def _create_dagruns(dag, execution_dates, state, run_id_template):
    """ """
    # find out if we need to create any dag runs
    drs = DagRun.find(dag_id=dag.dag_id, execution_date=execution_dates)
    dates_to_create = list(set(execution_dates) -
                           set([dr.execution_date for dr in drs]))

    for date in dates_to_create:
        dr = dag.create_dagrun(
            run_id=run_id_template.format(date.isoformat()),
            execution_date=date,
            start_date=timezone.utcnow(),
            external_trigger=False,
            state=state,
        )
        drs.append(dr)

    return drs
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/von_mises_fisher.py#L358-L385
def _sample_3d(self, n, seed=None):
  """"""
  seed = seed_stream.SeedStream(seed, salt='von_mises_fisher_3d')
  u_shape = tf.concat([[n], self._batch_shape_tensor()], axis=0)
  z = tf.random.uniform(u_shape, seed=seed(), dtype=self.dtype)
  # TODO(bjp): Higher-order odd dim analytic CDFs are available in [1], could
  # be bisected for bounded sampling runtime (i.e. not rejection sampling).
  # [1]: Inversion sampler via: https://ieeexplore.ieee.org/document/7347705/
  # The inversion is: u = 1 + log(z + (1-z)*exp(-2*kappa)) / kappa
  # We must protect against both kappa and z being zero.
  safe_conc = tf.where(self.concentration > 0, self.concentration,
                       tf.ones_like(self.concentration))
  safe_z = tf.where(z > 0, z, tf.ones_like(z))
  safe_u = 1 + tf.reduce_logsumexp(
      input_tensor=[tf.math.log(safe_z),
                    tf.math.log1p(-safe_z) - 2 * safe_conc],
      axis=0) / safe_conc
  # Limit of the above expression as kappa->0 is 2*z-1
  u = tf.where(self.concentration > tf.zeros_like(safe_u), safe_u, 2 * z - 1)
  # Limit of the expression as z->0 is -1.
  u = tf.where(tf.equal(z, 0), -tf.ones_like(u), u)
  if not self._allow_nan_stats:
    u = tf.debugging.check_numerics(u, 'u in _sample_3d')
  return u[..., tf.newaxis]
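A small NumPy sketch (not library code) checking that the log-sum-exp form used above agrees with the direct inversion formula u = 1 + log(z + (1 - z) * exp(-2 * kappa)) / kappa away from the guarded edge cases.

import numpy as np

kappa = 2.5
z = np.linspace(0.05, 0.95, 5)
direct = 1 + np.log(z + (1 - z) * np.exp(-2 * kappa)) / kappa
logsumexp_form = 1 + np.logaddexp(np.log(z), np.log1p(-z) - 2 * kappa) / kappa
print(np.allclose(direct, logsumexp_form))  # True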
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L354-L375
def get_broadcast_shape(*tensors):
  """ """
  # Try static.
  s_shape = tensors[0].shape
  for t in tensors[1:]:
    s_shape = tf.broadcast_static_shape(s_shape, t.shape)
  if tensorshape_util.is_fully_defined(s_shape):
    return tensorshape_util.as_list(s_shape)

  # Fallback on dynamic.
  d_shape = tf.shape(input=tensors[0])
  for t in tensors[1:]:
    d_shape = tf.broadcast_dynamic_shape(d_shape, tf.shape(input=t))
  return d_shape
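For intuition only: the broadcasting rules this helper resolves are the standard NumPy ones, so a quick check is possible with np.broadcast_shapes (NumPy >= 1.20 assumed).

import numpy as np

# Shapes are aligned from the right; size-1 axes stretch to match.
print(np.broadcast_shapes((5, 1, 3), (4, 1), (5, 4, 3)))  # (5, 4, 3)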
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_container_hook.py#L131-L168
def delete_cluster(self, name, project_id=None, retry=DEFAULT, timeout=DEFAULT):
    """ """
    self.log.info(
        "Deleting (project_id=%s, zone=%s, cluster_id=%s)",
        self.project_id, self.location, name
    )

    try:
        op = self.get_client().delete_cluster(project_id=project_id or self.project_id,
                                              zone=self.location,
                                              cluster_id=name,
                                              retry=retry,
                                              timeout=timeout)
        op = self.wait_for_operation(op)
        # Returns server-defined url for the resource
        return op.self_link
    except NotFound as error:
        self.log.info('Assuming Success: %s', error.message)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L1084-L1103
def new(params, event_shape=(), validate_args=False, name=None):
  """"""
  with tf.compat.v1.name_scope(name, 'IndependentPoisson',
                               [params, event_shape]):
    params = tf.convert_to_tensor(value=params, name='params')
    event_shape = dist_util.expand_to_vector(
        tf.convert_to_tensor(
            value=event_shape, name='event_shape', dtype_hint=tf.int32),
        tensor_name='event_shape')
    output_shape = tf.concat([
        tf.shape(input=params)[:-1],
        event_shape,
    ], axis=0)
    return tfd.Independent(
        tfd.Poisson(
            log_rate=tf.reshape(params, output_shape),
            validate_args=validate_args),
        reinterpreted_batch_ndims=tf.size(input=event_shape),
        validate_args=validate_args)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/reshape.py#L243-L313
def _replace_event_shape_in_shape_tensor(
    input_shape, event_shape_in, event_shape_out, validate_args):
  """ """
  output_tensorshape, is_validated = _replace_event_shape_in_tensorshape(
      tensorshape_util.constant_value_as_shape(input_shape),
      event_shape_in,
      event_shape_out)

  # TODO(b/124240153): Remove map(tf.identity, deps) once tf.function
  # correctly supports control_dependencies.
  validation_dependencies = (
      map(tf.identity, (event_shape_in, event_shape_out))
      if validate_args else ())

  if (tensorshape_util.is_fully_defined(output_tensorshape) and
      (is_validated or not validate_args)):
    with tf.control_dependencies(validation_dependencies):
      output_shape = tf.convert_to_tensor(
          value=output_tensorshape, name='output_shape', dtype_hint=tf.int32)
    return output_shape, output_tensorshape

  with tf.control_dependencies(validation_dependencies):
    event_shape_in_ndims = (
        tf.size(input=event_shape_in)
        if tensorshape_util.num_elements(event_shape_in.shape) is None
        else tensorshape_util.num_elements(event_shape_in.shape))
    input_non_event_shape, input_event_shape = tf.split(
        input_shape, num_or_size_splits=[-1, event_shape_in_ndims])

  additional_assertions = []
  if is_validated:
    pass
  elif validate_args:
    # Check that `input_event_shape` and `event_shape_in` are compatible in
    # the sense that they have equal entries in any position that isn't a
    # `-1` in `event_shape_in`. Note that our validations at construction
    # time ensure there is at most one such entry in `event_shape_in`.
    mask = event_shape_in >= 0
    explicit_input_event_shape = tf.boolean_mask(
        tensor=input_event_shape, mask=mask)
    explicit_event_shape_in = tf.boolean_mask(
        tensor=event_shape_in, mask=mask)
    additional_assertions.append(
        assert_util.assert_equal(
            explicit_input_event_shape,
            explicit_event_shape_in,
            message='Input `event_shape` does not match `event_shape_in`.'))
    # We don't explicitly additionally verify
    # `tf.size(input_shape) > tf.size(event_shape_in)` since `tf.split`
    # already makes this assertion.

  with tf.control_dependencies(additional_assertions):
    output_shape = tf.concat([input_non_event_shape, event_shape_out],
                             axis=0,
                             name='output_shape')

  return output_shape, output_tensorshape
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_video_intelligence_hook.py#L51-L105
def annotate_video(
    self,
    input_uri=None,
    input_content=None,
    features=None,
    video_context=None,
    output_uri=None,
    location=None,
    retry=None,
    timeout=None,
    metadata=None,
):
    """ """
    client = self.get_conn()

    return client.annotate_video(
        input_uri=input_uri,
        input_content=input_content,
        features=features,
        video_context=video_context,
        output_uri=output_uri,
        location_id=location,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L39-L82
def mixture_stddev(mixture_weight_vector, mean_vector, stddev_vector):
  """ """
  tensorshape_util.assert_has_rank(mixture_weight_vector.shape, 2)
  if not tensorshape_util.is_compatible_with(mean_vector.shape,
                                             mixture_weight_vector.shape):
    raise ValueError("Expecting means to have same shape as mixture weights.")
  if not tensorshape_util.is_compatible_with(stddev_vector.shape,
                                             mixture_weight_vector.shape):
    raise ValueError("Expecting stddevs to have same shape as mixture weights.")

  # Reshape the distribution parameters for batched vectorized dot products.
  pi_for_dot_prod = tf.expand_dims(mixture_weight_vector, axis=1)
  mu_for_dot_prod = tf.expand_dims(mean_vector, axis=2)
  sigma_for_dot_prod = tf.expand_dims(stddev_vector, axis=2)

  # weighted average of component means under mixture distribution.
  mean_wa = tf.matmul(pi_for_dot_prod, mu_for_dot_prod)
  mean_wa = tf.reshape(mean_wa, (-1,))
  # weighted average of component variances under mixture distribution.
  var_wa = tf.matmul(pi_for_dot_prod, tf.square(sigma_for_dot_prod))
  var_wa = tf.reshape(var_wa, (-1,))
  # weighted average of component squared means under mixture distribution.
  sq_mean_wa = tf.matmul(pi_for_dot_prod, tf.square(mu_for_dot_prod))
  sq_mean_wa = tf.reshape(sq_mean_wa, (-1,))
  mixture_variance = var_wa + sq_mean_wa - tf.square(mean_wa)
  return tf.sqrt(mixture_variance)
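An illustrative NumPy check (not part of the library) of the identity the function computes: Var = sum_i pi_i * (sigma_i^2 + mu_i^2) - (sum_i pi_i * mu_i)^2, i.e. the law of total variance for a single mixture.

import numpy as np

pi = np.array([0.3, 0.7])       # mixture weights
mu = np.array([-1.0, 2.0])      # component means
sigma = np.array([0.5, 1.5])    # component stddevs

mixture_mean = np.dot(pi, mu)
mixture_var = np.dot(pi, sigma**2 + mu**2) - mixture_mean**2
print(np.sqrt(mixture_var))     # mixture stddev, ~1.88 for these numbers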
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_spanner_hook.py#L212-L244
def create_database(self, instance_id, database_id, ddl_statements, project_id=None):
    """ """
    instance = self._get_client(project_id=project_id).instance(
        instance_id=instance_id)
    if not instance.exists():
        raise AirflowException("The instance {} does not exist in project {} !".
                               format(instance_id, project_id))
    database = instance.database(database_id=database_id,
                                 ddl_statements=ddl_statements)
    try:
        operation = database.create()  # type: Operation
    except GoogleAPICallError as e:
        self.log.error('An error occurred: %s. Exiting.', e.message)
        raise e

    if operation:
        result = operation.result()
        self.log.info(result)
    return
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/linear_gaussian_ssm.py#L981-L1038
def _joint_mean(self):
  """ """
  with tf.name_scope("mean_joint"):

    # The initial timestep is a special case, since we sample the
    # latent state from the prior rather than the transition model.

    with tf.control_dependencies(self.runtime_assertions):
      # Broadcast to ensure we represent the full batch shape.
      initial_latent_mean = _broadcast_to_shape(
          self.initial_state_prior.mean()[..., tf.newaxis],
          tf.concat([self.batch_shape_tensor(),
                     [self.latent_size, 1]], axis=0))

      initial_observation_mean = _propagate_mean(
          initial_latent_mean,
          self.get_observation_matrix_for_timestep(self.initial_step),
          self.get_observation_noise_for_timestep(self.initial_step))

      mean_step = build_kalman_mean_step(
          self.get_transition_matrix_for_timestep,
          self.get_transition_noise_for_timestep,
          self.get_observation_matrix_for_timestep,
          self.get_observation_noise_for_timestep)

      # Scan over all timesteps following the initial step.
      (latent_means, observation_means) = tf.scan(
          mean_step,
          elems=tf.range(self.initial_step + 1, self.final_step),
          initializer=(initial_latent_mean, initial_observation_mean))

      # Squish the initial step back on top of the other (scanned) timesteps
      latent_means = tf.concat([initial_latent_mean[tf.newaxis, ...],
                                latent_means], axis=0)
      observation_means = tf.concat([initial_observation_mean[tf.newaxis, ...],
                                     observation_means], axis=0)

      # Put dimensions back in order. The samples we've computed have
      # shape `[num_timesteps, batch_shape, size, 1]`, where `size`
      # is the dimension of the latent or observation spaces
      # respectively, but we want to return values with shape
      # `[batch_shape, num_timesteps, size]`.
      latent_means = tf.squeeze(latent_means, -1)
      latent_means = distribution_util.move_dimension(latent_means, 0, -2)
      observation_means = tf.squeeze(observation_means, -1)
      observation_means = distribution_util.move_dimension(
          observation_means, 0, -2)

      return latent_means, observation_means
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/internal/util.py#L52-L116
def pad_batch_dimension_for_multiple_chains(
    observed_time_series, model, chain_batch_shape):
  """"""
  # Running with multiple chains introduces an extra batch dimension. In
  # general we also need to pad the observed time series with a matching batch
  # dimension.
  #
  # For example, suppose our model has batch shape [3, 4] and
  # the observed time series has shape `concat([[5], [3, 4], [100])`,
  # corresponding to `sample_shape`, `batch_shape`, and `num_timesteps`
  # respectively. The model will produce distributions with batch shape
  # `concat([chain_batch_shape, [3, 4]])`, so we pad `observed_time_series` to
  # have matching shape `[5, 1, 3, 4, 100]`, where the added `1` dimension
  # between the sample and batch shapes will broadcast to `chain_batch_shape`.

  [  # Extract mask and guarantee `event_ndims=2`.
      observed_time_series,
      is_missing
  ] = canonicalize_observed_time_series_with_mask(observed_time_series)

  event_ndims = 2  # event_shape = [num_timesteps, observation_size=1]

  model_batch_ndims = (
      model.batch_shape.ndims if model.batch_shape.ndims is not None else
      tf.shape(input=model.batch_shape_tensor())[0])

  # Compute ndims from chain_batch_shape.
  chain_batch_shape = tf.convert_to_tensor(
      value=chain_batch_shape, name='chain_batch_shape', dtype=tf.int32)
  if not chain_batch_shape.shape.is_fully_defined():
    raise ValueError('Batch shape must have static rank. (given: {})'.format(
        chain_batch_shape))
  if chain_batch_shape.shape.ndims == 0:  # expand int `k` to `[k]`.
    chain_batch_shape = chain_batch_shape[tf.newaxis]
  chain_batch_ndims = tf.compat.dimension_value(chain_batch_shape.shape[0])

  def do_padding(observed_time_series_tensor):
    current_sample_shape = tf.shape(
        input=observed_time_series_tensor)[:-(model_batch_ndims + event_ndims)]
    current_batch_and_event_shape = tf.shape(
        input=observed_time_series_tensor)[-(model_batch_ndims + event_ndims):]
    return tf.reshape(
        tensor=observed_time_series_tensor,
        shape=tf.concat([
            current_sample_shape,
            tf.ones([chain_batch_ndims], dtype=tf.int32),
            current_batch_and_event_shape], axis=0))

  # Padding is only needed if the observed time series has sample shape.
  observed_time_series = prefer_static.cond(
      (dist_util.prefer_static_rank(observed_time_series) >
       model_batch_ndims + event_ndims),
      lambda: do_padding(observed_time_series),
      lambda: observed_time_series)

  if is_missing is not None:
    is_missing = prefer_static.cond(
        (dist_util.prefer_static_rank(is_missing) >
         model_batch_ndims + event_ndims),
        lambda: do_padding(is_missing),
        lambda: is_missing)
    return missing_values_util.MaskedTimeSeries(observed_time_series,
                                                is_missing=is_missing)

  return observed_time_series
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/batch_reshape.py#L380-L409
def calculate_reshape(original_shape, new_shape, validate=False, name=None):
  """"""
  batch_shape_static = tensorshape_util.constant_value_as_shape(new_shape)
  if tensorshape_util.is_fully_defined(batch_shape_static):
    return np.int32(batch_shape_static), batch_shape_static, []
  with tf.name_scope(name or "calculate_reshape"):
    original_size = tf.reduce_prod(input_tensor=original_shape)
    implicit_dim = tf.equal(new_shape, -1)
    size_implicit_dim = (
        original_size // tf.maximum(
            1, -tf.reduce_prod(input_tensor=new_shape)))
    new_ndims = tf.shape(input=new_shape)
    expanded_new_shape = tf.where(  # Assumes exactly one `-1`.
        implicit_dim, tf.fill(new_ndims, size_implicit_dim), new_shape)
    validations = [] if not validate else [  # pylint: disable=g-long-ternary
        assert_util.assert_rank(
            original_shape, 1, message="Original shape must be a vector."),
        assert_util.assert_rank(
            new_shape, 1, message="New shape must be a vector."),
        assert_util.assert_less_equal(
            tf.math.count_nonzero(implicit_dim, dtype=tf.int32),
            1,
            message="At most one dimension can be unknown."),
        assert_util.assert_positive(
            expanded_new_shape, message="Shape elements must be >=-1."),
        assert_util.assert_equal(
            tf.reduce_prod(input_tensor=expanded_new_shape),
            original_size,
            message="Shape sizes do not match."),
    ]
    return expanded_new_shape, batch_shape_static, validations
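A NumPy sketch (illustrative only) of the `-1` resolution performed above: the implicit dimension is the total element count divided by the product of the explicit entries.

import numpy as np

original_shape = np.array([6, 4])
new_shape = np.array([3, -1, 2])
# new_shape has exactly one -1, so -prod(new_shape) is the explicit product.
size_implicit = np.prod(original_shape) // max(1, -np.prod(new_shape))
resolved = np.where(new_shape == -1, size_implicit, new_shape)
print(resolved)  # [3 4 2], and 3 * 4 * 2 == 24 == 6 * 4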
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_transfer_hook.py#L119-L132
def create_transfer_job(self, body):
    """ """
    body = self._inject_project_id(body, BODY, PROJECT_ID)
    return self.get_conn().transferJobs().create(body=body).execute(
        num_retries=self.num_retries)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/langevin.py#L932-L997
def _prepare_args(target_log_prob_fn,
                  volatility_fn,
                  state,
                  step_size,
                  target_log_prob=None,
                  grads_target_log_prob=None,
                  volatility=None,
                  grads_volatility_fn=None,
                  diffusion_drift=None,
                  parallel_iterations=10):
  """"""
  state_parts = list(state) if mcmc_util.is_list_like(state) else [state]

  [
      target_log_prob,
      grads_target_log_prob,
  ] = mcmc_util.maybe_call_fn_and_grads(
      target_log_prob_fn,
      state_parts,
      target_log_prob,
      grads_target_log_prob)
  [
      volatility_parts,
      grads_volatility,
  ] = _maybe_call_volatility_fn_and_grads(
      volatility_fn,
      state_parts,
      volatility,
      grads_volatility_fn,
      distribution_util.prefer_static_shape(target_log_prob),
      parallel_iterations)

  step_sizes = (list(step_size) if mcmc_util.is_list_like(step_size)
                else [step_size])
  step_sizes = [
      tf.convert_to_tensor(
          value=s, name='step_size', dtype=target_log_prob.dtype)
      for s in step_sizes
  ]
  if len(step_sizes) == 1:
    step_sizes *= len(state_parts)
  if len(state_parts) != len(step_sizes):
    raise ValueError('There should be exactly one `step_size` or it should '
                     'have same length as `current_state`.')

  if diffusion_drift is None:
    diffusion_drift_parts = _get_drift(step_sizes, volatility_parts,
                                       grads_volatility,
                                       grads_target_log_prob)
  else:
    diffusion_drift_parts = (list(diffusion_drift)
                             if mcmc_util.is_list_like(diffusion_drift)
                             else [diffusion_drift])
    if len(state_parts) != len(diffusion_drift):
      raise ValueError('There should be exactly one `diffusion_drift` or it '
                       'should have same length as list-like `current_state`.')

  return [
      state_parts,
      step_sizes,
      target_log_prob,
      grads_target_log_prob,
      volatility_parts,
      grads_volatility,
      diffusion_drift_parts,
  ]
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dates.py#L195-L211
def infer_time_unit(time_seconds_arr):
    """ """
    if len(time_seconds_arr) == 0:
        return 'hours'
    max_time_seconds = max(time_seconds_arr)
    if max_time_seconds <= 60 * 2:
        return 'seconds'
    elif max_time_seconds <= 60 * 60 * 2:
        return 'minutes'
    elif max_time_seconds <= 24 * 60 * 60 * 2:
        return 'hours'
    else:
        return 'days'
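Usage illustration (durations are in seconds): the unit is chosen so that the largest value stays within roughly two of that unit.

print(infer_time_unit([90, 45]))      # 'seconds'  (max <= 120 s)
print(infer_time_unit([300, 6000]))   # 'minutes'  (max <= 7200 s)
print(infer_time_unit([100000]))      # 'hours'    (max <= 172800 s)
print(infer_time_unit([400000]))      # 'days'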
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/vector_diffeomixture.py#L745-L755
def _expand_base_distribution_mean(self):
  """"""
  single_draw_shape = concat_vectors(self.batch_shape_tensor(),
                                     self.event_shape_tensor())
  m = tf.reshape(
      self.distribution.mean(),  # A scalar.
      shape=tf.ones_like(single_draw_shape, dtype=tf.int32))
  m = tf.tile(m, multiples=single_draw_shape)
  tensorshape_util.set_shape(
      m, tensorshape_util.concatenate(self.batch_shape, self.event_shape))
  return m
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/dagrun.py#L163-L188
def get_task_instances(self, state=None, session=None):
    """ """
    from airflow.models.taskinstance import TaskInstance  # Avoid circular import

    tis = session.query(TaskInstance).filter(
        TaskInstance.dag_id == self.dag_id,
        TaskInstance.execution_date == self.execution_date,
    )
    if state:
        if isinstance(state, six.string_types):
            tis = tis.filter(TaskInstance.state == state)
        else:
            # this is required to deal with NULL values
            if None in state:
                tis = tis.filter(
                    or_(TaskInstance.state.in_(state),
                        TaskInstance.state.is_(None))
                )
            else:
                tis = tis.filter(TaskInstance.state.in_(state))
    if self.dag and self.dag.partial:
        tis = tis.filter(TaskInstance.task_id.in_(self.dag.task_ids))
    return tis.all()
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/program_transformations.py#L138-L223
def make_log_joint_fn(model):
  """ """
  def log_joint_fn(*args, **kwargs):
    """Log-probability of inputs according to a joint probability distribution.

    Args:
      *args: Positional arguments. They are the model's original inputs and
        can alternatively be specified as part of `kwargs`.
      **kwargs: Keyword arguments, where for each key-value pair `k` and `v`,
        `v` is passed as a `value` to the random variable(s) whose keyword
        argument `name` during construction is equal to `k`.

    Returns:
      Scalar tf.Tensor, which represents the model's log-probability summed
      over all Edward random variables and their dimensions.

    Raises:
      TypeError: If a random variable in the model has no specified value in
        `**kwargs`.
    """
    log_probs = []

    def interceptor(rv_constructor, *rv_args, **rv_kwargs):
      """Overrides a random variable's `value` and accumulates its log-prob."""
      # Set value to keyword argument indexed by `name` (an input tensor).
      rv_name = rv_kwargs.get("name")
      if rv_name is None:
        raise KeyError("Random variable constructor {} has no name "
                       "in its arguments.".format(rv_constructor.__name__))

      # If no value is explicitly passed in for an RV, default to the value
      # from the RV constructor. This may have been set explicitly by the user
      # or forwarded from a lower-level interceptor.
      previously_specified_value = rv_kwargs.get("value")
      value = kwargs.get(rv_name, previously_specified_value)
      if value is None:
        raise LookupError("Keyword argument specifying value for {} is "
                          "missing.".format(rv_name))
      rv_kwargs["value"] = value

      rv = rv_constructor(*rv_args, **rv_kwargs)
      log_prob = tf.reduce_sum(
          input_tensor=rv.distribution.log_prob(rv.value))
      log_probs.append(log_prob)
      return rv

    model_kwargs = _get_function_inputs(model, kwargs)
    with interception(interceptor):
      model(*args, **model_kwargs)
    log_prob = sum(log_probs)
    return log_prob
  return log_joint_fn
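A minimal, framework-free sketch of the interception pattern the function above relies on: every random-variable constructor is routed through an interceptor that substitutes user-supplied values and accumulates log-probabilities. This is not the Edward2 implementation; all names and the single-Normal "constructor" are hypothetical stand-ins.

import math

def normal_rv(name, loc, scale, value=None):
    # Stand-in constructor: returns (value, log-density at that value).
    if value is None:
        value = loc  # placeholder for sampling
    log_prob = (-0.5 * ((value - loc) / scale) ** 2
                - math.log(scale * math.sqrt(2 * math.pi)))
    return value, log_prob

def make_log_joint(model):
    def log_joint(**observations):
        total = 0.0
        def intercepted(name, loc, scale):
            nonlocal total
            # Override the value with the observation keyed by `name`.
            value, lp = normal_rv(name, loc, scale,
                                  value=observations.get(name))
            total += lp
            return value
        model(intercepted)
        return total
    return log_joint

def model(rv):
    z = rv("z", loc=0.0, scale=1.0)
    rv("x", loc=z, scale=2.0)

log_joint = make_log_joint(model)
print(log_joint(z=0.5, x=1.0))  # log N(0.5; 0, 1) + log N(1.0; 0.5, 2)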
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L1451-L1477
def params_size(num_components, component_params_size, name=None):
  """ """
  with tf.compat.v1.name_scope(name, 'MixtureSameFamily_params_size',
                               [num_components, component_params_size]):
    num_components = tf.convert_to_tensor(
        value=num_components, name='num_components', dtype_hint=tf.int32)
    component_params_size = tf.convert_to_tensor(
        value=component_params_size, name='component_params_size')

    num_components = dist_util.prefer_static_value(num_components)
    component_params_size = dist_util.prefer_static_value(
        component_params_size)

    return num_components + num_components * component_params_size
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L180-L278
def make_diag_scale(loc=None,
                    scale_diag=None,
                    scale_identity_multiplier=None,
                    shape_hint=None,
                    validate_args=False,
                    assert_positive=False,
                    name=None,
                    dtype=None):
  """ """

  def _maybe_attach_assertion(x):
    if not validate_args:
      return x
    if assert_positive:
      return with_dependencies([
          assert_util.assert_positive(
              x, message="diagonal part must be positive"),
      ], x)
    return with_dependencies([
        assert_util.assert_none_equal(
            x,
            tf.zeros([], x.dtype),
            message="diagonal part must be non-zero")
    ], x)

  with tf.name_scope(name or "make_diag_scale"):
    if dtype is None:
      dtype = dtype_util.common_dtype(
          [loc, scale_diag, scale_identity_multiplier],
          preferred_dtype=tf.float32)
    loc = _convert_to_tensor(loc, name="loc", dtype=dtype)
    scale_diag = _convert_to_tensor(scale_diag, name="scale_diag", dtype=dtype)
    scale_identity_multiplier = _convert_to_tensor(
        scale_identity_multiplier,
        name="scale_identity_multiplier",
        dtype=dtype)

    if scale_diag is not None:
      if scale_identity_multiplier is not None:
        scale_diag += scale_identity_multiplier[..., tf.newaxis]
      return tf.linalg.LinearOperatorDiag(
          diag=_maybe_attach_assertion(scale_diag),
          is_non_singular=True,
          is_self_adjoint=True,
          is_positive_definite=assert_positive)

    if loc is None and shape_hint is None:
      raise ValueError("Cannot infer `event_shape` unless `loc` or "
                       "`shape_hint` is specified.")

    num_rows = shape_hint
    del shape_hint
    if num_rows is None:
      num_rows = tf.compat.dimension_value(loc.shape[-1])
      if num_rows is None:
        num_rows = tf.shape(input=loc)[-1]

    if scale_identity_multiplier is None:
      return tf.linalg.LinearOperatorIdentity(
          num_rows=num_rows,
          dtype=dtype,
          is_self_adjoint=True,
          is_positive_definite=True,
          assert_proper_shapes=validate_args)

    return tf.linalg.LinearOperatorScaledIdentity(
        num_rows=num_rows,
        multiplier=_maybe_attach_assertion(scale_identity_multiplier),
        is_non_singular=True,
        is_self_adjoint=True,
        is_positive_definite=assert_positive,
        assert_proper_shapes=validate_args)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L424-L455
def refresh_from_db(self, session=None, lock_for_update=False):
    """ """
    TI = TaskInstance

    qry = session.query(TI).filter(
        TI.dag_id == self.dag_id,
        TI.task_id == self.task_id,
        TI.execution_date == self.execution_date)

    if lock_for_update:
        ti = qry.with_for_update().first()
    else:
        ti = qry.first()
    if ti:
        self.state = ti.state
        self.start_date = ti.start_date
        self.end_date = ti.end_date
        # Get the raw value of try_number column, don't read through the
        # accessor here otherwise it will be incremented by one already.
        self.try_number = ti._try_number
        self.max_tries = ti.max_tries
        self.hostname = ti.hostname
        self.pid = ti.pid
        self.executor_config = ti.executor_config
    else:
        self.state = None
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/slice_sampler_kernel.py#L555-L580
def _right_pad(x, final_rank):
  """ """
  padded_shape = tf.concat(
      [tf.shape(input=x),
       tf.ones(final_rank - tf.rank(x), dtype=tf.int32)],
      axis=0)
  static_padded_shape = None
  if x.shape.is_fully_defined() and isinstance(final_rank, int):
    static_padded_shape = x.shape.as_list()
    extra_dims = final_rank - len(static_padded_shape)
    static_padded_shape.extend([1] * extra_dims)
  padded_x = tf.reshape(x, static_padded_shape or padded_shape)
  return padded_x
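A NumPy illustration (not library code) of the same idea: right-pad a shape with singleton dimensions up to a target rank so the array lines up against higher-rank operands.

import numpy as np

x = np.arange(6).reshape(2, 3)
final_rank = 4
padded = x.reshape(x.shape + (1,) * (final_rank - x.ndim))
print(padded.shape)  # (2, 3, 1, 1)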
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/mnist.py#L262-L304
def download(self):
    """"""
    import shutil
    import zipfile

    if self._check_exists():
        return

    makedir_exist_ok(self.raw_folder)
    makedir_exist_ok(self.processed_folder)

    # download files
    filename = self.url.rpartition('/')[2]
    file_path = os.path.join(self.raw_folder, filename)
    download_url(self.url, root=self.raw_folder, filename=filename, md5=None)

    print('Extracting zip archive')
    with zipfile.ZipFile(file_path) as zip_f:
        zip_f.extractall(self.raw_folder)
    os.unlink(file_path)
    gzip_folder = os.path.join(self.raw_folder, 'gzip')
    for gzip_file in os.listdir(gzip_folder):
        if gzip_file.endswith('.gz'):
            self.extract_gzip(gzip_path=os.path.join(gzip_folder, gzip_file))

    # process and save as torch files
    for split in self.splits:
        print('Processing ' + split)
        training_set = (
            read_image_file(os.path.join(gzip_folder,
                                         'emnist-{}-train-images-idx3-ubyte'.format(split))),
            read_label_file(os.path.join(gzip_folder,
                                         'emnist-{}-train-labels-idx1-ubyte'.format(split)))
        )
        test_set = (
            read_image_file(os.path.join(gzip_folder,
                                         'emnist-{}-test-images-idx3-ubyte'.format(split))),
            read_label_file(os.path.join(gzip_folder,
                                         'emnist-{}-test-labels-idx1-ubyte'.format(split)))
        )
        with open(os.path.join(self.processed_folder, self._training_file(split)), 'wb') as f:
            torch.save(training_set, f)
        with open(os.path.join(self.processed_folder, self._test_file(split)), 'wb') as f:
            torch.save(test_set, f)
    shutil.rmtree(gzip_folder)

    print('Done!')
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/disentangled_vae.py#L264-L281
def zero_state(self, sample_batch_shape=()):
  """ """
  h0 = tf.zeros([1, self.hidden_size])
  c0 = tf.zeros([1, self.hidden_size])
  combined_shape = tf.concat((tf.convert_to_tensor(
      value=sample_batch_shape, dtype=tf.int32), [self.dimensions]), axis=-1)
  previous_output = tf.zeros(combined_shape)
  return previous_output, (h0, c0)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/distribution_util.py#L1799-L1849
def softplus_inverse(x, name=None):
  """ """
  with tf.name_scope(name or "softplus_inverse"):
    x = tf.convert_to_tensor(value=x, name="x")
    # We begin by deriving a more numerically stable softplus_inverse:
    # x = softplus(y) = Log[1 + exp{y}], (which means x > 0).
    # ==> exp{x} = 1 + exp{y}                                (1)
    # ==> y = Log[exp{x} - 1]                                (2)
    #       = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}]
    #       = Log[(1 - exp{-x}) / 1] + Log[exp{x}]
    #       = Log[1 - exp{-x}] + x                           (3)
    # (2) is the "obvious" inverse, but (3) is more stable than (2) for
    # large x. For small x (e.g. x = 1e-10), (3) will become -inf since
    # 1 - exp{-x} will be zero. To fix this, we use 1 - exp{-x} approx x for
    # small x > 0.
    #
    # In addition to the numerically stable derivation above, we clamp
    # small/large values to be congruent with the logic in:
    # tensorflow/core/kernels/softplus_op.h
    #
    # Finally, we set the input to one whenever the input is too large or too
    # small. This ensures that no unchosen codepath is +/- inf. This is
    # necessary to ensure the gradient doesn't get NaNs. Recall that the
    # gradient of `where` behaves like `pred*pred_true + (1-pred)*pred_false`
    # thus an `inf` in an unselected path results in `0*inf=nan`. We are
    # careful to overwrite `x` with ones only when we will never actually use
    # this value. Note that we use ones and not zeros since
    # `log(expm1(0.)) = -inf`.
    threshold = np.log(np.finfo(dtype_util.as_numpy_dtype(x.dtype)).eps) + 2.
    is_too_small = tf.less(x, np.exp(threshold))
    is_too_large = tf.greater(x, -threshold)
    too_small_value = tf.math.log(x)
    too_large_value = x
    # This `where` will ultimately be a NOP because we won't select this
    # codepath whenever we used the surrogate `ones_like`.
    x = tf.where(tf.logical_or(is_too_small, is_too_large),
                 tf.ones_like(x), x)
    y = x + tf.math.log(-tf.math.expm1(-x))  # == log(expm1(x))
    return tf.where(is_too_small,
                    too_small_value,
                    tf.where(is_too_large, too_large_value, y))
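A NumPy check (illustrative only) of identity (3) used above: softplus_inverse(x) = x + log(-expm1(-x)) = log(expm1(x)), so applying softplus to the result recovers x away from the clamped regions.

import numpy as np

x = np.array([1e-3, 0.5, 5.0, 20.0])
y = x + np.log(-np.expm1(-x))               # stable log(expm1(x))
print(np.allclose(np.log1p(np.exp(y)), x))  # softplus(y) == x -> True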