Columns: _id (string, 98 to 184 characters), text (string, 91 to 10.9k characters)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L366-L438
def create_custom_dag_permission_view(self, session=None): """ """ self.log.debug('Fetching a set of all permission, view_menu from FAB meta-table') def merge_pv(perm, view_menu): """Create permission view menu only if it doesn't exist""" if view_menu and perm and (view_menu, perm) not in all_pvs: self._merge_perm(perm, view_menu) all_pvs = set() for pv in self.get_session.query(self.permissionview_model).all(): if pv.permission and pv.view_menu: all_pvs.add((pv.permission.name, pv.view_menu.name)) # Get all the active / paused dags and insert them into a set all_dags_models = session.query(models.DagModel)\ .filter(or_(models.DagModel.is_active, models.DagModel.is_paused))\ .filter(~models.DagModel.is_subdag).all() # create can_dag_edit and can_dag_read permissions for every dag(vm) for dag in all_dags_models: for perm in self.DAG_PERMS: merge_pv(perm, dag.dag_id) # for all the dag-level role, add the permission of viewer # with the dag view to ab_permission_view all_roles = self.get_all_roles() user_role = self.find_role('User') dag_role = [role for role in all_roles if role.name not in EXISTING_ROLES] update_perm_views = [] # need to remove all_dag vm from all the existing view-menus dag_vm = self.find_view_menu('all_dags') ab_perm_view_role = sqla_models.assoc_permissionview_role perm_view = self.permissionview_model view_menu = self.viewmenu_model all_perm_view_by_user = session.query(ab_perm_view_role)\ .join(perm_view, perm_view.id == ab_perm_view_role .columns.permission_view_id)\ .filter(ab_perm_view_role.columns.role_id == user_role.id)\ .join(view_menu)\ .filter(perm_view.view_menu_id != dag_vm.id) all_perm_views = set([role.permission_view_id for role in all_perm_view_by_user]) for role in dag_role: # Get all the perm-view of the role existing_perm_view_by_user = self.get_session.query(ab_perm_view_role)\ .filter(ab_perm_view_role.columns.role_id == role.id) existing_perms_views = set([pv.permission_view_id for pv in existing_perm_view_by_user]) missing_perm_views = all_perm_views - existing_perms_views for perm_view_id in missing_perm_views: update_perm_views.append({'permission_view_id': perm_view_id, 'role_id': role.id}) if update_perm_views: self.get_session.execute(ab_perm_view_role.insert(), update_perm_views) self.get_session.commit()
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/blockwise.py#L151-L172
def _validate_block_sizes(block_sizes, bijectors, validate_args): """""" block_sizes_shape = block_sizes.shape if tensorshape_util.is_fully_defined(block_sizes_shape): if (tensorshape_util.rank(block_sizes_shape) != 1 or (tensorshape_util.num_elements(block_sizes_shape) != len(bijectors))): raise ValueError( '`block_sizes` must be `None`, or a vector of the same length as ' '`bijectors`. Got a `Tensor` with shape {} and `bijectors` of ' 'length {}'.format(block_sizes_shape, len(bijectors))) return block_sizes elif validate_args: message = ('`block_sizes` must be `None`, or a vector of the same length ' 'as `bijectors`.') with tf.control_dependencies([ assert_util.assert_equal( tf.size(input=block_sizes), len(bijectors), message=message), assert_util.assert_equal(tf.rank(block_sizes), 1) ]): return tf.identity(block_sizes) else: return block_sizes
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L609-L738
def manage_slas(self, dag, session=None): """ """ if not any([isinstance(ti.sla, timedelta) for ti in dag.tasks]): self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag) return TI = models.TaskInstance sq = ( session .query( TI.task_id, func.max(TI.execution_date).label('max_ti')) .with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql') .filter(TI.dag_id == dag.dag_id) .filter(or_( TI.state == State.SUCCESS, TI.state == State.SKIPPED)) .filter(TI.task_id.in_(dag.task_ids)) .group_by(TI.task_id).subquery('sq') ) max_tis = session.query(TI).filter( TI.dag_id == dag.dag_id, TI.task_id == sq.c.task_id, TI.execution_date == sq.c.max_ti, ).all() ts = timezone.utcnow() for ti in max_tis: task = dag.get_task(ti.task_id) dttm = ti.execution_date if isinstance(task.sla, timedelta): dttm = dag.following_schedule(dttm) while dttm < timezone.utcnow(): following_schedule = dag.following_schedule(dttm) if following_schedule + task.sla < timezone.utcnow(): session.merge(SlaMiss( task_id=ti.task_id, dag_id=ti.dag_id, execution_date=dttm, timestamp=ts)) dttm = dag.following_schedule(dttm) session.commit() slas = ( session .query(SlaMiss) .filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa: E712 .all() ) if slas: sla_dates = [sla.execution_date for sla in slas] qry = ( session .query(TI) .filter( TI.state != State.SUCCESS, TI.execution_date.in_(sla_dates), TI.dag_id == dag.dag_id ).all() ) blocking_tis = [] for ti in qry: if ti.task_id in dag.task_ids: ti.task = dag.get_task(ti.task_id) blocking_tis.append(ti) else: session.delete(ti) session.commit() task_list = "\n".join([ sla.task_id + ' on ' + sla.execution_date.isoformat() for sla in slas]) blocking_task_list = "\n".join([ ti.task_id + ' on ' + ti.execution_date.isoformat() for ti in blocking_tis]) # Track whether email or any alert notification sent # We consider email or the alert callback as notifications email_sent = False notification_sent = False if dag.sla_miss_callback: # Execute the alert callback self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ') try: dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis) notification_sent = True except Exception: self.log.exception("Could not call sla_miss_callback for DAG %s", dag.dag_id) email_content = """\ Here's a list of tasks that missed their SLAs: <pre><code>{task_list}\n<code></pre> Blocking tasks: <pre><code>{blocking_task_list}\n{bug}<code></pre> """.format(task_list=task_list, blocking_task_list=blocking_task_list, bug=asciiart.bug) emails = set() for task in dag.tasks: if task.email: if isinstance(task.email, basestring): emails |= set(get_email_address_list(task.email)) elif isinstance(task.email, (list, tuple)): emails |= set(task.email) if emails: try: send_email( emails, "[airflow] SLA miss on DAG=" + dag.dag_id, email_content) email_sent = True notification_sent = True except Exception: self.log.exception("Could not send SLA Miss email notification for" " DAG %s", dag.dag_id) # If we sent any notification, update the sla_miss table if notification_sent: for sla in slas: if email_sent: sla.email_sent = True sla.notification_sent = True session.merge(sla) session.commit()
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_transfer_hook.py#L246-L262
def get_transfer_operation(self, operation_name): """ """ return ( self.get_conn() .transferOperations() .get(name=operation_name) .execute(num_retries=self.num_retries) )
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/references/classification/utils.py#L30-L41
def synchronize_between_processes(self): """ """ if not is_dist_avail_and_initialized(): return t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') dist.barrier() dist.all_reduce(t) t = t.tolist() self.count = int(t[0]) self.total = t[1]
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/assert_util.py#L76-L106
def assert_rank_at_most(x, rank, data=None, summarize=None, message=None, name=None): """ """ with tf.compat.v2.name_scope(name or 'assert_rank_at_most'): return tf.compat.v1.assert_less_equal( tf.rank(x), rank, data=data, summarize=summarize, message=message)
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/common.py#L319-L326
def ungzip(data):
    """Decompress gzip-compressed bytes and return the raw payload."""
    from io import BytesIO
    import gzip
    buffer = BytesIO(data)
    f = gzip.GzipFile(fileobj=buffer)
    return f.read()
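A minimal round-trip sketch of what this helper does, assuming only the Python standard library:

import gzip
from io import BytesIO

payload = b'hello, world'
compressed = gzip.compress(payload)

# Equivalent of ungzip(compressed): wrap the bytes in a file object and read them back.
restored = gzip.GzipFile(fileobj=BytesIO(compressed)).read()
assert restored == payload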
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/sample_halton_sequence.py#L345-L365
def _base_expansion_size(num, bases):
  """Number of digits needed to represent `num` in each of the given `bases`."""
  return tf.floor(tf.math.log(num) / tf.math.log(bases)) + 1
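The expression floor(log(num) / log(base)) + 1 is the number of digits of num written in the given base; a plain-Python sketch (no TensorFlow) of the same formula:

import math

def base_expansion_size(num, base):
    # floor(log_base(num)) + 1 == number of digits of `num` in base `base`
    return math.floor(math.log(num) / math.log(base)) + 1

assert base_expansion_size(9, 2) == 4     # 9 == 0b1001, four binary digits
assert base_expansion_size(123, 10) == 3  # three decimal digits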
https://github.com/asciimoo/searx/blob/a84caa22cf947e973c10aa968d35fb2bdda6d048/searx/engines/duden.py#L26-L39
def request(query, params): ''' ''' offset = (params['pageno'] - 1) params['url'] = search_url.format(offset=offset, query=quote(query)) return params
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L86-L117
def secondary_training_status_message(job_description, prev_description): """ """ if job_description is None or job_description.get('SecondaryStatusTransitions') is None\ or len(job_description.get('SecondaryStatusTransitions')) == 0: return '' prev_description_secondary_transitions = prev_description.get('SecondaryStatusTransitions')\ if prev_description is not None else None prev_transitions_num = len(prev_description['SecondaryStatusTransitions'])\ if prev_description_secondary_transitions is not None else 0 current_transitions = job_description['SecondaryStatusTransitions'] transitions_to_print = current_transitions[-1:] if len(current_transitions) == prev_transitions_num else \ current_transitions[prev_transitions_num - len(current_transitions):] status_strs = [] for transition in transitions_to_print: message = transition['StatusMessage'] time_str = timezone.convert_to_utc(job_description['LastModifiedTime']).strftime('%Y-%m-%d %H:%M:%S') status_strs.append('{} {} - {}'.format(time_str, transition['Status'], message)) return '\n'.join(status_strs)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/trainable_distributions/trainable_distributions_lib.py#L42-L61
def softplus_and_shift(x, shift=1e-5, name=None): """ """ with tf.compat.v1.name_scope(name, 'softplus_and_shift', [x, shift]): x = tf.convert_to_tensor(value=x, name='x') y = tf.nn.softplus(x) if shift is not None: y += shift return y
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/task/task_runner/__init__.py#L27-L43
def get_task_runner(local_task_job): """ """ if _TASK_RUNNER == "StandardTaskRunner": return StandardTaskRunner(local_task_job) elif _TASK_RUNNER == "CgroupTaskRunner": from airflow.contrib.task_runner.cgroup_task_runner import CgroupTaskRunner return CgroupTaskRunner(local_task_job) else: raise AirflowException("Unknown task runner type {}".format(_TASK_RUNNER))
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/dingding_hook.py#L65-L74
def _get_endpoint(self):
    """Build the Dingding robot endpoint from the connection's token."""
    conn = self.get_connection(self.http_conn_id)
    token = conn.password
    if not token:
        raise AirflowException('Dingding token is required but missing; '
                               'check your conn_id configuration.')
    return 'robot/send?access_token={}'.format(token)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/kubernetes/worker_configuration.py#L188-L202
def _get_security_context(self): """""" security_context = {} if self.kube_config.worker_run_as_user: security_context['runAsUser'] = self.kube_config.worker_run_as_user if self.kube_config.worker_fs_group: security_context['fsGroup'] = self.kube_config.worker_fs_group # set fs_group to 65533 if not explicitly specified and using git ssh keypair auth if self.kube_config.git_ssh_key_secret_name and security_context.get('fsGroup') is None: security_context['fsGroup'] = 65533 return security_context
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/vae.py#L191-L225
def make_encoder(activation, latent_size, base_depth): """ """ conv = functools.partial( tf.keras.layers.Conv2D, padding="SAME", activation=activation) encoder_net = tf.keras.Sequential([ conv(base_depth, 5, 1), conv(base_depth, 5, 2), conv(2 * base_depth, 5, 1), conv(2 * base_depth, 5, 2), conv(4 * latent_size, 7, padding="VALID"), tf.keras.layers.Flatten(), tf.keras.layers.Dense(2 * latent_size, activation=None), ]) def encoder(images): images = 2 * tf.cast(images, dtype=tf.float32) - 1 net = encoder_net(images) return tfd.MultivariateNormalDiag( loc=net[..., :latent_size], scale_diag=tf.nn.softplus(net[..., latent_size:] + _softplus_inverse(1.0)), name="code") return encoder
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/mysql_to_gcs.py#L269-L288
def _convert_types(schema, col_type_dict, row):
    """Convert row values for export: datetimes/dates to epoch seconds, Decimals to floats, BYTES columns to base64 text."""
    converted_row = []
    for col_name, col_val in zip(schema, row):
        if type(col_val) in (datetime, date):
            col_val = time.mktime(col_val.timetuple())
        elif isinstance(col_val, Decimal):
            col_val = float(col_val)
        elif col_type_dict.get(col_name) == "BYTES":
            col_val = base64.standard_b64encode(col_val).decode('ascii')
        converted_row.append(col_val)
    return converted_row
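A self-contained sketch of the same conversions on one hypothetical row, using only the standard library (column names and values are made up):

import base64
import time
from datetime import date, datetime
from decimal import Decimal

schema = ['created_at', 'price', 'payload', 'name']
col_type_dict = {'payload': 'BYTES'}
row = (datetime(2019, 1, 1), Decimal('9.99'), b'\x00\x01', 'widget')

converted = []
for col_name, col_val in zip(schema, row):
    if type(col_val) in (datetime, date):
        col_val = time.mktime(col_val.timetuple())  # naive datetime -> epoch seconds (local time)
    elif isinstance(col_val, Decimal):
        col_val = float(col_val)
    elif col_type_dict.get(col_name) == 'BYTES':
        col_val = base64.standard_b64encode(col_val).decode('ascii')
    converted.append(col_val)

print(converted)  # [<epoch seconds>, 9.99, 'AAE=', 'widget']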
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/decomposition.py#L29-L37
def _split_covariance_into_marginals(covariance, block_sizes): """""" start_dim = 0 marginals = [] for size in block_sizes: end_dim = start_dim + size marginals.append(covariance[..., start_dim:end_dim, start_dim:end_dim]) start_dim = end_dim return marginals
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/sample.py#L34-L52
def _make_summary_statistic(attr): """""" def _fn(self, **kwargs): """Implements summary statistic, eg, mean, stddev, mode.""" x = getattr(self.distribution, attr)(**kwargs) shape = prefer_static.concat([ self.distribution.batch_shape_tensor(), prefer_static.ones(prefer_static.rank_from_shape(self.sample_shape), dtype=self.sample_shape.dtype), self.distribution.event_shape_tensor(), ], axis=0) x = tf.reshape(x, shape=shape) shape = prefer_static.concat([ self.distribution.batch_shape_tensor(), self.sample_shape, self.distribution.event_shape_tensor(), ], axis=0) return tf.broadcast_to(x, shape) return _fn
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L2071-L2101
def _task_instances_for_dag_run(self, dag_run, session=None): """ """ tasks_to_run = {} if dag_run is None: return tasks_to_run # check if we have orphaned tasks self.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session) # for some reason if we don't refresh the reference to run is lost dag_run.refresh_from_db() make_transient(dag_run) # TODO(edgarRd): AIRFLOW-1464 change to batch query to improve perf for ti in dag_run.get_task_instances(): # all tasks part of the backfill are scheduled to run if ti.state == State.NONE: ti.set_state(State.SCHEDULED, session=session) if ti.state != State.REMOVED: tasks_to_run[ti.key] = ti return tasks_to_run
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L453-L465
def vflip(img):
    """ """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    return img.transpose(Image.FLIP_TOP_BOTTOM)
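Usage is a one-liner once Pillow is installed (the file names here are hypothetical):

from PIL import Image

img = Image.open('photo.jpg')                    # hypothetical input file
flipped = img.transpose(Image.FLIP_TOP_BOTTOM)   # the same call vflip() performs
flipped.save('photo_flipped.jpg')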
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/normal_conjugate_posteriors.py#L25-L81
def normal_conjugates_known_scale_posterior(prior, scale, s, n):
  """Posterior Normal for a Normal prior with known observation scale, given the sum `s` of `n` observations."""
  if not isinstance(prior, normal.Normal):
    raise TypeError("Expected prior to be an instance of type Normal")

  if s.dtype != prior.dtype:
    raise TypeError(
        "Observation sum s.dtype does not match prior dtype: %s vs. %s" %
        (s.dtype, prior.dtype))

  n = tf.cast(n, prior.dtype)
  scale0_2 = tf.square(prior.scale)
  scale_2 = tf.square(scale)
  scalep_2 = 1.0 / (1 / scale0_2 + n / scale_2)
  return normal.Normal(
      loc=(prior.loc / scale0_2 + s / scale_2) * scalep_2,
      scale=tf.sqrt(scalep_2))
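A plain-Python spot-check of the conjugate update with illustrative numbers: prior N(0, 1), known scale 1, and n = 4 observations summing to s = 8 give posterior variance 1/(1 + 4) = 0.2 and posterior mean 8 * 0.2 = 1.6:

import math

loc0, scale0 = 0.0, 1.0   # prior Normal(loc0, scale0)
scale = 1.0               # known observation scale
n, s = 4, 8.0             # number of observations and their sum

scalep_2 = 1.0 / (1.0 / scale0**2 + n / scale**2)    # posterior variance
locp = (loc0 / scale0**2 + s / scale**2) * scalep_2  # posterior mean

print(locp, math.sqrt(scalep_2))  # 1.6 and ~0.447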
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/mongo_hook.py#L104-L112
def aggregate(self, mongo_collection, aggregate_query, mongo_db=None, **kwargs): """ """ collection = self.get_collection(mongo_collection, mongo_db=mongo_db) return collection.aggregate(aggregate_query, **kwargs)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/decomposition.py#L40-L106
def _decompose_from_posterior_marginals( model, posterior_means, posterior_covs, parameter_samples): """ """ try: model.components except AttributeError: raise ValueError('Model decomposed into components must be an instance of' '`tfp.sts.Sum` (passed model {})'.format(model)) with tf.compat.v1.name_scope('decompose_from_posterior_marginals'): # Extract the component means/covs from the joint latent posterior. latent_sizes = [component.latent_size for component in model.components] component_means = tf.split(posterior_means, latent_sizes, axis=-1) component_covs = _split_covariance_into_marginals( posterior_covs, latent_sizes) # Instantiate per-component state space models, and use them to push the # posterior means/covs through the observation model for each component. num_timesteps = dist_util.prefer_static_value( tf.shape(input=posterior_means))[-2] component_ssms = model.make_component_state_space_models( num_timesteps=num_timesteps, param_vals=parameter_samples) component_predictive_dists = collections.OrderedDict() for (component, component_ssm, component_mean, component_cov) in zip(model.components, component_ssms, component_means, component_covs): component_obs_mean, component_obs_cov = ( component_ssm.latents_to_observations( latent_means=component_mean, latent_covs=component_cov)) # Using the observation means and covs, build a mixture distribution # that integrates over the posterior draws. component_predictive_dists[component] = sts_util.mix_over_posterior_draws( means=component_obs_mean[..., 0], variances=component_obs_cov[..., 0, 0]) return component_predictive_dists
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/S3_hook.py#L295-L314
def get_wildcard_key(self, wildcard_key, bucket_name=None, delimiter=''):
    """Return the first S3 key matching the wildcard expression, or None if nothing matches."""
    if not bucket_name:
        (bucket_name, wildcard_key) = self.parse_s3_url(wildcard_key)

    prefix = re.split(r'[*]', wildcard_key, 1)[0]
    klist = self.list_keys(bucket_name, prefix=prefix, delimiter=delimiter)
    if klist:
        key_matches = [k for k in klist if fnmatch.fnmatch(k, wildcard_key)]
        if key_matches:
            return self.get_key(key_matches[0], bucket_name)
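The wildcard matching itself is plain fnmatch against the full key; for example (keys and pattern are hypothetical):

import fnmatch

keys = ['logs/2019/01/run.csv', 'logs/2019/02/run.csv', 'data/other.csv']
pattern = 'logs/*/run.csv'

matches = [k for k in keys if fnmatch.fnmatch(k, pattern)]
print(matches)  # ['logs/2019/01/run.csv', 'logs/2019/02/run.csv'] -- '*' spans '/' in fnmatch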
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L38-L94
def to_tensor(pic): """ """ if not(_is_pil_image(pic) or _is_numpy_image(pic)): raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic))) if isinstance(pic, np.ndarray): # handle numpy array if pic.ndim == 2: pic = pic[:, :, None] img = torch.from_numpy(pic.transpose((2, 0, 1))) # backward compatibility if isinstance(img, torch.ByteTensor): return img.float().div(255) else: return img if accimage is not None and isinstance(pic, accimage.Image): nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32) pic.copyto(nppic) return torch.from_numpy(nppic) # handle PIL Image if pic.mode == 'I': img = torch.from_numpy(np.array(pic, np.int32, copy=False)) elif pic.mode == 'I;16': img = torch.from_numpy(np.array(pic, np.int16, copy=False)) elif pic.mode == 'F': img = torch.from_numpy(np.array(pic, np.float32, copy=False)) elif pic.mode == '1': img = 255 * torch.from_numpy(np.array(pic, np.uint8, copy=False)) else: img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes())) # PIL image mode: L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK if pic.mode == 'YCbCr': nchannel = 3 elif pic.mode == 'I;16': nchannel = 1 else: nchannel = len(pic.mode) img = img.view(pic.size[1], pic.size[0], nchannel) # put it from HWC to CHW format # yikes, this transpose takes 80% of the loading time/CPU img = img.transpose(0, 1).transpose(0, 2).contiguous() if isinstance(img, torch.ByteTensor): return img.float().div(255) else: return img
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/fun_mcmc/fun_mcmc_lib.py#L122-L137
def call_fn(fn: TransitionOperator, args: Union[Tuple[Any], Any]) -> Any: """ """ if isinstance(args, (list, tuple)) and not mcmc_util.is_namedtuple_like(args): args = args # type: Tuple[Any] return fn(*args) else: return fn(args)
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/transforms.py#L1064-L1089
def get_params(degrees, translate, scale_ranges, shears, img_size): """ """ angle = random.uniform(degrees[0], degrees[1]) if translate is not None: max_dx = translate[0] * img_size[0] max_dy = translate[1] * img_size[1] translations = (np.round(random.uniform(-max_dx, max_dx)), np.round(random.uniform(-max_dy, max_dy))) else: translations = (0, 0) if scale_ranges is not None: scale = random.uniform(scale_ranges[0], scale_ranges[1]) else: scale = 1.0 if shears is not None: shear = random.uniform(shears[0], shears[1]) else: shear = 0.0 return angle, translations, scale, shear
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/poisson_lognormal.py#L88-L152
def quadrature_scheme_lognormal_quantiles( loc, scale, quadrature_size, validate_args=False, name=None): """ """ with tf.name_scope(name or "quadrature_scheme_lognormal_quantiles"): # Create a LogNormal distribution. dist = transformed_distribution.TransformedDistribution( distribution=normal.Normal(loc=loc, scale=scale), bijector=exp_bijector.Exp(), validate_args=validate_args) batch_ndims = tensorshape_util.rank(dist.batch_shape) if batch_ndims is None: batch_ndims = tf.shape(input=dist.batch_shape_tensor())[0] def _compute_quantiles(): """Helper to build quantiles.""" # Omit {0, 1} since they might lead to Inf/NaN. zero = tf.zeros([], dtype=dist.dtype) edges = tf.linspace(zero, 1., quadrature_size + 3)[1:-1] # Expand edges so its broadcast across batch dims. edges = tf.reshape( edges, shape=tf.concat( [[-1], tf.ones([batch_ndims], dtype=tf.int32)], axis=0)) quantiles = dist.quantile(edges) # Cyclically permute left by one. perm = tf.concat([tf.range(1, 1 + batch_ndims), [0]], axis=0) quantiles = tf.transpose(a=quantiles, perm=perm) return quantiles quantiles = _compute_quantiles() # Compute grid as quantile midpoints. grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2. # Set shape hints. new_shape = tensorshape_util.concatenate(dist.batch_shape, [quadrature_size]) tensorshape_util.set_shape(grid, new_shape) # By construction probs is constant, i.e., `1 / quadrature_size`. This is # important, because non-constant probs leads to non-reparameterizable # samples. probs = tf.fill( dims=[quadrature_size], value=1. / tf.cast(quadrature_size, dist.dtype)) return grid, probs
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/mongo_hook.py#L186-L212
def replace_one(self, mongo_collection, doc, filter_doc=None, mongo_db=None, **kwargs): """ """ collection = self.get_collection(mongo_collection, mongo_db=mongo_db) if not filter_doc: filter_doc = {'_id': doc['_id']} return collection.replace_one(filter_doc, doc, **kwargs)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/batch_reshape.py#L294-L377
def _validate_sample_arg(self, x): """""" with tf.name_scope("validate_sample_arg"): x_ndims = ( tf.rank(x) if tensorshape_util.rank(x.shape) is None else tensorshape_util.rank(x.shape)) event_ndims = ( tf.size(input=self.event_shape_tensor()) if tensorshape_util.rank(self.event_shape) is None else tensorshape_util.rank(self.event_shape)) batch_ndims = ( tf.size(input=self._batch_shape_unexpanded) if tensorshape_util.rank(self.batch_shape) is None else tensorshape_util.rank(self.batch_shape)) expected_batch_event_ndims = batch_ndims + event_ndims if (isinstance(x_ndims, int) and isinstance(expected_batch_event_ndims, int)): if x_ndims < expected_batch_event_ndims: raise NotImplementedError( "Broadcasting is not supported; too few batch and event dims " "(expected at least {}, saw {}).".format( expected_batch_event_ndims, x_ndims)) ndims_assertion = [] elif self.validate_args: ndims_assertion = [ assert_util.assert_greater_equal( x_ndims, expected_batch_event_ndims, message=("Broadcasting is not supported; too few " "batch and event dims."), name="assert_batch_and_event_ndims_large_enough"), ] if (tensorshape_util.is_fully_defined(self.batch_shape) and tensorshape_util.is_fully_defined(self.event_shape)): expected_batch_event_shape = np.int32( tensorshape_util.concatenate(self.batch_shape, self.event_shape)) else: expected_batch_event_shape = tf.concat( [ self.batch_shape_tensor(), self.event_shape_tensor(), ], axis=0) sample_ndims = x_ndims - expected_batch_event_ndims if isinstance(sample_ndims, int): sample_ndims = max(sample_ndims, 0) if (isinstance(sample_ndims, int) and tensorshape_util.is_fully_defined(x.shape[sample_ndims:])): actual_batch_event_shape = np.int32(x.shape[sample_ndims:]) else: sample_ndims = tf.maximum(sample_ndims, 0) actual_batch_event_shape = tf.shape(input=x)[sample_ndims:] if (isinstance(expected_batch_event_shape, np.ndarray) and isinstance(actual_batch_event_shape, np.ndarray)): if any(expected_batch_event_shape != actual_batch_event_shape): raise NotImplementedError("Broadcasting is not supported; " "unexpected batch and event shape " "(expected {}, saw {}).".format( expected_batch_event_shape, actual_batch_event_shape)) # We need to set the final runtime-assertions to `ndims_assertion` since # its possible this assertion was created. We could add a condition to # only do so if `self.validate_args == True`, however this is redundant # as `ndims_assertion` already encodes this information. runtime_assertions = ndims_assertion elif self.validate_args: # We need to make the `ndims_assertion` a control dep because otherwise # TF itself might raise an exception owing to this assertion being # ill-defined, ie, one cannot even compare different rank Tensors. with tf.control_dependencies(ndims_assertion): shape_assertion = assert_util.assert_equal( expected_batch_event_shape, actual_batch_event_shape, message=("Broadcasting is not supported; " "unexpected batch and event shape."), name="assert_batch_and_event_shape_same") runtime_assertions = [shape_assertion] else: runtime_assertions = [] return runtime_assertions
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/internal/util.py#L296-L329
def canonicalize_observed_time_series_with_mask( maybe_masked_observed_time_series): """ """ with tf.compat.v1.name_scope('canonicalize_observed_time_series_with_mask'): if hasattr(maybe_masked_observed_time_series, 'is_missing'): observed_time_series = ( maybe_masked_observed_time_series.time_series) is_missing = maybe_masked_observed_time_series.is_missing else: observed_time_series = maybe_masked_observed_time_series is_missing = None observed_time_series = tf.convert_to_tensor(value=observed_time_series, name='observed_time_series') observed_time_series = _maybe_expand_trailing_dim(observed_time_series) if is_missing is not None: is_missing = tf.convert_to_tensor( value=is_missing, name='is_missing', dtype_hint=tf.bool) return missing_values_util.MaskedTimeSeries(observed_time_series, is_missing=is_missing)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/spark_submit_hook.py#L378-L429
def _process_spark_submit_log(self, itr): """ """ # Consume the iterator for line in itr: line = line.strip() # If we run yarn cluster mode, we want to extract the application id from # the logs so we can kill the application when we stop it unexpectedly if self._is_yarn and self._connection['deploy_mode'] == 'cluster': match = re.search('(application[0-9_]+)', line) if match: self._yarn_application_id = match.groups()[0] self.log.info("Identified spark driver id: %s", self._yarn_application_id) # If we run Kubernetes cluster mode, we want to extract the driver pod id # from the logs so we can kill the application when we stop it unexpectedly elif self._is_kubernetes: match = re.search(r'\s*pod name: ((.+?)-([a-z0-9]+)-driver)', line) if match: self._kubernetes_driver_pod = match.groups()[0] self.log.info("Identified spark driver pod: %s", self._kubernetes_driver_pod) # Store the Spark Exit code match_exit_code = re.search(r'\s*Exit code: (\d+)', line) if match_exit_code: self._spark_exit_code = int(match_exit_code.groups()[0]) # if we run in standalone cluster mode and we want to track the driver status # we need to extract the driver id from the logs. This allows us to poll for # the status using the driver id. Also, we can kill the driver when needed. elif self._should_track_driver_status and not self._driver_id: match_driver_id = re.search(r'(driver-[0-9\-]+)', line) if match_driver_id: self._driver_id = match_driver_id.groups()[0] self.log.info("identified spark driver id: {}" .format(self._driver_id)) else: self.log.info(line) self.log.debug("spark submit log: {}".format(line))
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_natural_language_hook.py#L198-L217
def classify_text(self, document, retry=None, timeout=None, metadata=None): """ """ client = self.get_conn() return client.classify_text(document=document, retry=retry, timeout=timeout, metadata=metadata)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_spanner_hook.py#L184-L209
def get_database(self, instance_id, database_id, project_id=None): """ """ instance = self._get_client(project_id=project_id).instance( instance_id=instance_id) if not instance.exists(): raise AirflowException("The instance {} does not exist in project {} !". format(instance_id, project_id)) database = instance.database(database_id=database_id) if not database.exists(): return None else: return database
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_api_base_hook.py#L148-L159
def _get_field(self, f, default=None): """ """ long_f = 'extra__google_cloud_platform__{}'.format(f) if hasattr(self, 'extras') and long_f in self.extras: return self.extras[long_f] else: return default
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/program_transformations.py#L34-L135
def make_value_setter(**model_kwargs): """ """ def set_values(f, *args, **kwargs): """Sets random variable values to its aligned value.""" name = kwargs.get("name") if name in model_kwargs: kwargs["value"] = model_kwargs[name] return interceptable(f)(*args, **kwargs) return set_values
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sqoop_hook.py#L235-L256
def import_query(self, query, target_dir, append=False, file_type="text", split_by=None, direct=None, driver=None, extra_import_options=None): """ """ cmd = self._import_cmd(target_dir, append, file_type, split_by, direct, driver, extra_import_options) cmd += ["--query", query] self.Popen(cmd)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/quantized_distribution.py#L372-L394
def _log_prob_with_logsf_and_logcdf(self, y): """""" # There are two options that would be equal if we had infinite precision: # Log[ sf(y - 1) - sf(y) ] # = Log[ exp{logsf(y - 1)} - exp{logsf(y)} ] # Log[ cdf(y) - cdf(y - 1) ] # = Log[ exp{logcdf(y)} - exp{logcdf(y - 1)} ] logsf_y = self.log_survival_function(y) logsf_y_minus_1 = self.log_survival_function(y - 1) logcdf_y = self.log_cdf(y) logcdf_y_minus_1 = self.log_cdf(y - 1) # Important: Here we use select in a way such that no input is inf, this # prevents the troublesome case where the output of select can be finite, # but the output of grad(select) will be NaN. # In either case, we are doing Log[ exp{big} - exp{small} ] # We want to use the sf items precisely when we are on the right side of the # median, which occurs when logsf_y < logcdf_y. big = tf.where(logsf_y < logcdf_y, logsf_y_minus_1, logcdf_y) small = tf.where(logsf_y < logcdf_y, logsf_y, logcdf_y_minus_1) return _logsum_expbig_minus_expsmall(big, small)
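The stability concern here is computing log(exp(big) - exp(small)) when both exponentials underflow; the helper presumably relies on the identity big + log1p(-exp(small - big)), which a short NumPy sketch illustrates:

import numpy as np

big, small = -1000.0, -1001.0

naive = np.log(np.exp(big) - np.exp(small))    # exp() underflows to 0, so this gives -inf
stable = big + np.log1p(-np.exp(small - big))  # stays finite

print(naive, stable)  # -inf vs. roughly -1000.459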
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/normal.py#L234-L237
def _inv_z(self, z):
  """"""
  with tf.name_scope("reconstruct"):
    return z * self.scale + self.loc
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/hager_zhang.py#L579-L650
def _prepare_args(value_and_gradients_function, initial_step_size, val_initial, val_0, approximate_wolfe_threshold): """ """ eval_count = 0 if val_initial is None: if initial_step_size is not None: initial_step_size = tf.convert_to_tensor(value=initial_step_size) else: initial_step_size = tf.convert_to_tensor(value=1.0, dtype=tf.float32) val_initial = value_and_gradients_function(initial_step_size) eval_count += 1 if val_0 is None: x_0 = tf.zeros_like(val_initial.x) val_0 = value_and_gradients_function(x_0) eval_count += 1 f_lim = val_0.f + (approximate_wolfe_threshold * tf.abs(val_0.f)) return val_0, val_initial, f_lim, tf.convert_to_tensor(value=eval_count)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/dirichlet.py#L331-L403
def _kl_dirichlet_dirichlet(d1, d2, name=None): """ """ with tf.name_scope(name or "kl_dirichlet_dirichlet"): # The KL between Dirichlet distributions can be derived as follows. We have # # Dir(x; a) = 1 / B(a) * prod_i[x[i]^(a[i] - 1)] # # where B(a) is the multivariate Beta function: # # B(a) = Gamma(a[1]) * ... * Gamma(a[n]) / Gamma(a[1] + ... + a[n]) # # The KL is # # KL(Dir(x; a), Dir(x; b)) = E_Dir(x; a){log(Dir(x; a) / Dir(x; b))} # # so we'll need to know the log density of the Dirichlet. This is # # log(Dir(x; a)) = sum_i[(a[i] - 1) log(x[i])] - log B(a) # # The only term that matters for the expectations is the log(x[i]). To # compute the expectation of this term over the Dirichlet density, we can # use the following facts about the Dirichlet in exponential family form: # 1. log(x[i]) is a sufficient statistic # 2. expected sufficient statistics (of any exp family distribution) are # equal to derivatives of the log normalizer with respect to # corresponding natural parameters: E{T[i](x)} = dA/d(eta[i]) # # To proceed, we can rewrite the Dirichlet density in exponential family # form as follows: # # Dir(x; a) = exp{eta(a) . T(x) - A(a)} # # where '.' is the dot product of vectors eta and T, and A is a scalar: # # eta[i](a) = a[i] - 1 # T[i](x) = log(x[i]) # A(a) = log B(a) # # Now, we can use fact (2) above to write # # E_Dir(x; a)[log(x[i])] # = dA(a) / da[i] # = d/da[i] log B(a) # = d/da[i] (sum_j lgamma(a[j])) - lgamma(sum_j a[j]) # = digamma(a[i])) - digamma(sum_j a[j]) # # Putting it all together, we have # # KL[Dir(x; a) || Dir(x; b)] # = E_Dir(x; a){log(Dir(x; a) / Dir(x; b)} # = E_Dir(x; a){sum_i[(a[i] - b[i]) log(x[i])} - (lbeta(a) - lbeta(b)) # = sum_i[(a[i] - b[i]) * E_Dir(x; a){log(x[i])}] - lbeta(a) + lbeta(b) # = sum_i[(a[i] - b[i]) * (digamma(a[i]) - digamma(sum_j a[j]))] # - lbeta(a) + lbeta(b)) digamma_sum_d1 = tf.math.digamma( tf.reduce_sum(input_tensor=d1.concentration, axis=-1, keepdims=True)) digamma_diff = tf.math.digamma(d1.concentration) - digamma_sum_d1 concentration_diff = d1.concentration - d2.concentration return ( tf.reduce_sum(input_tensor=concentration_diff * digamma_diff, axis=-1) - tf.math.lbeta(d1.concentration) + tf.math.lbeta(d2.concentration))
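A numerical spot-check of the closed form above, assuming SciPy is available for digamma and gammaln:

import numpy as np
from scipy.special import digamma, gammaln

def lbeta(a):
    # log of the multivariate Beta function B(a)
    return np.sum(gammaln(a)) - gammaln(np.sum(a))

def kl_dirichlet(a, b):
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    digamma_diff = digamma(a) - digamma(a.sum())
    return np.sum((a - b) * digamma_diff) - lbeta(a) + lbeta(b)

print(kl_dirichlet([1.0, 1.0], [1.0, 1.0]))  # 0.0 for identical distributions
print(kl_dirichlet([2.0, 3.0], [1.0, 1.0]))  # strictly positive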
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/wasb_hook.py#L66-L82
def check_for_prefix(self, container_name, prefix, **kwargs): """ """ matches = self.connection.list_blobs(container_name, prefix, num_results=1, **kwargs) return len(list(matches)) > 0
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/migrations/env.py#L48-L65
def run_migrations_offline(): """ """ context.configure( url=settings.SQL_ALCHEMY_CONN, target_metadata=target_metadata, literal_binds=True, compare_type=COMPARE_TYPE) with context.begin_transaction(): context.run_migrations()
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_pubsub_hook.py#L112-L137
def delete_topic(self, project, topic, fail_if_not_exists=False):
    """Delete a Pub/Sub topic, optionally raising if it does not exist."""
    service = self.get_conn()
    full_topic = _format_topic(project, topic)
    try:
        service.projects().topics().delete(
            topic=full_topic).execute(num_retries=self.num_retries)
    except HttpError as e:
        # Status code 404 indicates that the topic was not found
        if str(e.resp['status']) == '404':
            message = 'Topic does not exist: {}'.format(full_topic)
            self.log.warning(message)
            if fail_if_not_exists:
                raise PubSubException(message)
        else:
            raise PubSubException(
                'Error deleting topic {}'.format(full_topic), e)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/langevin.py#L630-L688
def _euler_method(random_draw_parts, state_parts, drift_parts, step_size_parts, volatility_parts, name=None): """ """ with tf.compat.v1.name_scope(name, 'mala_euler_method', [ random_draw_parts, state_parts, drift_parts, step_size_parts, volatility_parts ]): proposed_state_parts = [] for random_draw, state, drift, step_size, volatility in zip( random_draw_parts, state_parts, drift_parts, step_size_parts, volatility_parts): proposal = state + drift + volatility * tf.sqrt(step_size) * random_draw proposed_state_parts.append(proposal) return proposed_state_parts
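In one dimension the proposal is simply x + drift + volatility * sqrt(step_size) * z with z drawn from a standard normal; a NumPy sketch with illustrative values:

import numpy as np

rng = np.random.default_rng(0)

x, drift, volatility, step_size = 1.0, -0.05, 0.3, 0.01
z = rng.standard_normal()

x_proposed = x + drift + volatility * np.sqrt(step_size) * z
print(x_proposed)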
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L674-L687
def get_dagrun(self, session): """ """ from airflow.models.dagrun import DagRun # Avoid circular import dr = session.query(DagRun).filter( DagRun.dag_id == self.dag_id, DagRun.execution_date == self.execution_date ).first() return dr
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/_vendor/nvd3/NVD3Chart.py#L179-L294
def add_serie(self, y, x, name=None, extra=None, **kwargs): """ """ if not name: name = "Serie %d" % (self.serie_no) # For scatterChart shape & size fields are added in serie if 'shape' in kwargs or 'size' in kwargs: csize = kwargs.get('size', 1) cshape = kwargs.get('shape', 'circle') serie = [{ 'x': x[i], 'y': j, 'shape': cshape, 'size': csize[i] if isinstance(csize, list) else csize } for i, j in enumerate(y)] else: if self.model == 'pieChart': serie = [{'label': x[i], 'value': y} for i, y in enumerate(y)] else: serie = [{'x': x[i], 'y': y} for i, y in enumerate(y)] data_keyvalue = {'values': serie, 'key': name} # multiChart # Histogram type='bar' for the series if 'type' in kwargs and kwargs['type']: data_keyvalue['type'] = kwargs['type'] # Define on which Y axis the serie is related # a chart can have 2 Y axis, left and right, by default only one Y Axis is used if 'yaxis' in kwargs and kwargs['yaxis']: data_keyvalue['yAxis'] = kwargs['yaxis'] else: if self.model != 'pieChart': data_keyvalue['yAxis'] = '1' if 'bar' in kwargs and kwargs['bar']: data_keyvalue['bar'] = 'true' if 'disabled' in kwargs and kwargs['disabled']: data_keyvalue['disabled'] = 'true' if 'color' in kwargs and kwargs['color']: data_keyvalue['color'] = kwargs['color'] if extra: if self.model == 'pieChart': if 'color_list' in extra and extra['color_list']: self.color_list = extra['color_list'] if extra.get('date_format'): self.charttooltip_dateformat = extra['date_format'] if extra.get('tooltip'): self.custom_tooltip_flag = True if self.model != 'pieChart': _start = extra['tooltip']['y_start'] _end = extra['tooltip']['y_end'] _start = ("'" + str(_start) + "' + ") if _start else '' _end = (" + '" + str(_end) + "'") if _end else '' if self.model == 'linePlusBarChart': if self.tooltip_condition_string: self.tooltip_condition_string += stab(5) self.tooltip_condition_string += stab(0) + "if(key.indexOf('" + name + "') > -1 ){\n" +\ stab(6) + "var y = " + _start + " String(graph.point.y) " + _end + ";\n" +\ stab(5) + "}\n" elif self.model == 'cumulativeLineChart': self.tooltip_condition_string += stab(0) + "if(key == '" + name + "'){\n" +\ stab(6) + "var y = " + _start + " String(e) " + _end + ";\n" +\ stab(5) + "}\n" else: self.tooltip_condition_string += stab(5) + "if(key == '" + name + "'){\n" +\ stab(6) + "var y = " + _start + " String(graph.point.y) " + _end + ";\n" +\ stab(5) + "}\n" if self.model == 'pieChart': _start = extra['tooltip']['y_start'] _end = extra['tooltip']['y_end'] _start = ("'" + str(_start) + "' + ") if _start else '' _end = (" + '" + str(_end) + "'") if _end else '' self.tooltip_condition_string += "var y = " + _start + " String(y) " + _end + ";\n" # Increment series counter & append self.serie_no += 1 self.series.append(data_keyvalue)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L221-L234
def get_user_roles(self, user=None): """ """ if user is None: user = g.user if user.is_anonymous: public_role = appbuilder.config.get('AUTH_ROLE_PUBLIC') return [appbuilder.security_manager.find_role(public_role)] \ if public_role else [] return user.roles
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/S3_hook.py#L109-L145
def list_prefixes(self, bucket_name, prefix='', delimiter='', page_size=None, max_items=None): """ """ config = { 'PageSize': page_size, 'MaxItems': max_items, } paginator = self.get_conn().get_paginator('list_objects_v2') response = paginator.paginate(Bucket=bucket_name, Prefix=prefix, Delimiter=delimiter, PaginationConfig=config) has_results = False prefixes = [] for page in response: if 'CommonPrefixes' in page: has_results = True for p in page['CommonPrefixes']: prefixes.append(p['Prefix']) if has_results: return prefixes
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L211-L222
def delete_product_set( self, location, product_set_id, project_id=None, retry=None, timeout=None, metadata=None ): """ """ client = self.get_conn() name = ProductSearchClient.product_set_path(project_id, location, product_set_id) self.log.info('Deleting ProductSet: %s', name) client.delete_product_set(name=name, retry=retry, timeout=timeout, metadata=metadata) self.log.info('ProductSet with the name [%s] deleted.', name)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1421-L1468
def run_table_upsert(self, dataset_id, table_resource, project_id=None): """ """ # check to see if the table exists table_id = table_resource['tableReference']['tableId'] project_id = project_id if project_id is not None else self.project_id tables_list_resp = self.service.tables().list( projectId=project_id, datasetId=dataset_id).execute(num_retries=self.num_retries) while True: for table in tables_list_resp.get('tables', []): if table['tableReference']['tableId'] == table_id: # found the table, do update self.log.info('Table %s:%s.%s exists, updating.', project_id, dataset_id, table_id) return self.service.tables().update( projectId=project_id, datasetId=dataset_id, tableId=table_id, body=table_resource).execute(num_retries=self.num_retries) # If there is a next page, we need to check the next page. if 'nextPageToken' in tables_list_resp: tables_list_resp = self.service.tables()\ .list(projectId=project_id, datasetId=dataset_id, pageToken=tables_list_resp['nextPageToken'])\ .execute(num_retries=self.num_retries) # If there is no next page, then the table doesn't exist. else: # do insert self.log.info('Table %s:%s.%s does not exist. creating.', project_id, dataset_id, table_id) return self.service.tables().insert( projectId=project_id, datasetId=dataset_id, body=table_resource).execute(num_retries=self.num_retries)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/docs/exts/docroles.py#L27-L55
def get_template_field(env, fullname): """ """ modname, classname = fullname.rsplit(".", 1) try: with mock(env.config.autodoc_mock_imports): mod = import_module(modname) except ImportError: raise RoleException("Error loading %s module." % (modname, )) clazz = getattr(mod, classname) if not clazz: raise RoleException("Error finding %s class in %s module." % (classname, modname)) template_fields = getattr(clazz, "template_fields") if not template_fields: raise RoleException( "Could not find the template fields for %s class in %s module." % (classname, modname) ) return list(template_fields)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/batch_reshape.py#L234-L262
def _call_reshape_input_output(self, fn, x, extra_kwargs=None): """""" # Note: we take `extra_kwargs` as a dict rather than `**extra_kwargs` # because it is possible the user provided extra kwargs would itself # have `fn` and/or `x` as a key. with tf.control_dependencies(self._runtime_assertions + self._validate_sample_arg(x)): sample_shape, static_sample_shape = self._sample_shape(x) old_shape = tf.concat( [ sample_shape, self.distribution.batch_shape_tensor(), self.event_shape_tensor(), ], axis=0) x_reshape = tf.reshape(x, old_shape) result = fn(x_reshape, **extra_kwargs) if extra_kwargs else fn(x_reshape) new_shape = tf.concat( [ sample_shape, self._batch_shape_unexpanded, ], axis=0) result = tf.reshape(result, new_shape) if (tensorshape_util.rank(static_sample_shape) is not None and tensorshape_util.rank(self.batch_shape) is not None): new_shape = tensorshape_util.concatenate(static_sample_shape, self.batch_shape) tensorshape_util.set_shape(result, new_shape) return result
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/util/log.py#L72-L74
def print_log(text, *colors):
    """"""
    sys.stderr.write(sprint("{}: {}".format(script_name, text), *colors) + "\n")
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/api/common/experimental/get_dag_runs.py#L25-L55
def get_dag_runs(dag_id, state=None): """ """ dagbag = DagBag() # Check DAG exists. if dag_id not in dagbag.dags: error_message = "Dag id {} not found".format(dag_id) raise AirflowException(error_message) dag_runs = list() state = state.lower() if state else None for run in DagRun.find(dag_id=dag_id, state=state): dag_runs.append({ 'id': run.id, 'run_id': run.run_id, 'state': run.state, 'dag_id': run.dag_id, 'execution_date': run.execution_date.isoformat(), 'start_date': ((run.start_date or '') and run.start_date.isoformat()), 'dag_run_url': url_for('Airflow.graph', dag_id=run.dag_id, execution_date=run.execution_date) }) return dag_runs
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/backend/numpy/misc.py#L60-L71
def _sort(values, axis=-1, direction='ASCENDING', stable=False, name=None):  # pylint: disable=unused-argument
  """NumPy implementation of sort; descending order is handled by negating twice."""
  if direction == 'ASCENDING':
    pass
  elif direction == 'DESCENDING':
    values = np.negative(values)
  else:
    raise ValueError('Unrecognized direction: {}.'.format(direction))
  result = np.sort(values, axis, kind='stable' if stable else 'quicksort')
  if direction == 'DESCENDING':
    return np.negative(result)
  return result
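The descending branch just sorts the negated values and negates the result back, for example:

import numpy as np

values = np.array([3.0, 1.0, 2.0])
descending = np.negative(np.sort(np.negative(values)))
print(descending)  # [3. 2. 1.]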
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/diag_jacobian.py#L32-L248
def diag_jacobian(xs, ys=None, sample_shape=None, fn=None, parallel_iterations=10, name=None): """ """ with tf.compat.v1.name_scope(name, 'jacobians_diag', [xs, ys]): if sample_shape is None: sample_shape = [1] # Output Jacobian diagonal jacobians_diag_res = [] # Convert input `xs` to a list xs = list(xs) if _is_list_like(xs) else [xs] xs = [tf.convert_to_tensor(value=x) for x in xs] if not tf.executing_eagerly(): if ys is None: if fn is None: raise ValueError('Both `ys` and `fn` can not be `None`') else: ys = fn(*xs) # Convert ys to a list ys = list(ys) if _is_list_like(ys) else [ys] if len(xs) != len(ys): raise ValueError('`xs` and `ys` should have the same length') for y, x in zip(ys, xs): # Broadcast `y` to the shape of `x`. y_ = y + tf.zeros_like(x) # Change `event_shape` to one-dimension y_ = tf.reshape(y, tf.concat([sample_shape, [-1]], -1)) # Declare an iterator and tensor array loop variables for the gradients. n = tf.size(input=x) / tf.cast( tf.reduce_prod(input_tensor=sample_shape), dtype=tf.int32) n = tf.cast(n, dtype=tf.int32) loop_vars = [ 0, tf.TensorArray(x.dtype, n) ] def loop_body(j): """Loop function to compute gradients of the each direction.""" # Gradient along direction `j`. res = tf.gradients(ys=y_[..., j], xs=x)[0] # pylint: disable=cell-var-from-loop if res is None: # Return zero, if the gradient is `None`. res = tf.zeros(tf.concat([sample_shape, [1]], -1), dtype=x.dtype) # pylint: disable=cell-var-from-loop else: # Reshape `event_shape` to 1D res = tf.reshape(res, tf.concat([sample_shape, [-1]], -1)) # Add artificial dimension for the case of zero shape input tensor res = tf.expand_dims(res, 0) res = res[..., j] return res # pylint: disable=cell-var-from-loop # Iterate over all elements of the gradient and compute second order # derivatives. _, jacobian_diag_res = tf.while_loop( cond=lambda j, _: j < n, # pylint: disable=cell-var-from-loop body=lambda j, result: (j + 1, result.write(j, loop_body(j))), loop_vars=loop_vars, parallel_iterations=parallel_iterations) shape_x = tf.shape(input=x) # Stack gradients together and move flattened `event_shape` to the # zero position reshaped_jacobian_diag = tf.transpose(a=jacobian_diag_res.stack()) # Reshape to the original tensor reshaped_jacobian_diag = tf.reshape(reshaped_jacobian_diag, shape_x) jacobians_diag_res.append(reshaped_jacobian_diag) else: if fn is None: raise ValueError('`fn` can not be `None` when eager execution is ' 'enabled') if ys is None: ys = fn(*xs) def fn_slice(i, j): """Broadcast y[i], flatten event shape of y[i], return y[i][..., j].""" def fn_broadcast(*state): res = fn(*state) res = list(res) if _is_list_like(res) else [res] if len(res) != len(state): res *= len(state) res = [tf.reshape(r + tf.zeros_like(s), tf.concat([sample_shape, [-1]], -1)) for r, s in zip(res, state)] return res # Expand dimensions before returning in order to support 0D input `xs` return lambda *state: tf.expand_dims(fn_broadcast(*state)[i], 0)[..., j] def make_loop_body(i, x): """Loop function to compute gradients of the each direction.""" def _fn(j, result): res = value_and_gradient(fn_slice(i, j), xs)[1][i] if res is None: res = tf.zeros(tf.concat([sample_shape, [1]], -1), dtype=x.dtype) else: res = tf.reshape(res, tf.concat([sample_shape, [-1]], -1)) res = res[..., j] return j + 1, result.write(j, res) return _fn for i, x in enumerate(xs): # Declare an iterator and tensor array loop variables for the gradients. 
n = tf.size(input=x) / tf.cast( tf.reduce_prod(input_tensor=sample_shape), dtype=tf.int32) n = tf.cast(n, dtype=tf.int32) loop_vars = [ 0, tf.TensorArray(x.dtype, n) ] # Iterate over all elements of the gradient and compute second order # derivatives. _, jacobian_diag_res = tf.while_loop( cond=lambda j, _: j < n, body=make_loop_body(i, x), loop_vars=loop_vars, parallel_iterations=parallel_iterations) shape_x = tf.shape(input=x) # Stack gradients together and move flattened `event_shape` to the # zero position reshaped_jacobian_diag = tf.transpose(a=jacobian_diag_res.stack()) # Reshape to the original tensor reshaped_jacobian_diag = tf.reshape(reshaped_jacobian_diag, shape_x) jacobians_diag_res.append(reshaped_jacobian_diag) return ys, jacobians_diag_res
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/security.py#L236-L244
def get_all_permissions_views(self): """ """ perms_views = set() for role in self.get_user_roles(): perms_views.update({(perm_view.permission.name, perm_view.view_menu.name) for perm_view in role.permissions}) return perms_views
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/S3_hook.py#L459-L518
def copy_object(self, source_bucket_key, dest_bucket_key, source_bucket_name=None, dest_bucket_name=None, source_version_id=None): """ """ if dest_bucket_name is None: dest_bucket_name, dest_bucket_key = self.parse_s3_url(dest_bucket_key) else: parsed_url = urlparse(dest_bucket_key) if parsed_url.scheme != '' or parsed_url.netloc != '': raise AirflowException('If dest_bucket_name is provided, ' + 'dest_bucket_key should be relative path ' + 'from root level, rather than a full s3:// url') if source_bucket_name is None: source_bucket_name, source_bucket_key = self.parse_s3_url(source_bucket_key) else: parsed_url = urlparse(source_bucket_key) if parsed_url.scheme != '' or parsed_url.netloc != '': raise AirflowException('If source_bucket_name is provided, ' + 'source_bucket_key should be relative path ' + 'from root level, rather than a full s3:// url') CopySource = {'Bucket': source_bucket_name, 'Key': source_bucket_key, 'VersionId': source_version_id} response = self.get_conn().copy_object(Bucket=dest_bucket_name, Key=dest_bucket_key, CopySource=CopySource) return response
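The relative-key check relies on urlparse: a full s3:// URL carries a scheme and netloc, while a relative key carries neither. A standard-library illustration (bucket and key names are hypothetical):

from urllib.parse import urlparse

full_url = 's3://my-bucket/path/to/key'
relative_key = 'path/to/key'

print(urlparse(full_url).scheme, urlparse(full_url).netloc)          # 's3' 'my-bucket' -> rejected
print(urlparse(relative_key).scheme, urlparse(relative_key).netloc)  # ''   ''          -> accepted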
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/util.py#L196-L216
def default_multivariate_normal_fn(dtype, shape, name, trainable, add_variable_fn): """ """ del name, trainable, add_variable_fn # unused dist = tfd.Normal(loc=tf.zeros(shape, dtype), scale=dtype.as_numpy_dtype(1)) batch_ndims = tf.size(input=dist.batch_shape_tensor()) return tfd.Independent(dist, reinterpreted_batch_ndims=batch_ndims)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_cosmos_hook.py#L124-L140
def does_database_exist(self, database_name): """ """ if database_name is None: raise AirflowBadRequest("Database name cannot be None.") existing_database = list(self.get_conn().QueryDatabases({ "query": "SELECT * FROM r WHERE r.id=@id", "parameters": [ {"name": "@id", "value": database_name} ] })) if len(existing_database) == 0: return False return True
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/slice_sampler_kernel.py#L540-L552
def _maybe_call_fn(fn, fn_arg_list, fn_result=None, description='target_log_prob'): """""" fn_arg_list = (list(fn_arg_list) if mcmc_util.is_list_like(fn_arg_list) else [fn_arg_list]) if fn_result is None: fn_result = fn(*fn_arg_list) if not fn_result.dtype.is_floating: raise TypeError('`{}` must be a `Tensor` with `float` `dtype`.'.format( description)) return fn_result
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/_vendor/nvd3/NVD3Chart.py#L374-L383
def buildhtmlheader(self): """""" self.htmlheader = '' # If the JavaScript assets have already been injected, don't bother re-sourcing them. global _js_initialized if '_js_initialized' not in globals() or not _js_initialized: for css in self.header_css: self.htmlheader += css for js in self.header_js: self.htmlheader += js
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/utils.py#L193-L201
def json_response(obj): """ """ return Response( response=json.dumps( obj, indent=4, cls=AirflowJsonEncoder), status=200, mimetype="application/json")
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/hidden_markov_model.py#L489-L517
def _marginal_hidden_probs(self): """""" initial_log_probs = tf.broadcast_to(self._log_init, tf.concat([self.batch_shape_tensor(), [self._num_states]], axis=0)) # initial_log_probs :: batch_shape num_states if self._num_steps > 1: transition_log_probs = self._log_trans def forward_step(log_probs, _): return _log_vector_matrix(log_probs, transition_log_probs) dummy_index = tf.zeros(self._num_steps - 1, dtype=tf.float32) forward_log_probs = tf.scan(forward_step, dummy_index, initializer=initial_log_probs, name="forward_log_probs") forward_log_probs = tf.concat([[initial_log_probs], forward_log_probs], axis=0) else: forward_log_probs = initial_log_probs[tf.newaxis, ...] # returns :: num_steps batch_shape num_states return tf.exp(forward_log_probs)
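The forward recursion is repeated multiplication of a probability row vector by the transition matrix (the code above does the equivalent in log space for numerical stability); a tiny NumPy analogue with made-up numbers:

import numpy as np

initial = np.array([0.9, 0.1])        # p(z_0)
transition = np.array([[0.7, 0.3],
                       [0.2, 0.8]])   # p(z_{t+1} | z_t), rows sum to 1

num_steps = 4
marginals = [initial]
for _ in range(num_steps - 1):
    marginals.append(marginals[-1] @ transition)

print(np.stack(marginals))  # shape (num_steps, num_states)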
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/wasb_hook.py#L120-L135
def get_file(self, file_path, container_name, blob_name, **kwargs): """ """ return self.connection.get_blob_to_path(container_name, blob_name, file_path, **kwargs)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/structural_time_series.py#L161-L209
def prior_sample(self, num_timesteps, initial_step=0, params_sample_shape=(), trajectories_sample_shape=(), seed=None): """ """ seed = distributions.SeedStream( seed, salt='StructuralTimeSeries_prior_sample') with tf.compat.v1.name_scope( 'prior_sample', values=[num_timesteps, params_sample_shape, trajectories_sample_shape]): param_samples = [ p.prior.sample(params_sample_shape, seed=seed(), name=p.name) for p in self.parameters ] model = self.make_state_space_model( num_timesteps=num_timesteps, initial_step=initial_step, param_vals=param_samples) return model.sample(trajectories_sample_shape, seed=seed()), param_samples
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/structural_time_series.py#L80-L94
def batch_shape(self): """ """ batch_shape = tf.TensorShape([]) for param in self.parameters: batch_shape = tf.broadcast_static_shape( batch_shape, param.prior.batch_shape) return batch_shape
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/random_ops.py#L61-L99
def random_rayleigh(shape, scale=None, dtype=tf.float32, seed=None, name=None):
  """
  """
  with tf.compat.v1.name_scope(name, 'random_rayleigh', [shape, scale, seed]):
    if scale is not None:
      # It's important to expand the shape to match scale's, otherwise we won't
      # have independent draws.
      scale = tf.convert_to_tensor(value=scale, dtype=dtype, name='scale')
      shape = tf.broadcast_dynamic_shape(shape, tf.shape(input=scale))
    x = tf.sqrt(-2. * tf.math.log(
        tf.random.uniform(shape, minval=0, maxval=1, dtype=dtype, seed=seed)))
    if scale is None:
      return x
    return x * scale
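The draw is the inverse-CDF construction: if U ~ Uniform(0, 1] then sqrt(-2 log U) is Rayleigh(1), and multiplying by `scale` gives Rayleigh(scale). A quick NumPy check of the resulting mean, which should be scale * sqrt(pi / 2); the values are illustrative.

# Sanity check of the sampler's construction, NumPy only.
import numpy as np

rng = np.random.RandomState(0)
scale = 2.0
u = rng.uniform(low=1e-12, high=1.0, size=100_000)   # avoid log(0)
x = scale * np.sqrt(-2.0 * np.log(u))
print(x.mean(), scale * np.sqrt(np.pi / 2.0))        # both approximately 2.507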
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_pubsub_hook.py#L263-L285
def acknowledge(self, project, subscription, ack_ids): """ """ service = self.get_conn() full_subscription = _format_subscription(project, subscription) try: service.projects().subscriptions().acknowledge( subscription=full_subscription, body={'ackIds': ack_ids} ).execute(num_retries=self.num_retries) except HttpError as e: raise PubSubException( 'Error acknowledging {} messages pulled from subscription {}' .format(len(ack_ids), full_subscription), e)
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L212-L246
def resize(img, size, interpolation=Image.BILINEAR):
    """
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    if not (isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)):
        raise TypeError('Got inappropriate size arg: {}'.format(size))

    if isinstance(size, int):
        w, h = img.size
        if (w <= h and w == size) or (h <= w and h == size):
            return img
        if w < h:
            ow = size
            oh = int(size * h / w)
            return img.resize((ow, oh), interpolation)
        else:
            oh = size
            ow = int(size * w / h)
            return img.resize((ow, oh), interpolation)
    else:
        return img.resize(size[::-1], interpolation)
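A usage sketch with a synthetic image; hedged because the exact rounding of the long edge can differ slightly across torchvision versions. PIL reports size as (width, height), while a sequence `size` is interpreted as (h, w).

# Usage sketch; assumes torchvision and Pillow are installed.
from PIL import Image
from torchvision.transforms import functional as F

img = Image.new('RGB', (400, 300))
print(F.resize(img, 200).size)         # int size: shorter edge -> 200, giving (266, 200)
print(F.resize(img, (120, 160)).size)  # (h, w) sequence: exact resize, giving (160, 120)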
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L593-L599
def get_tables(self, db, pattern='*'): """ """ with self.metastore as client: tables = client.get_tables(db_name=db, pattern=pattern) return client.get_table_objects_by_name(db, tables)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_container_instance_hook.py#L133-L147
def get_logs(self, resource_group, name, tail=1000): """ """ logs = self.connection.container.list_logs(resource_group, name, name, tail=tail) return logs.content.splitlines(True)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/trainable_distributions/trainable_distributions_lib.py#L284-L387
def normal(x, layer_fn=tf.compat.v1.layers.dense, loc_fn=lambda x: x, scale_fn=1., name=None): """ """ with tf.compat.v1.name_scope(name, 'normal', [x]): x = tf.convert_to_tensor(value=x, name='x') if callable(scale_fn): y = layer_fn(x, 2) loc = loc_fn(y[..., 0]) scale = scale_fn(y[..., 1]) else: y = tf.squeeze(layer_fn(x, 1), axis=-1) loc = loc_fn(y) scale = tf.cast(scale_fn, loc.dtype.base_dtype) return tfd.Normal(loc=loc, scale=scale)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/joint_distribution_sequential.py#L38-L46
def _make_summary_statistic(attr): """""" def _fn(self): if any(self._dist_fn_args): # pylint: disable=protected-access raise ValueError( 'Can only compute ' + attr + ' when all distributions are ' 'independent; {}'.format(self.model)) return self._unflatten(getattr(d(), attr)() for d in self._dist_fn_wrapped) # pylint: disable=protected-access return _fn
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/forecast.py#L35-L169
def one_step_predictive(model, observed_time_series, parameter_samples): """ """ with tf.compat.v1.name_scope( 'one_step_predictive', values=[observed_time_series, parameter_samples]): [ observed_time_series, is_missing ] = sts_util.canonicalize_observed_time_series_with_mask( observed_time_series) # Run filtering over the training timesteps to extract the # predictive means and variances. num_timesteps = dist_util.prefer_static_value( tf.shape(input=observed_time_series))[-2] lgssm = model.make_state_space_model( num_timesteps=num_timesteps, param_vals=parameter_samples) (_, _, _, _, _, observation_means, observation_covs ) = lgssm.forward_filter(observed_time_series, mask=is_missing) # Squeeze dims to convert from LGSSM's event shape `[num_timesteps, 1]` # to a scalar time series. return sts_util.mix_over_posterior_draws( means=observation_means[..., 0], variances=observation_covs[..., 0, 0])
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/executors/kubernetes_executor.py#L473-L490
def _make_safe_pod_id(safe_dag_id, safe_task_id, safe_uuid): """ """ MAX_POD_ID_LEN = 253 safe_key = safe_dag_id + safe_task_id safe_pod_id = safe_key[:MAX_POD_ID_LEN - len(safe_uuid) - 1] + "-" + safe_uuid return safe_pod_id
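A small illustrative check of the truncation arithmetic (the ids and uuid below are made up): the dag/task key is cut so that the key, the dash, and the uuid together fit within the 253-character limit.

# Illustrative values only; mirrors the slicing above.
MAX_POD_ID_LEN = 253
safe_uuid = 'deadbeef1234'                         # 12 characters here
safe_key = ('verylongdagid' * 20) + 'sometask'     # 268 characters, over the limit
safe_pod_id = safe_key[:MAX_POD_ID_LEN - len(safe_uuid) - 1] + '-' + safe_uuid
assert len(safe_pod_id) == MAX_POD_ID_LEN
assert safe_pod_id.endswith('-deadbeef1234')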
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/sample_stats.py#L646-L661
def _make_list_or_1d_tensor(values): """""" values = tf.convert_to_tensor(value=values, name='values') values_ = tf.get_static_value(values) # Static didn't work. if values_ is None: # Cheap way to bring to at least 1d. return values + tf.zeros([1], dtype=values.dtype) # Static worked! if values_.ndim > 1: raise ValueError('values had > 1 dim: {}'.format(values_.shape)) # Cheap way to bring to at least 1d. values_ = values_ + np.zeros([1], dtype=values_.dtype) return list(values_)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/math/sparse.py#L30-L61
def dense_to_sparse(x, ignore_value=None, name=None):
  """
  """
  # Copied (with modifications) from:
  # tensorflow/contrib/layers/python/ops/sparse_ops.py.
  with tf.compat.v1.name_scope(name, 'dense_to_sparse', [x, ignore_value]):
    x = tf.convert_to_tensor(value=x, name='x')
    if ignore_value is None:
      if x.dtype.base_dtype == tf.string:
        # Strings are a special case: TF converts them to numpy objects by
        # default, so use the empty string as the sentinel.
        ignore_value = ''
      else:
        ignore_value = x.dtype.as_numpy_dtype(0)
    ignore_value = tf.cast(ignore_value, x.dtype, name='ignore_value')
    indices = tf.where(tf.not_equal(x, ignore_value), name='indices')
    return tf.SparseTensor(
        indices=indices,
        values=tf.gather_nd(x, indices, name='values'),
        dense_shape=tf.shape(input=x, out_type=tf.int64, name='dense_shape'))
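A hedged usage sketch, assuming the helper is exposed as `tfp.math.dense_to_sparse` and TF 2 eager execution: for numeric inputs, zeros are treated as the default `ignore_value` and everything else becomes a SparseTensor entry.

# Sketch: run eagerly; zeros are the default ignore_value for float inputs.
import tensorflow as tf
import tensorflow_probability as tfp

x = tf.constant([[1., 0., 2.],
                 [0., 0., 3.]])
sp = tfp.math.dense_to_sparse(x)
print(sp.indices.numpy())      # [[0 0] [0 2] [1 2]]
print(sp.values.numpy())       # [1. 2. 3.]
print(sp.dense_shape.numpy())  # [2 3]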
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/initializers.py#L119-L126
def from_config(cls, config): """""" return cls(**{ 'initializers': [tf.compat.v2.initializers.deserialize(init) for init in config.get('initializers', [])], 'sizes': config.get('sizes', []), 'validate_args': config.get('validate_args', False), })
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/logging_mixin.py#L166-L184
def set_context(logger, value): """ """ _logger = logger while _logger: for handler in _logger.handlers: try: handler.set_context(value) except AttributeError: # Not all handlers need to have context passed in so we ignore # the error when handlers do not have set_context defined. pass if _logger.propagate is True: _logger = _logger.parent else: _logger = None
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/affine.py#L238-L309
def _create_scale_operator(self, identity_multiplier, diag, tril, perturb_diag, perturb_factor, shift, validate_args, dtype): """ """ identity_multiplier = _as_tensor(identity_multiplier, "identity_multiplier", dtype) diag = _as_tensor(diag, "diag", dtype) tril = _as_tensor(tril, "tril", dtype) perturb_diag = _as_tensor(perturb_diag, "perturb_diag", dtype) perturb_factor = _as_tensor(perturb_factor, "perturb_factor", dtype) # If possible, use the low rank update to infer the shape of # the identity matrix, when scale represents a scaled identity matrix # with a low rank update. shape_hint = None if perturb_factor is not None: shape_hint = distribution_util.dimension_size(perturb_factor, axis=-2) if self._is_only_identity_multiplier: if validate_args: return distribution_util.with_dependencies([ assert_util.assert_none_equal( identity_multiplier, tf.zeros([], identity_multiplier.dtype), ["identity_multiplier should be non-zero."]) ], identity_multiplier) return identity_multiplier scale = distribution_util.make_tril_scale( loc=shift, scale_tril=tril, scale_diag=diag, scale_identity_multiplier=identity_multiplier, validate_args=validate_args, assert_positive=False, shape_hint=shape_hint) if perturb_factor is not None: return tf.linalg.LinearOperatorLowRankUpdate( scale, u=perturb_factor, diag_update=perturb_diag, is_diag_update_positive=perturb_diag is None, is_non_singular=True, # Implied by is_positive_definite=True. is_self_adjoint=True, is_positive_definite=True, is_square=True) return scale
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/random_variable.py#L133-L137
def sample_shape(self): """""" if isinstance(self._sample_shape, tf.Tensor): return tf.TensorShape(tf.get_static_value(self._sample_shape)) return tf.TensorShape(self._sample_shape)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/disentangled_vae.py#L488-L515
def call(self, inputs): """ """ # TODO(dusenberrymw): Remove these reshaping commands after b/113126249 is # fixed. collapsed_shape = tf.concat(([-1], tf.shape(input=inputs)[-2:]), axis=0) out = tf.reshape(inputs, collapsed_shape) # (sample*batch_size, T, hidden) out = self.bilstm(out) # (sample*batch_size, hidden) expanded_shape = tf.concat((tf.shape(input=inputs)[:-2], [-1]), axis=0) out = tf.reshape(out, expanded_shape) # (sample, batch_size, hidden) out = self.output_layer(out) # (sample, batch_size, 2*latent_size) loc = out[..., :self.latent_size] scale_diag = tf.nn.softplus(out[..., self.latent_size:]) + 1e-5 # keep > 0 return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L519-L572
def describe_training_job_with_log(self, job_name, positions, stream_names, instance_count, state, last_description, last_describe_job_call): """ """ log_group = '/aws/sagemaker/TrainingJobs' if len(stream_names) < instance_count: # Log streams are created whenever a container starts writing to stdout/err, so this list # may be dynamic until we have a stream for every instance. logs_conn = self.get_log_conn() try: streams = logs_conn.describe_log_streams( logGroupName=log_group, logStreamNamePrefix=job_name + '/', orderBy='LogStreamName', limit=instance_count ) stream_names = [s['logStreamName'] for s in streams['logStreams']] positions.update([(s, Position(timestamp=0, skip=0)) for s in stream_names if s not in positions]) except logs_conn.exceptions.ResourceNotFoundException: # On the very first training job run on an account, there's no log group until # the container starts logging, so ignore any errors thrown about that pass if len(stream_names) > 0: for idx, event in self.multi_stream_iter(log_group, stream_names, positions): self.log.info(event['message']) ts, count = positions[stream_names[idx]] if event['timestamp'] == ts: positions[stream_names[idx]] = Position(timestamp=ts, skip=count + 1) else: positions[stream_names[idx]] = Position(timestamp=event['timestamp'], skip=1) if state == LogState.COMPLETE: return state, last_description, last_describe_job_call if state == LogState.JOB_COMPLETE: state = LogState.COMPLETE elif time.time() - last_describe_job_call >= 30: description = self.describe_training_job(job_name) last_describe_job_call = time.time() if secondary_training_status_changed(description, last_description): self.log.info(secondary_training_status_message(description, last_description)) last_description = description status = description['TrainingJobStatus'] if status not in self.non_terminal_states: state = LogState.JOB_COMPLETE return state, last_description, last_describe_job_call
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L641-L680
def _get_max_partition_from_part_specs(part_specs, partition_key, filter_map): """ """ if not part_specs: return None # Assuming all specs have the same keys. if partition_key not in part_specs[0].keys(): raise AirflowException("Provided partition_key {} " "is not in part_specs.".format(partition_key)) if filter_map: is_subset = set(filter_map.keys()).issubset(set(part_specs[0].keys())) if filter_map and not is_subset: raise AirflowException("Keys in provided filter_map {} " "are not subset of part_spec keys: {}" .format(', '.join(filter_map.keys()), ', '.join(part_specs[0].keys()))) candidates = [p_dict[partition_key] for p_dict in part_specs if filter_map is None or all(item in p_dict.items() for item in filter_map.items())] if not candidates: return None else: return max(candidates).encode('utf-8')
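A plain-Python sketch of the candidate selection, using made-up partition specs and mirroring the list comprehension and `max()` above.

# Made-up partition specs; mirrors the filtering and max() above.
part_specs = [
    {'ds': '2019-01-01', 'hour': '00'},
    {'ds': '2019-01-02', 'hour': '00'},
    {'ds': '2019-01-03', 'hour': '01'},
]
filter_map = {'hour': '00'}
candidates = [p['ds'] for p in part_specs
              if all(item in p.items() for item in filter_map.items())]
print(max(candidates).encode('utf-8'))   # b'2019-01-02'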
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_text_to_speech_hook.py#L42-L51
def get_conn(self): """ """ if not self._client: self._client = TextToSpeechClient(credentials=self._get_credentials()) return self._client
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/backend/numpy/internal/utils.py#L58-L74
def common_dtype(args_list, preferred_dtype=None): """""" dtype = None preferred_dtype = (None if preferred_dtype is None else tf.as_dtype(preferred_dtype)) for a in tf.nest.flatten(args_list): if hasattr(a, 'dtype'): dt = tf.as_dtype(a.dtype) else: continue if dtype is None: dtype = dt elif dtype != dt: raise TypeError('Found incompatible dtypes, {} and {}.'.format(dtype, dt)) if dtype is None and preferred_dtype is None: return None return (preferred_dtype if dtype is None else dtype).as_numpy_dtype
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L1015-L1039
def __get_concurrency_maps(self, states, session=None): """ """ TI = models.TaskInstance ti_concurrency_query = ( session .query(TI.task_id, TI.dag_id, func.count('*')) .filter(TI.state.in_(states)) .group_by(TI.task_id, TI.dag_id) ).all() dag_map = defaultdict(int) task_map = defaultdict(int) for result in ti_concurrency_query: task_id, dag_id, count = result dag_map[dag_id] += count task_map[(dag_id, task_id)] = count return dag_map, task_map
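A hedged sketch of what the two maps look like, using made-up query rows shaped like the grouped result (task_id, dag_id, count).

# Made-up rows shaped like the grouped query result: (task_id, dag_id, count).
from collections import defaultdict

rows = [('extract', 'example_dag', 2), ('load', 'example_dag', 1)]
dag_map, task_map = defaultdict(int), defaultdict(int)
for task_id, dag_id, count in rows:
    dag_map[dag_id] += count
    task_map[(dag_id, task_id)] = count
print(dict(dag_map))    # {'example_dag': 3}
print(dict(task_map))   # {('example_dag', 'extract'): 2, ('example_dag', 'load'): 1}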
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/sample_halton_sequence.py#L368-L384
def _primes_less_than(n): # Based on # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188 """""" small_primes = np.array((2, 3, 5)) if n <= 6: return small_primes[small_primes < n] sieve = np.ones(n // 3 + (n % 6 == 2), dtype=np.bool) sieve[0] = False m = int(n ** 0.5) // 3 + 1 for i in range(m): if not sieve[i]: continue k = 3 * i + 1 | 1 sieve[k ** 2 // 3::2 * k] = False sieve[(k ** 2 + 4 * k - 2 * k * (i & 1)) // 3::2 * k] = False return np.r_[2, 3, 3 * np.nonzero(sieve)[0] + 1 | 1]
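The sieve only tracks numbers coprime to 2 and 3: index i maps to the candidate 3*i + 1 | 1, which walks the 6k ± 1 wheel. A tiny check of that mapping:

# Index-to-candidate mapping used by the sieve above; index 0 maps to 1,
# which is why sieve[0] is cleared explicitly.
for i in range(6):
    print(i, 3 * i + 1 | 1)   # 0->1, 1->5, 2->7, 3->11, 4->13, 5->17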
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/diagnostic.py#L146-L200
def _effective_sample_size_single_state(states, filter_beyond_lag,
                                        filter_threshold):
  """"""
  with tf.compat.v1.name_scope(
      'effective_sample_size_single_state',
      values=[states, filter_beyond_lag, filter_threshold]):

    states = tf.convert_to_tensor(value=states, name='states')
    dt = states.dtype

    # filter_beyond_lag == None ==> auto_corr is the full sequence.
    auto_corr = stats.auto_correlation(
        states, axis=0, max_lags=filter_beyond_lag)
    if filter_threshold is not None:
      filter_threshold = tf.convert_to_tensor(
          value=filter_threshold, dtype=dt, name='filter_threshold')
      # Get a binary mask to zero out values of auto_corr below the threshold.
      #   mask[i, ...] = 1 if auto_corr[j, ...] > threshold for all j <= i,
      #   mask[i, ...] = 0, otherwise.
      # So, along dimension zero, the mask will look like [1, 1, ..., 0, 0,...]
      # Building step by step,
      #   Assume auto_corr = [1, 0.5, 0.0, 0.3], and filter_threshold = 0.2.
      # Step 1:  mask = [False, False, True, False]
      mask = auto_corr < filter_threshold
      # Step 2:  mask = [0, 0, 1, 0]
      mask = tf.cast(mask, dtype=dt)
      # Step 3:  mask = [0, 0, 1, 1]
      mask = tf.cumsum(mask, axis=0)
      # Step 4:  mask = [1, 1, 0, 0]
      mask = tf.maximum(1. - mask, 0.)
      auto_corr *= mask

    # With R[k] := auto_corr[k, ...],
    # ESS = N / {1 + 2 * Sum_{k=1}^N (N - k) / N * R[k]}
    #     = N / {-1 + 2 * Sum_{k=0}^N (N - k) / N * R[k]}   (since R[0] = 1)
    #     approx N / {-1 + 2 * Sum_{k=0}^M (N - k) / N * R[k]}
    # where M is the filter_beyond_lag truncation point chosen above.

    # Get the factor (N - k) / N, and give it shape [M, 1,...,1], having total
    # ndims the same as auto_corr
    n = _axis_size(states, axis=0)
    k = tf.range(0., _axis_size(auto_corr, axis=0))
    nk_factor = (n - k) / n
    if auto_corr.shape.ndims is not None:
      new_shape = [-1] + [1] * (auto_corr.shape.ndims - 1)
    else:
      new_shape = tf.concat(
          ([-1], tf.ones([tf.rank(auto_corr) - 1], dtype=tf.int32)),
          axis=0)
    nk_factor = tf.reshape(nk_factor, new_shape)

    return n / (-1 + 2 * tf.reduce_sum(input_tensor=nk_factor * auto_corr,
                                       axis=0))
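A NumPy walk-through of the masking steps, using the example from the comments (auto_corr = [1, 0.5, 0.0, 0.3], threshold 0.2):

# NumPy walk-through of the mask construction above.
import numpy as np

auto_corr = np.array([1.0, 0.5, 0.0, 0.3])
mask = auto_corr < 0.2                  # [False False  True False]
mask = mask.astype(auto_corr.dtype)     # [0. 0. 1. 0.]
mask = np.cumsum(mask)                  # [0. 0. 1. 1.]
mask = np.maximum(1.0 - mask, 0.0)      # [1. 1. 0. 0.]
print(auto_corr * mask)                 # [1.  0.5 0.  0. ]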
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/special_math.py#L391-L407
def _log_ndtr_asymptotic_series(x, series_order): """""" npdt = dtype_util.as_numpy_dtype(x.dtype) if series_order <= 0: return npdt(1) x_2 = tf.square(x) even_sum = tf.zeros_like(x) odd_sum = tf.zeros_like(x) x_2n = x_2 # Start with x^{2*1} = x^{2*n} with n = 1. for n in range(1, series_order + 1): y = npdt(_double_factorial(2 * n - 1)) / x_2n if n % 2: odd_sum += y else: even_sum += y x_2n *= x_2 return 1. + even_sum - odd_sum
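Equivalently, the even/odd accumulation computes 1 + sum_{n=1}^{order} (-1)^n (2n-1)!! / x^(2n). A direct NumPy evaluation for comparison; the choices of x and order below are arbitrary.

# Direct evaluation of the alternating series for comparison.
import numpy as np

def double_factorial(m):
    # (2n-1)!! = (2n-1)(2n-3)...1
    return float(np.prod(np.arange(m, 0, -2, dtype=np.float64)))

x, order = 4.0, 3
series = 1.0 + sum((-1) ** n * double_factorial(2 * n - 1) / x ** (2 * n)
                   for n in range(1, order + 1))
print(series)   # approximately 0.94556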
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/disentangled_vae.py#L1035-L1052
def summarize_dist_params(dist, name, name_scope="dist_params"): """ """ with tf.compat.v1.name_scope(name_scope): tf.compat.v2.summary.histogram( name="{}/{}".format(name, "mean"), data=dist.mean(), step=tf.compat.v1.train.get_or_create_global_step()) tf.compat.v2.summary.histogram( name="{}/{}".format(name, "stddev"), data=dist.stddev(), step=tf.compat.v1.train.get_or_create_global_step())
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L470-L474
def key(self): """ """ return self.dag_id, self.task_id, self.execution_date, self.try_number
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/datastore_hook.py#L83-L100
def begin_transaction(self): """ """ conn = self.get_conn() resp = (conn .projects() .beginTransaction(projectId=self.project_id, body={}) .execute(num_retries=self.num_retries)) return resp['transaction']
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_transfer_hook.py#L182-L200
def update_transfer_job(self, job_name, body): """ """ body = self._inject_project_id(body, BODY, PROJECT_ID) return ( self.get_conn() .transferJobs() .patch(jobName=job_name, body=body) .execute(num_retries=self.num_retries) )
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/cassandra_to_gcs.py#L269-L280
def convert_map_type(cls, name, value):
    """
    """
    converted_map = []
    for k, v in value.items():
        converted_map.append({
            'key': cls.convert_value('key', k),
            'value': cls.convert_value('value', v)
        })
    return converted_map
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/http_hook.py#L183-L211
def run_with_advanced_retry(self, _retry_args, *args, **kwargs): """ """ self._retry_obj = tenacity.Retrying( **_retry_args ) self._retry_obj(self.run, *args, **kwargs)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/random_variable.py#L270-L275
def numpy(self):
  """"""
  if not isinstance(self.value, ops.EagerTensor):
    raise NotImplementedError("value argument must be an EagerTensor.")
  return self.value.numpy()
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/phototour.py#L189-L196
def read_info_file(data_dir, info_file):
    """
    """
    with open(os.path.join(data_dir, info_file), 'r') as f:
        labels = [int(line.split()[0]) for line in f]
    return torch.LongTensor(labels)