Columns: _id (string, 98-184 chars), text (string, 91-10.9k chars).
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/hager_zhang.py#L420-L542
def _line_search_after_bracketing( value_and_gradients_function, search_interval, val_0, f_lim, max_iterations, sufficient_decrease_param, curvature_param, shrinkage_param): """ """ def _loop_cond(curr_interval): """Loop condition.""" active = ~(curr_interval.converged | curr_interval.failed) return (curr_interval.iterations < max_iterations) & tf.reduce_any(input_tensor=active) def _loop_body(curr_interval): """The loop body.""" secant2_raw_result = hzl.secant2( value_and_gradients_function, val_0, curr_interval, f_lim, sufficient_decrease_param, curvature_param) secant2_result = HagerZhangLineSearchResult( converged=secant2_raw_result.converged, failed=secant2_raw_result.failed, iterations=curr_interval.iterations + 1, func_evals=secant2_raw_result.num_evals, left=secant2_raw_result.left, right=secant2_raw_result.right) should_check_shrinkage = ~(secant2_result.converged | secant2_result.failed) def _do_check_shrinkage(): """Check if interval has shrinked enough.""" old_width = curr_interval.right.x - curr_interval.left.x new_width = secant2_result.right.x - secant2_result.left.x sufficient_shrinkage = new_width < old_width * shrinkage_param func_is_flat = ( _very_close(curr_interval.left.f, curr_interval.right.f) & _very_close(secant2_result.left.f, secant2_result.right.f)) new_converged = ( should_check_shrinkage & sufficient_shrinkage & func_is_flat) needs_inner_bisect = should_check_shrinkage & ~sufficient_shrinkage inner_bisect_args = secant2_result._replace( converged=secant2_result.converged | new_converged) def _apply_inner_bisect(): return _line_search_inner_bisection( value_and_gradients_function, inner_bisect_args, needs_inner_bisect, f_lim) return prefer_static.cond( tf.reduce_any(input_tensor=needs_inner_bisect), _apply_inner_bisect, lambda: inner_bisect_args) next_args = prefer_static.cond( tf.reduce_any(input_tensor=should_check_shrinkage), _do_check_shrinkage, lambda: secant2_result) interval_shrunk = ( ~next_args.failed & _very_close(next_args.left.x, next_args.right.x)) return [next_args._replace(converged=next_args.converged | interval_shrunk)] return tf.while_loop( cond=_loop_cond, body=_loop_body, loop_vars=[search_interval], parallel_iterations=1)[0]
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/ftp_hook.py#L152-L223
def retrieve_file( self, remote_full_path, local_full_path_or_buffer, callback=None): """ """ conn = self.get_conn() is_path = isinstance(local_full_path_or_buffer, basestring) # without a callback, default to writing to a user-provided file or # file-like buffer if not callback: if is_path: output_handle = open(local_full_path_or_buffer, 'wb') else: output_handle = local_full_path_or_buffer callback = output_handle.write else: output_handle = None remote_path, remote_file_name = os.path.split(remote_full_path) conn.cwd(remote_path) self.log.info('Retrieving file from FTP: %s', remote_full_path) conn.retrbinary('RETR %s' % remote_file_name, callback) self.log.info('Finished retrieving file from FTP: %s', remote_full_path) if is_path and output_handle: output_handle.close()
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/conv_variational.py#L249-L284
def compute_output_shape(self, input_shape): """ """ input_shape = tf.TensorShape(input_shape).as_list() if self.data_format == 'channels_last': space = input_shape[1:-1] new_space = [] for i in range(len(space)): new_dim = tf_layers_util.conv_output_length( space[i], self.kernel_size[i], padding=self.padding, stride=self.strides[i], dilation=self.dilation_rate[i]) new_space.append(new_dim) return tf.TensorShape([input_shape[0]] + new_space + [self.filters]) else: space = input_shape[2:] new_space = [] for i in range(len(space)): new_dim = tf_layers_util.conv_output_length( space[i], self.kernel_size[i], padding=self.padding, stride=self.strides[i], dilation=self.dilation_rate[i]) new_space.append(new_dim) return tf.TensorShape([input_shape[0], self.filters] + new_space)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/internal/missing_values_util.py#L137-L162
def initial_value_of_masked_time_series(time_series_tensor, broadcast_mask): """ """ num_timesteps = tf.shape(input=time_series_tensor)[-1] # Compute the index of the first unmasked entry for each series in the batch. unmasked_negindices = ( tf.cast(~broadcast_mask, tf.int32) * tf.range(num_timesteps, 0, -1)) first_unmasked_indices = num_timesteps - tf.reduce_max( input_tensor=unmasked_negindices, axis=-1) if first_unmasked_indices.shape.ndims is None: raise NotImplementedError( 'Cannot compute initial values of a masked time series with' 'dynamic rank.') # `batch_gather` requires static rank # Extract the initial value for each series in the batch. return tf.squeeze(tf.compat.v1.batch_gather( params=time_series_tensor, indices=first_unmasked_indices[..., tf.newaxis]), axis=-1)
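The index trick in initial_value_of_masked_time_series is terse, so here is a minimal NumPy re-derivation of it; the series values and the NaN-based mask are made up for illustration, and nothing from the snippet itself is imported.

import numpy as np

series = np.array([np.nan, np.nan, 3.0, 4.0, 5.0])
mask = np.isnan(series)                  # True where a value is missing
num_timesteps = series.shape[-1]

# Unmasked positions get a descending weight, masked positions get 0, so the
# largest weight marks the earliest unmasked entry.
unmasked_negindices = (~mask).astype(np.int32) * np.arange(num_timesteps, 0, -1)
first_unmasked_index = num_timesteps - unmasked_negindices.max(axis=-1)

print(first_unmasked_index)              # 2
print(series[first_unmasked_index])      # 3.0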
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/static_config.py#L29-L68
def configure_manifest_files(app): """ """ def parse_manifest_json(): # noinspection PyBroadException try: global manifest manifest_file = os.path.join(os.path.dirname(__file__), 'static/dist/manifest.json') with open(manifest_file, 'r') as f: manifest.update(json.load(f)) for k in manifest.keys(): manifest[k] = os.path.join("dist", manifest[k]) except Exception: print("Please make sure to build the frontend in " "static/ directory and restart the server") pass def get_asset_url(filename): if app.debug: parse_manifest_json() return url_for('static', filename=manifest.get(filename, '')) parse_manifest_json() @app.context_processor def get_url_for_asset(): """ Template tag to return the asset URL. WebPack renders the assets after minification and modification under the static/dist folder. This template tag reads the asset name in manifest.json and returns the appropriate file. """ return dict(url_for_asset=get_asset_url)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/S3_hook.py#L185-L202
def check_for_key(self, key, bucket_name=None): """ """ if not bucket_name: (bucket_name, key) = self.parse_s3_url(key) try: self.get_conn().head_object(Bucket=bucket_name, Key=key) return True except ClientError as e: self.log.info(e.response["Error"]["Message"]) return False
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/utils.py#L115-L136
def list_files(root, suffix, prefix=False): """ """ root = os.path.expanduser(root) files = list( filter( lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix), os.listdir(root) ) ) if prefix is True: files = [os.path.join(root, d) for d in files] return files
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/druid_hook.py#L127-L139
def get_conn(self): """ """ conn = self.get_connection(self.druid_broker_conn_id) druid_broker_conn = connect( host=conn.host, port=conn.port, path=conn.extra_dejson.get('endpoint', '/druid/v2/sql'), scheme=conn.extra_dejson.get('schema', 'http') ) self.log.info('Get the connection to druid broker on %s', conn.host) return druid_broker_conn
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/semilocal_linear_trend.py#L241-L263
def semilocal_linear_trend_transition_matrix(autoregressive_coef): """""" # We want to write the following 2 x 2 matrix: # [[1., 1., ], # level(t+1) = level(t) + slope(t) # [0., ar_coef], # slope(t+1) = ar_coef * slope(t) # but it's slightly tricky to properly incorporate the batch shape of # autoregressive_coef. E.g., if autoregressive_coef has shape [4,6], we want # to return shape [4, 6, 2, 2]. We do this by breaking the matrix into its # fixed entries, written explicitly, and then the autoregressive_coef part # which we add in after using a mask to broadcast to the correct matrix shape. fixed_entries = tf.constant( [[1., 1.], [0., 0.]], dtype=autoregressive_coef.dtype) autoregressive_coef_mask = tf.constant([[0., 0.], [0., 1.]], dtype=autoregressive_coef.dtype) bottom_right_entry = (autoregressive_coef[..., tf.newaxis, tf.newaxis] * autoregressive_coef_mask) return tf.linalg.LinearOperatorFullMatrix( fixed_entries + bottom_right_entry)
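A small NumPy sketch of the mask-and-broadcast trick used above: the fixed entries of the 2 x 2 transition matrix are written once, and the batched AR coefficient is added in through a mask so the result picks up the coefficient's batch shape. The coefficient values are made up.

import numpy as np

autoregressive_coef = np.array([0.2, 0.5, 0.9])      # batch shape [3]
fixed_entries = np.array([[1., 1.],
                          [0., 0.]])
autoregressive_coef_mask = np.array([[0., 0.],
                                     [0., 1.]])

bottom_right_entry = (autoregressive_coef[..., np.newaxis, np.newaxis]
                      * autoregressive_coef_mask)
transition_matrices = fixed_entries + bottom_right_entry

print(transition_matrices.shape)   # (3, 2, 2)
print(transition_matrices[1])      # [[1.  1. ]
                                   #  [0.  0.5]]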
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/s3_task_handler.py#L127-L144
def s3_read(self, remote_log_location, return_error=False): """ """ try: return self.hook.read_key(remote_log_location) except Exception: msg = 'Could not read logs from {}'.format(remote_log_location) self.log.exception(msg) # return error if needed if return_error: return msg
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sqoop_hook.py#L92-L116
def Popen(self, cmd, **kwargs): """ """ masked_cmd = ' '.join(self.cmd_mask_password(cmd)) self.log.info("Executing command: {}".format(masked_cmd)) self.sp = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs) for line in iter(self.sp.stdout): self.log.info(line.strip()) self.sp.wait() self.log.info("Command exited with return code %s", self.sp.returncode) if self.sp.returncode: raise AirflowException("Sqoop command failed: {}".format(masked_cmd))
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/mlengine_operator.py#L29-L62
def _normalize_mlengine_job_id(job_id): """ """ # Add a prefix when a job_id starts with a digit or a template match = re.search(r'\d|\{{2}', job_id) if match and match.start() == 0: job = 'z_{}'.format(job_id) else: job = job_id # Clean up 'bad' characters except templates tracker = 0 cleansed_job_id = '' for m in re.finditer(r'\{{2}.+?\}{2}', job): cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_', job[tracker:m.start()]) cleansed_job_id += job[m.start():m.end()] tracker = m.end() # Clean up last substring or the full string if no templates cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_', job[tracker:]) return cleansed_job_id
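To make the cleanup rules above concrete, here is the same function as a standalone sketch with two hand-traced inputs; the job ids are invented examples, and the expected outputs come from tracing the regexes rather than from running Cloud ML Engine.

import re

def _normalize_mlengine_job_id(job_id):
    # Prefix ids that start with a digit or a Jinja template.
    match = re.search(r'\d|\{{2}', job_id)
    job = 'z_{}'.format(job_id) if match and match.start() == 0 else job_id
    # Replace runs of 'bad' characters with '_' while leaving templates intact.
    tracker, cleansed_job_id = 0, ''
    for m in re.finditer(r'\{{2}.+?\}{2}', job):
        cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_', job[tracker:m.start()])
        cleansed_job_id += job[m.start():m.end()]
        tracker = m.end()
    cleansed_job_id += re.sub(r'[^0-9a-zA-Z]+', '_', job[tracker:])
    return cleansed_job_id

print(_normalize_mlengine_job_id('5_my-job'))                   # z_5_my_job
print(_normalize_mlengine_job_id('export_{{ ds_nodash }}-v1'))  # export_{{ ds_nodash }}_v1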
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/wasb_hook.py#L153-L191
def delete_file(self, container_name, blob_name, is_prefix=False, ignore_if_missing=False, **kwargs): """ """ if is_prefix: blobs_to_delete = [ blob.name for blob in self.connection.list_blobs( container_name, prefix=blob_name, **kwargs ) ] elif self.check_for_blob(container_name, blob_name): blobs_to_delete = [blob_name] else: blobs_to_delete = [] if not ignore_if_missing and len(blobs_to_delete) == 0: raise AirflowException('Blob(s) not found: {}'.format(blob_name)) for blob_uri in blobs_to_delete: self.log.info("Deleting blob: " + blob_uri) self.connection.delete_blob(container_name, blob_uri, delete_snapshots='include', **kwargs)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/dbapi_hook.py#L132-L166
def run(self, sql, autocommit=False, parameters=None): """ """ if isinstance(sql, basestring): sql = [sql] with closing(self.get_conn()) as conn: if self.supports_autocommit: self.set_autocommit(conn, autocommit) with closing(conn.cursor()) as cur: for s in sql: if parameters is not None: self.log.info("{} with parameters {}".format(s, parameters)) cur.execute(s, parameters) else: self.log.info(s) cur.execute(s) # If autocommit was set to False for db that supports autocommit, # or if db does not supports autocommit, we do a manual commit. if not self.get_autocommit(conn): conn.commit()
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/salesforce_hook.py#L186-L293
def write_object_to_file(self, query_results, filename, fmt="csv", coerce_to_timestamp=False, record_time_added=False): """ """ fmt = fmt.lower() if fmt not in ['csv', 'json', 'ndjson']: raise ValueError("Format value is not recognized: {}".format(fmt)) # this line right here will convert all integers to floats # if there are any None/np.nan values in the column # that's because None/np.nan cannot exist in an integer column # we should write all of our timestamps as FLOATS in our final schema df = pd.DataFrame.from_records(query_results, exclude=["attributes"]) df.columns = [column.lower() for column in df.columns] # convert columns with datetime strings to datetimes # not all strings will be datetimes, so we ignore any errors that occur # we get the object's definition at this point and only consider # features that are DATE or DATETIME if coerce_to_timestamp and df.shape[0] > 0: # get the object name out of the query results # it's stored in the "attributes" dictionary # for each returned record object_name = query_results[0]['attributes']['type'] self.log.info("Coercing timestamps for: %s", object_name) schema = self.describe_object(object_name) # possible columns that can be converted to timestamps # are the ones that are either date or datetime types # strings are too general and we risk unintentional conversion possible_timestamp_cols = [ field['name'].lower() for field in schema['fields'] if field['type'] in ["date", "datetime"] and field['name'].lower() in df.columns ] df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(self._to_timestamp) if record_time_added: fetched_time = time.time() df["time_fetched_from_salesforce"] = fetched_time # write the CSV or JSON file depending on the option # NOTE: # datetimes here are an issue. # There is no good way to manage the difference # for to_json, the options are an epoch or a ISO string # but for to_csv, it will be a string output by datetime # For JSON we decided to output the epoch timestamp in seconds # (as is fairly standard for JavaScript) # And for csv, we do a string if fmt == "csv": # there are also a ton of newline objects that mess up our ability to write to csv # we remove these newlines so that the output is a valid CSV format self.log.info("Cleaning data and writing to CSV") possible_strings = df.columns[df.dtypes == "object"] df[possible_strings] = df[possible_strings].apply( lambda x: x.str.replace("\r\n", "").str.replace("\n", "") ) # write the dataframe df.to_csv(filename, index=False) elif fmt == "json": df.to_json(filename, "records", date_unit="s") elif fmt == "ndjson": df.to_json(filename, "records", lines=True, date_unit="s") return df
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_natural_language_hook.py#L44-L53
def get_conn(self): """ """ if not self._conn: self._conn = LanguageServiceClient(credentials=self._get_credentials()) return self._conn
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/transforms/functional.py#L644-L677
def adjust_gamma(img, gamma, gain=1): """ """ if not _is_pil_image(img): raise TypeError('img should be PIL Image. Got {}'.format(type(img))) if gamma < 0: raise ValueError('Gamma should be a non-negative real number') input_mode = img.mode img = img.convert('RGB') gamma_map = [255 * gain * pow(ele / 255., gamma) for ele in range(256)] * 3 img = img.point(gamma_map) # use PIL's point-function to accelerate this part img = img.convert(input_mode) return img
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/mnist.py#L132-L164
def download(self): """""" if self._check_exists(): return makedir_exist_ok(self.raw_folder) makedir_exist_ok(self.processed_folder) # download files for url in self.urls: filename = url.rpartition('/')[2] file_path = os.path.join(self.raw_folder, filename) download_url(url, root=self.raw_folder, filename=filename, md5=None) self.extract_gzip(gzip_path=file_path, remove_finished=True) # process and save as torch files print('Processing...') training_set = ( read_image_file(os.path.join(self.raw_folder, 'train-images-idx3-ubyte')), read_label_file(os.path.join(self.raw_folder, 'train-labels-idx1-ubyte')) ) test_set = ( read_image_file(os.path.join(self.raw_folder, 't10k-images-idx3-ubyte')), read_label_file(os.path.join(self.raw_folder, 't10k-labels-idx1-ubyte')) ) with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f: torch.save(training_set, f) with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f: torch.save(test_set, f) print('Done!')
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/bin/cli.py#L763-L868
def restart_workers(gunicorn_master_proc, num_workers_expected, master_timeout): """ """ def wait_until_true(fn, timeout=0): """ Sleeps until fn is true """ t = time.time() while not fn(): if 0 < timeout <= time.time() - t: raise AirflowWebServerTimeout( "No response from gunicorn master within {0} seconds" .format(timeout)) time.sleep(0.1) def start_refresh(gunicorn_master_proc): batch_size = conf.getint('webserver', 'worker_refresh_batch_size') log.debug('%s doing a refresh of %s workers', state, batch_size) sys.stdout.flush() sys.stderr.flush() excess = 0 for _ in range(batch_size): gunicorn_master_proc.send_signal(signal.SIGTTIN) excess += 1 wait_until_true(lambda: num_workers_expected + excess == get_num_workers_running(gunicorn_master_proc), master_timeout) try: wait_until_true(lambda: num_workers_expected == get_num_workers_running(gunicorn_master_proc), master_timeout) while True: num_workers_running = get_num_workers_running(gunicorn_master_proc) num_ready_workers_running = \ get_num_ready_workers_running(gunicorn_master_proc) state = '[{0} / {1}]'.format(num_ready_workers_running, num_workers_running) # Whenever some workers are not ready, wait until all workers are ready if num_ready_workers_running < num_workers_running: log.debug('%s some workers are starting up, waiting...', state) sys.stdout.flush() time.sleep(1) # Kill a worker gracefully by asking gunicorn to reduce number of workers elif num_workers_running > num_workers_expected: excess = num_workers_running - num_workers_expected log.debug('%s killing %s workers', state, excess) for _ in range(excess): gunicorn_master_proc.send_signal(signal.SIGTTOU) excess -= 1 wait_until_true(lambda: num_workers_expected + excess == get_num_workers_running(gunicorn_master_proc), master_timeout) # Start a new worker by asking gunicorn to increase number of workers elif num_workers_running == num_workers_expected: refresh_interval = conf.getint('webserver', 'worker_refresh_interval') log.debug( '%s sleeping for %ss starting doing a refresh...', state, refresh_interval ) time.sleep(refresh_interval) start_refresh(gunicorn_master_proc) else: # num_ready_workers_running == num_workers_running < num_workers_expected log.error(( "%s some workers seem to have died and gunicorn" "did not restart them as expected" ), state) time.sleep(10) if len( psutil.Process(gunicorn_master_proc.pid).children() ) < num_workers_expected: start_refresh(gunicorn_master_proc) except (AirflowWebServerTimeout, OSError) as err: log.error(err) log.error("Shutting down webserver") try: gunicorn_master_proc.terminate() gunicorn_master_proc.wait() finally: sys.exit(1)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L515-L550
def compose(self, bucket_name, source_objects, destination_object): """ """ if not source_objects or not len(source_objects): raise ValueError('source_objects cannot be empty.') if not bucket_name or not destination_object: raise ValueError('bucket_name and destination_object cannot be empty.') self.log.info("Composing %s to %s in the bucket %s", source_objects, destination_object, bucket_name) client = self.get_conn() bucket = client.get_bucket(bucket_name) destination_blob = bucket.blob(destination_object) destination_blob.compose( sources=[ bucket.blob(blob_name=source_object) for source_object in source_objects ]) self.log.info("Completed successfully.")
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/gcp_transfer_operator.py#L106-L110
def _convert_date_to_dict(field_date): """ """ return {DAY: field_date.day, MONTH: field_date.month, YEAR: field_date.year}
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/dag.py#L1494-L1526
def create_dagrun(self, run_id, state, execution_date, start_date=None, external_trigger=False, conf=None, session=None): """ """ return self.get_dag().create_dagrun(run_id=run_id, state=state, execution_date=execution_date, start_date=start_date, external_trigger=external_trigger, conf=conf, session=session)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/joint_distribution_sequential.py#L475-L487
def _get_required_args(fn): """""" argspec = tf_inspect.getfullargspec(fn) args = argspec.args if tf_inspect.isclass(fn): args = args[1:] # Remove the `self` arg. if argspec.defaults: # Remove the args which have defaults. By convention we only feed # *required args*. This means some distributions must always be wrapped # with a `lambda`, e.g., `lambda logits: tfd.Bernoulli(logits=logits)` # or `lambda probs: tfd.Bernoulli(probs=probs)`. args = args[:-len(argspec.defaults)] return tuple(args)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/dbapi_hook.py#L55-L63
def get_conn(self): """ """ db = self.get_connection(getattr(self, self.conn_name_attr)) return self.connector.connect( host=db.host, port=db.port, username=db.login, schema=db.schema)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/vector_diffeomixture.py#L958-L972
def softmax(x, axis, name=None): """""" with tf.name_scope(name or "softmax"): x = tf.convert_to_tensor(value=x, name="x") ndims = ( tensorshape_util.rank(x.shape) if tensorshape_util.rank(x.shape) is not None else tf.rank( x, name="ndims")) axis = tf.convert_to_tensor(value=axis, dtype=tf.int32, name="axis") axis_ = tf.get_static_value(axis) if axis_ is not None: axis = np.int(ndims + axis_ if axis_ < 0 else axis_) else: axis = tf.where(axis < 0, ndims + axis, axis) return tf.nn.softmax(x, axis=axis)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_fileshare_hook.py#L83-L99
def list_directories_and_files(self, share_name, directory_name=None, **kwargs): """ """ return self.connection.list_directories_and_files(share_name, directory_name, **kwargs)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sftp_hook.py#L174-L187
def retrieve_file(self, remote_full_path, local_full_path): """ """ conn = self.get_conn() self.log.info('Retrieving file from FTP: %s', remote_full_path) conn.get(remote_full_path, local_full_path) self.log.info('Finished retrieving file from FTP: %s', remote_full_path)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/dataflow_operator.py#L363-L380
def execute(self, context): """""" bucket_helper = GoogleCloudBucketHelper( self.gcp_conn_id, self.delegate_to) self.py_file = bucket_helper.google_cloud_to_local(self.py_file) hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to, poll_sleep=self.poll_sleep) dataflow_options = self.dataflow_default_options.copy() dataflow_options.update(self.options) # Convert argument names from lowerCamelCase to snake case. camel_to_snake = lambda name: re.sub( r'[A-Z]', lambda x: '_' + x.group(0).lower(), name) formatted_options = {camel_to_snake(key): dataflow_options[key] for key in dataflow_options} hook.start_python_dataflow( self.job_name, formatted_options, self.py_file, self.py_options)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L473-L504
def done(self): """ """ if self._process is None: raise AirflowException("Tried to see if it's done before starting!") if self._done: return True # In case result queue is corrupted. if self._result_queue and not self._result_queue.empty(): self._result = self._result_queue.get_nowait() self._done = True self.log.debug("Waiting for %s", self._process) self._process.join() return True # Potential error case when process dies if self._result_queue and not self._process.is_alive(): self._done = True # Get the object from the queue or else join() can hang. if not self._result_queue.empty(): self._result = self._result_queue.get_nowait() self.log.debug("Waiting for %s", self._process) self._process.join() return True return False
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/hidden_markov_model.py#L926-L933
def _extract_log_probs(num_states, dist): """""" states = tf.reshape(tf.range(num_states), tf.concat([[num_states], tf.ones_like(dist.batch_shape_tensor())], axis=0)) return distribution_util.move_dimension(dist.log_prob(states), 0, -1)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1832-L1843
def executemany(self, operation, seq_of_parameters): """ """ for parameters in seq_of_parameters: self.execute(operation, parameters)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/macros/hive.py#L58-L80
def _closest_date(target_dt, date_list, before_target=None): """ """ fb = lambda d: target_dt - d if d <= target_dt else datetime.timedelta.max fa = lambda d: d - target_dt if d >= target_dt else datetime.timedelta.max fnone = lambda d: target_dt - d if d < target_dt else d - target_dt if before_target is None: return min(date_list, key=fnone).date() if before_target: return min(date_list, key=fb).date() else: return min(date_list, key=fa).date()
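The three key functions in _closest_date are easy to misread, so this standalone check exercises them on made-up datetimes (the real helper lives in airflow/macros/hive.py; nothing here imports Airflow).

import datetime

target_dt = datetime.datetime(2019, 1, 10)
date_list = [datetime.datetime(2019, 1, 8),
             datetime.datetime(2019, 1, 9),
             datetime.datetime(2019, 1, 12)]

fb = lambda d: target_dt - d if d <= target_dt else datetime.timedelta.max
fa = lambda d: d - target_dt if d >= target_dt else datetime.timedelta.max
fnone = lambda d: target_dt - d if d < target_dt else d - target_dt

print(min(date_list, key=fnone).date())  # 2019-01-09  closest overall
print(min(date_list, key=fb).date())     # 2019-01-09  closest on or before target
print(min(date_list, key=fa).date())     # 2019-01-12  closest on or after target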
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_spanner_hook.py#L247-L288
def update_database(self, instance_id, database_id, ddl_statements, project_id=None, operation_id=None): """ """ instance = self._get_client(project_id=project_id).instance( instance_id=instance_id) if not instance.exists(): raise AirflowException("The instance {} does not exist in project {} !". format(instance_id, project_id)) database = instance.database(database_id=database_id) try: operation = database.update_ddl( ddl_statements=ddl_statements, operation_id=operation_id) if operation: result = operation.result() self.log.info(result) return except AlreadyExists as e: if e.code == 409 and operation_id in e.message: self.log.info("Replayed update_ddl message - the operation id %s " "was already done before.", operation_id) return except GoogleAPICallError as e: self.log.error('An error occurred: %s. Exiting.', e.message) raise e
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L1912-L1930
def _get_convert_to_tensor_fn(identifier): """""" if identifier is None: return None if isinstance(identifier, six.string_types): identifier = str(identifier) return _deserialize(identifier) if isinstance(identifier, dict): return _deserialize(identifier) if isinstance(identifier, property): identifier = identifier.fget if callable(identifier): return identifier raise ValueError('Could not interpret ' 'convert-to-tensor function identifier:', identifier)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L1271-L1301
def xcom_push( self, key, value, execution_date=None): """ """ if execution_date and execution_date < self.execution_date: raise ValueError( 'execution_date can not be in the past (current ' 'execution_date is {}; received {})'.format( self.execution_date, execution_date)) XCom.set( key=key, value=value, task_id=self.task_id, dag_id=self.dag_id, execution_date=execution_date or self.execution_date)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/views.py#L2481-L2490
def get_query(self): """ """ return ( super().get_query() .filter(or_(models.DagModel.is_active, models.DagModel.is_paused)) .filter(~models.DagModel.is_subdag) )
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/snowflake_hook.py#L115-L129
def _get_aws_credentials(self): """ """ if self.snowflake_conn_id: connection_object = self.get_connection(self.snowflake_conn_id) if 'aws_secret_access_key' in connection_object.extra_dejson: aws_access_key_id = connection_object.extra_dejson.get( 'aws_access_key_id') aws_secret_access_key = connection_object.extra_dejson.get( 'aws_secret_access_key') return aws_access_key_id, aws_secret_access_key
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/bijectors/fill_triangular.py#L135-L153
def vector_size_to_square_matrix_size(d, validate_args, name=None): """""" if isinstance(d, (float, int, np.generic, np.ndarray)): n = (-1 + np.sqrt(1 + 8 * d)) / 2. if float(int(n)) != n: raise ValueError("Vector length is not a triangular number.") return int(n) else: with tf.name_scope(name or "vector_size_to_square_matrix_size") as name: n = (-1. + tf.sqrt(1 + 8. * tf.cast(d, dtype=tf.float32))) / 2. if validate_args: with tf.control_dependencies([ assert_util.assert_equal( tf.cast(tf.cast(n, dtype=tf.int32), dtype=tf.float32), n, message="Vector length is not a triangular number") ]): n = tf.identity(n) return tf.cast(n, d.dtype)
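The static branch above inverts the triangular-number relation d = n * (n + 1) / 2; a couple of plain-Python spot checks (no TensorFlow involved, the d values are arbitrary):

import numpy as np

for d, expected_n in [(1, 1), (3, 2), (6, 3), (10, 4)]:
    n = (-1 + np.sqrt(1 + 8 * d)) / 2.
    assert float(int(n)) == n and int(n) == expected_n

# d = 7 is not triangular: (-1 + sqrt(57)) / 2 is roughly 3.27, so the function
# would raise "Vector length is not a triangular number."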
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/optimizer/linesearch/hager_zhang.py#L545-L576
def _line_search_inner_bisection( value_and_gradients_function, search_interval, active, f_lim): """""" midpoint = (search_interval.left.x + search_interval.right.x) / 2 val_mid = value_and_gradients_function(midpoint) is_valid_mid = hzl.is_finite(val_mid) still_active = active & is_valid_mid new_failed = active & ~is_valid_mid next_inteval = search_interval._replace( failed=search_interval.failed | new_failed, func_evals=search_interval.func_evals + 1) def _apply_update(): update_result = hzl.update( value_and_gradients_function, next_inteval.left, next_inteval.right, val_mid, f_lim, active=still_active) return HagerZhangLineSearchResult( converged=next_inteval.converged, failed=next_inteval.failed | update_result.failed, iterations=next_inteval.iterations + update_result.iteration, func_evals=next_inteval.func_evals + update_result.num_evals, left=update_result.left, right=update_result.right) return prefer_static.cond( tf.reduce_any(input_tensor=still_active), _apply_update, lambda: next_inteval)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_spanner_hook.py#L291-L325
def delete_database(self, instance_id, database_id, project_id=None): """ """ instance = self._get_client(project_id=project_id).\ instance(instance_id=instance_id) if not instance.exists(): raise AirflowException("The instance {} does not exist in project {} !". format(instance_id, project_id)) database = instance.database(database_id=database_id) if not database.exists(): self.log.info("The database {} is already deleted from instance {}. " "Exiting.".format(database_id, instance_id)) return try: operation = database.drop() # type: Operation except GoogleAPICallError as e: self.log.error('An error occurred: %s. Exiting.', e.message) raise e if operation: result = operation.result() self.log.info(result) return
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_data_lake_hook.py#L41-L53
def get_conn(self): """""" conn = self.get_connection(self.conn_id) service_options = conn.extra_dejson self.account_name = service_options.get('account_name') adlCreds = lib.auth(tenant_id=service_options.get('tenant'), client_secret=conn.password, client_id=conn.login) adlsFileSystemClient = core.AzureDLFileSystem(adlCreds, store_name=self.account_name) adlsFileSystemClient.connect() return adlsFileSystemClient
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L627-L686
def check_status(self, job_name, key, describe_function, check_interval, max_ingestion_time, non_terminal_states=None): """ """ if not non_terminal_states: non_terminal_states = self.non_terminal_states sec = 0 running = True while running: time.sleep(check_interval) sec = sec + check_interval try: response = describe_function(job_name) status = response[key] self.log.info('Job still running for %s seconds... ' 'current status is %s' % (sec, status)) except KeyError: raise AirflowException('Could not get status of the SageMaker job') except ClientError: raise AirflowException('AWS request failed, check logs for more info') if status in non_terminal_states: running = True elif status in self.failed_states: raise AirflowException('SageMaker job failed because %s' % response['FailureReason']) else: running = False if max_ingestion_time and sec > max_ingestion_time: # ensure that the job gets killed if the max ingestion time is exceeded raise AirflowException('SageMaker job took more than %s seconds', max_ingestion_time) self.log.info('SageMaker Job Compeleted') response = describe_function(job_name) return response
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L897-L954
def _process_task_instances(self, dag, queue, session=None): """ """ # update the state of the previously active dag runs dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session) active_dag_runs = [] for run in dag_runs: self.log.info("Examining DAG run %s", run) # don't consider runs that are executed in the future if run.execution_date > timezone.utcnow(): self.log.error( "Execution date is in future: %s", run.execution_date ) continue if len(active_dag_runs) >= dag.max_active_runs: self.log.info("Number of active dag runs reached max_active_run.") break # skip backfill dagruns for now as long as they are not really scheduled if run.is_backfill: continue # todo: run.dag is transient but needs to be set run.dag = dag # todo: preferably the integrity check happens at dag collection time run.verify_integrity(session=session) run.update_state(session=session) if run.state == State.RUNNING: make_transient(run) active_dag_runs.append(run) for run in active_dag_runs: self.log.debug("Examining active DAG run: %s", run) # this needs a fresh session sometimes tis get detached tis = run.get_task_instances(state=(State.NONE, State.UP_FOR_RETRY, State.UP_FOR_RESCHEDULE)) # this loop is quite slow as it uses are_dependencies_met for # every task (in ti.is_runnable). This is also called in # update_state above which has already checked these tasks for ti in tis: task = dag.get_task(ti.task_id) # fixme: ti.task is transient but needs to be set ti.task = task if ti.are_dependencies_met( dep_context=DepContext(flag_upstream_failed=True), session=session): self.log.debug('Queuing task: %s', ti) queue.append(ti.key)
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/utils.py#L41-L51
def makedir_exist_ok(dirpath): """ """ try: os.makedirs(dirpath) except OSError as e: if e.errno == errno.EEXIST: pass else: raise
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/dagbag.py#L274-L303
def kill_zombies(self, zombies, session=None): """ """ from airflow.models.taskinstance import TaskInstance # Avoid circular import for zombie in zombies: if zombie.dag_id in self.dags: dag = self.dags[zombie.dag_id] if zombie.task_id in dag.task_ids: task = dag.get_task(zombie.task_id) ti = TaskInstance(task, zombie.execution_date) # Get properties needed for failure handling from SimpleTaskInstance. ti.start_date = zombie.start_date ti.end_date = zombie.end_date ti.try_number = zombie.try_number ti.state = zombie.state ti.test_mode = configuration.getboolean('core', 'unit_test_mode') ti.handle_failure("{} detected as zombie".format(ti), ti.test_mode, ti.get_template_context()) self.log.info( 'Marked zombie job %s as %s', ti, ti.state) Stats.incr('zombies_killed') session.commit()
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dates.py#L36-L111
def date_range(start_date, end_date=None, num=None, delta=None): """ """ if not delta: return [] if end_date and start_date > end_date: raise Exception("Wait. start_date needs to be before end_date") if end_date and num: raise Exception("Wait. Either specify end_date OR num") if not end_date and not num: end_date = timezone.utcnow() delta_iscron = False tz = start_date.tzinfo if isinstance(delta, six.string_types): delta_iscron = True start_date = timezone.make_naive(start_date, tz) cron = croniter(delta, start_date) elif isinstance(delta, timedelta): delta = abs(delta) dates = [] if end_date: if timezone.is_naive(start_date): end_date = timezone.make_naive(end_date, tz) while start_date <= end_date: if timezone.is_naive(start_date): dates.append(timezone.make_aware(start_date, tz)) else: dates.append(start_date) if delta_iscron: start_date = cron.get_next(datetime) else: start_date += delta else: for _ in range(abs(num)): if timezone.is_naive(start_date): dates.append(timezone.make_aware(start_date, tz)) else: dates.append(start_date) if delta_iscron: if num > 0: start_date = cron.get_next(datetime) else: start_date = cron.get_prev(datetime) else: if num > 0: start_date += delta else: start_date -= delta return sorted(dates)
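A hedged usage sketch for date_range, assuming the module is importable as airflow.utils.dates (the path in the URL above); the exact repr of the results depends on the timezone handling in the installed Airflow version.

from datetime import datetime, timedelta
from airflow.utils.dates import date_range

# Three daily steps from a fixed, naive start date (made timezone-aware internally).
print(date_range(datetime(2016, 1, 1), num=3, delta=timedelta(days=1)))
# expected: 2016-01-01, 2016-01-02, 2016-01-03

# The same idea with a cron string instead of a timedelta.
print(date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta='0 0 * * *'))
# expected: 2016-01-01, 2016-01-02, 2016-01-03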
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/sample_stats.py#L284-L462
def covariance(x, y=None, sample_axis=0, event_axis=-1, keepdims=False, name=None): """ """ with tf.compat.v1.name_scope( name, 'covariance', values=[x, y, event_axis, sample_axis]): x = tf.convert_to_tensor(value=x, name='x') # Covariance *only* uses the centered versions of x (and y). x -= tf.reduce_mean(input_tensor=x, axis=sample_axis, keepdims=True) if y is None: y = x else: y = tf.convert_to_tensor(value=y, name='y', dtype=x.dtype) # If x and y have different shape, sample_axis and event_axis will likely # be wrong for one of them! x.shape.assert_is_compatible_with(y.shape) y -= tf.reduce_mean(input_tensor=y, axis=sample_axis, keepdims=True) if event_axis is None: return tf.reduce_mean( input_tensor=x * tf.math.conj(y), axis=sample_axis, keepdims=keepdims) if sample_axis is None: raise ValueError( 'sample_axis was None, which means all axis hold events, and this ' 'overlaps with event_axis ({})'.format(event_axis)) event_axis = _make_positive_axis(event_axis, tf.rank(x)) sample_axis = _make_positive_axis(sample_axis, tf.rank(x)) # If we get lucky and axis is statically defined, we can do some checks. if _is_list_like(event_axis) and _is_list_like(sample_axis): if set(event_axis).intersection(sample_axis): raise ValueError( 'sample_axis ({}) and event_axis ({}) overlapped'.format( sample_axis, event_axis)) if (np.diff(sorted(event_axis)) > 1).any(): raise ValueError( 'event_axis must be contiguous. Found: {}'.format(event_axis)) batch_axis = list( sorted( set(range(x.shape.ndims)).difference(sample_axis + event_axis))) else: batch_axis, _ = tf.compat.v1.setdiff1d( tf.range(0, tf.rank(x)), tf.concat((sample_axis, event_axis), 0)) event_axis = tf.convert_to_tensor( value=event_axis, name='event_axis', dtype=tf.int32) sample_axis = tf.convert_to_tensor( value=sample_axis, name='sample_axis', dtype=tf.int32) batch_axis = tf.convert_to_tensor( value=batch_axis, name='batch_axis', dtype=tf.int32) # Permute x/y until shape = B + E + S perm_for_xy = tf.concat((batch_axis, event_axis, sample_axis), 0) x_permed = tf.transpose(a=x, perm=perm_for_xy) y_permed = tf.transpose(a=y, perm=perm_for_xy) batch_ndims = tf.size(input=batch_axis) batch_shape = tf.shape(input=x_permed)[:batch_ndims] event_ndims = tf.size(input=event_axis) event_shape = tf.shape(input=x_permed)[batch_ndims:batch_ndims + event_ndims] sample_shape = tf.shape(input=x_permed)[batch_ndims + event_ndims:] sample_ndims = tf.size(input=sample_shape) n_samples = tf.reduce_prod(input_tensor=sample_shape) n_events = tf.reduce_prod(input_tensor=event_shape) # Flatten sample_axis into one long dim. x_permed_flat = tf.reshape( x_permed, tf.concat((batch_shape, event_shape, [n_samples]), 0)) y_permed_flat = tf.reshape( y_permed, tf.concat((batch_shape, event_shape, [n_samples]), 0)) # Do the same for event_axis. x_permed_flat = tf.reshape( x_permed, tf.concat((batch_shape, [n_events], [n_samples]), 0)) y_permed_flat = tf.reshape( y_permed, tf.concat((batch_shape, [n_events], [n_samples]), 0)) # After matmul, cov.shape = batch_shape + [n_events, n_events] cov = tf.matmul( x_permed_flat, y_permed_flat, adjoint_b=True) / tf.cast( n_samples, x.dtype) # Insert some singletons to make # cov.shape = batch_shape + event_shape**2 + [1,...,1] # This is just like x_permed.shape, except the sample_axis is all 1's, and # the [n_events] became event_shape**2. cov = tf.reshape( cov, tf.concat( ( batch_shape, # event_shape**2 used here because it is the same length as # event_shape, and has the same number of elements as one # batch of covariance. 
event_shape**2, tf.ones([sample_ndims], tf.int32)), 0)) # Permuting by the argsort inverts the permutation, making # cov.shape have ones in the position where there were samples, and # [n_events * n_events] in the event position. cov = tf.transpose(a=cov, perm=tf.math.invert_permutation(perm_for_xy)) # Now expand event_shape**2 into event_shape + event_shape. # We here use (for the first time) the fact that we require event_axis to be # contiguous. e_start = event_axis[0] e_len = 1 + event_axis[-1] - event_axis[0] cov = tf.reshape( cov, tf.concat((tf.shape(input=cov)[:e_start], event_shape, event_shape, tf.shape(input=cov)[e_start + e_len:]), 0)) # tf.squeeze requires python ints for axis, not Tensor. This is enough to # require our axis args to be constants. if not keepdims: squeeze_axis = tf.where(sample_axis < e_start, sample_axis, sample_axis + e_len) cov = _squeeze(cov, axis=squeeze_axis) return cov
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/api/experimental/endpoints.py#L114-L131
def dag_runs(dag_id): """ """ try: state = request.args.get('state') dagruns = get_dag_runs(dag_id, state) except AirflowException as err: _log.info(err) response = jsonify(error="{}".format(err)) response.status_code = 400 return response return jsonify(dagruns)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_cosmos_hook.py#L101-L122
def create_collection(self, collection_name, database_name=None): """ """ if collection_name is None: raise AirflowBadRequest("Collection name cannot be None.") # We need to check to see if this container already exists so we don't try # to create it twice existing_container = list(self.get_conn().QueryContainers( get_database_link(self.__get_database_name(database_name)), { "query": "SELECT * FROM r WHERE r.id=@id", "parameters": [ {"name": "@id", "value": collection_name} ] })) # Only create if we did not find it already existing if len(existing_container) == 0: self.get_conn().CreateContainer( get_database_link(self.__get_database_name(database_name)), {"id": collection_name})
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/uniform.py#L214-L242
def _kl_uniform_uniform(a, b, name=None): """ """ with tf.name_scope(name or "kl_uniform_uniform"): # Consistent with # http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 60 # Watch out for the change in conventions--they use 'a' and 'b' to refer to # lower and upper bounds respectively there. final_batch_shape = distribution_util.get_broadcast_shape( a.low, b.low, a.high, b.high) dtype = dtype_util.common_dtype( [a.low, a.high, b.low, b.high], tf.float32) return tf.where((b.low <= a.low) & (a.high <= b.high), tf.math.log(b.high - b.low) - tf.math.log(a.high - a.low), tf.broadcast_to( dtype_util.as_numpy_dtype(dtype)(np.inf), final_batch_shape))
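A quick numeric check of the closed form used above: when the support of a sits inside the support of b, KL(a || b) = log(b.high - b.low) - log(a.high - a.low), and it is +inf otherwise. The interval endpoints below are made up.

import numpy as np

a_low, a_high = 0.0, 1.0    # a ~ Uniform(0, 1)
b_low, b_high = -1.0, 2.0   # b ~ Uniform(-1, 2)

supported = (b_low <= a_low) and (a_high <= b_high)
kl = np.log(b_high - b_low) - np.log(a_high - a_low) if supported else np.inf
print(kl)                   # log(3), roughly 1.0986

# Swapping the roles gives +inf, because Uniform(-1, 2) puts mass outside [0, 1].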
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L2071-L2076
def _validate_value(key, value, expected_type): """ """ if not isinstance(value, expected_type): raise TypeError("{} argument must have a type {} not {}".format( key, expected_type, type(value)))
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/decorators.py#L61-L94
def gzipped(f): """ """ @functools.wraps(f) def view_func(*args, **kwargs): @after_this_request def zipper(response): accept_encoding = request.headers.get('Accept-Encoding', '') if 'gzip' not in accept_encoding.lower(): return response response.direct_passthrough = False if (response.status_code < 200 or response.status_code >= 300 or 'Content-Encoding' in response.headers): return response gzip_buffer = IO() gzip_file = gzip.GzipFile(mode='wb', fileobj=gzip_buffer) gzip_file.write(response.data) gzip_file.close() response.data = gzip_buffer.getvalue() response.headers['Content-Encoding'] = 'gzip' response.headers['Vary'] = 'Accept-Encoding' response.headers['Content-Length'] = len(response.data) return response return f(*args, **kwargs) return view_func
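A usage sketch for the decorator above, assuming it can be imported from airflow.www.decorators (the path in the URL); the route and payload are invented.

from flask import Flask
from airflow.www.decorators import gzipped

app = Flask(__name__)

@app.route('/big-report')
@gzipped
def big_report():
    # Compressed only when the client sends 'Accept-Encoding: gzip' and the
    # response status is 2xx; otherwise the body passes through untouched.
    return 'x' * 100000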
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L300-L372
def load_df( self, df, table, field_dict=None, delimiter=',', encoding='utf8', pandas_kwargs=None, **kwargs): """ """ def _infer_field_types_from_df(df): DTYPE_KIND_HIVE_TYPE = { 'b': 'BOOLEAN', # boolean 'i': 'BIGINT', # signed integer 'u': 'BIGINT', # unsigned integer 'f': 'DOUBLE', # floating-point 'c': 'STRING', # complex floating-point 'M': 'TIMESTAMP', # datetime 'O': 'STRING', # object 'S': 'STRING', # (byte-)string 'U': 'STRING', # Unicode 'V': 'STRING' # void } d = OrderedDict() for col, dtype in df.dtypes.iteritems(): d[col] = DTYPE_KIND_HIVE_TYPE[dtype.kind] return d if pandas_kwargs is None: pandas_kwargs = {} with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir: with NamedTemporaryFile(dir=tmp_dir, mode="w") as f: if field_dict is None: field_dict = _infer_field_types_from_df(df) df.to_csv(path_or_buf=f, sep=delimiter, header=False, index=False, encoding=encoding, date_format="%Y-%m-%d %H:%M:%S", **pandas_kwargs) f.flush() return self.load_file(filepath=f.name, table=table, delimiter=delimiter, field_dict=field_dict, **kwargs)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/logistic_regression.py#L134-L152
def build_input_pipeline(x, y, batch_size): """ """ training_dataset = tf.data.Dataset.from_tensor_slices((x, y)) training_batches = training_dataset.repeat().batch(batch_size) training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches) batch_features, batch_labels = training_iterator.get_next() return batch_features, batch_labels
https://github.com/asciimoo/searx/blob/a84caa22cf947e973c10aa968d35fb2bdda6d048/searx/webapp.py#L668-L725
def preferences(): """""" # save preferences if request.method == 'POST': resp = make_response(redirect(urljoin(settings['server']['base_url'], url_for('index')))) try: request.preferences.parse_form(request.form) except ValidationException: request.errors.append(gettext('Invalid settings, please edit your preferences')) return resp return request.preferences.save(resp) # render preferences image_proxy = request.preferences.get_value('image_proxy') lang = request.preferences.get_value('language') disabled_engines = request.preferences.engines.get_disabled() allowed_plugins = request.preferences.plugins.get_enabled() # stats for preferences page stats = {} for c in categories: for e in categories[c]: stats[e.name] = {'time': None, 'warn_timeout': False, 'warn_time': False} if e.timeout > settings['outgoing']['request_timeout']: stats[e.name]['warn_timeout'] = True stats[e.name]['supports_selected_language'] = _is_selected_language_supported(e, request.preferences) # get first element [0], the engine time, # and then the second element [1] : the time (the first one is the label) for engine_stat in get_engines_stats()[0][1]: stats[engine_stat.get('name')]['time'] = round(engine_stat.get('avg'), 3) if engine_stat.get('avg') > settings['outgoing']['request_timeout']: stats[engine_stat.get('name')]['warn_time'] = True # end of stats return render('preferences.html', locales=settings['locales'], current_locale=get_locale(), image_proxy=image_proxy, engines_by_category=categories, stats=stats, answerers=[{'info': a.self_info(), 'keywords': a.keywords} for a in answerers], disabled_engines=disabled_engines, autocomplete_backends=autocomplete_backends, shortcuts={y: x for x, y in engine_shortcuts.items()}, themes=themes, plugins=plugins, doi_resolvers=settings['doi_resolvers'], current_doi_resolver=get_doi_resolver(request.args, request.preferences.get_value('doi_resolver')), allowed_plugins=allowed_plugins, theme=get_current_theme_name(), preferences_url_params=request.preferences.get_as_url_params(), base_url=get_base_url(), preferences=True)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/finite_discrete.py#L248-L297
def _maybe_validate_args(outcomes, logits, probs, validate_args): """""" assertions = [] def validate_equal_last_dim(tensor_a, tensor_b, message): if tensor_a.shape.is_fully_defined() and tensor_b.shape.is_fully_defined(): if tensor_a.shape[-1] != tensor_b.shape[-1]: raise ValueError(message) elif validate_args: assertions.append( tf.compat.v1.assert_equal( tf.shape(input=tensor_a)[-1], tf.shape(input=tensor_b)[-1], message=message)) if logits is not None: validate_equal_last_dim( outcomes, logits, message='Last dimension of outcomes and logits must be equal size.') if probs is not None: validate_equal_last_dim( outcomes, probs, message='Last dimension of outcomes and probs must be equal size.') message = 'Rank of outcomes must be 1.' if outcomes.shape.ndims is not None: if outcomes.shape.ndims != 1: raise ValueError(message) elif validate_args: assertions.append(tf.compat.v1.assert_rank(outcomes, 1, message=message)) message = 'Size of outcomes must be greater than 0.' if outcomes.shape.num_elements() is not None: if outcomes.shape.num_elements() == 0: raise ValueError(message) elif validate_args: assertions.append( tf.compat.v1.assert_greater( tf.size(input=outcomes), 0, message=message)) if validate_args: assertions.append( tf.compat.v1.assert_equal( tf.math.is_strictly_increasing(outcomes), True, message='outcomes is not strictly increasing.')) return assertions
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/no_u_turn_sampler/logistic_regression.py#L58-L64
def logistic_regression(features): """""" coeffs = ed.MultivariateNormalDiag( loc=tf.zeros(features.shape[1]), name="coeffs") labels = ed.Bernoulli( logits=tf.tensordot(features, coeffs, [[1], [0]]), name="labels") return labels
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/special_math.py#L186-L288
def _ndtri(p): """""" # Constants used in piece-wise rational approximations. Taken from the cephes # library: # https://root.cern.ch/doc/v608/SpecFuncCephesInv_8cxx_source.html p0 = list(reversed([-5.99633501014107895267E1, 9.80010754185999661536E1, -5.66762857469070293439E1, 1.39312609387279679503E1, -1.23916583867381258016E0])) q0 = list(reversed([1.0, 1.95448858338141759834E0, 4.67627912898881538453E0, 8.63602421390890590575E1, -2.25462687854119370527E2, 2.00260212380060660359E2, -8.20372256168333339912E1, 1.59056225126211695515E1, -1.18331621121330003142E0])) p1 = list(reversed([4.05544892305962419923E0, 3.15251094599893866154E1, 5.71628192246421288162E1, 4.40805073893200834700E1, 1.46849561928858024014E1, 2.18663306850790267539E0, -1.40256079171354495875E-1, -3.50424626827848203418E-2, -8.57456785154685413611E-4])) q1 = list(reversed([1.0, 1.57799883256466749731E1, 4.53907635128879210584E1, 4.13172038254672030440E1, 1.50425385692907503408E1, 2.50464946208309415979E0, -1.42182922854787788574E-1, -3.80806407691578277194E-2, -9.33259480895457427372E-4])) p2 = list(reversed([3.23774891776946035970E0, 6.91522889068984211695E0, 3.93881025292474443415E0, 1.33303460815807542389E0, 2.01485389549179081538E-1, 1.23716634817820021358E-2, 3.01581553508235416007E-4, 2.65806974686737550832E-6, 6.23974539184983293730E-9])) q2 = list(reversed([1.0, 6.02427039364742014255E0, 3.67983563856160859403E0, 1.37702099489081330271E0, 2.16236993594496635890E-1, 1.34204006088543189037E-2, 3.28014464682127739104E-4, 2.89247864745380683936E-6, 6.79019408009981274425E-9])) def _create_polynomial(var, coeffs): """Compute n_th order polynomial via Horner's method.""" coeffs = np.array(coeffs, dtype_util.as_numpy_dtype(var.dtype)) if not coeffs.size: return tf.zeros_like(var) return coeffs[0] + _create_polynomial(var, coeffs[1:]) * var maybe_complement_p = tf.where(p > -np.expm1(-2.), 1. - p, p) # Write in an arbitrary value in place of 0 for p since 0 will cause NaNs # later on. The result from the computation when p == 0 is not used so any # number that doesn't result in NaNs is fine. sanitized_mcp = tf.where( maybe_complement_p <= 0., tf.fill(tf.shape(input=p), dtype_util.as_numpy_dtype(p.dtype)(0.5)), maybe_complement_p) # Compute x for p > exp(-2): x/sqrt(2pi) = w + w**3 P0(w**2)/Q0(w**2). w = sanitized_mcp - 0.5 ww = w ** 2 x_for_big_p = w + w * ww * (_create_polynomial(ww, p0) / _create_polynomial(ww, q0)) x_for_big_p *= -np.sqrt(2. * np.pi) # Compute x for p <= exp(-2): x = z - log(z)/z - (1/z) P(1/z) / Q(1/z), # where z = sqrt(-2. * log(p)), and P/Q are chosen between two different # arrays based on whether p < exp(-32). z = tf.sqrt(-2. * tf.math.log(sanitized_mcp)) first_term = z - tf.math.log(z) / z second_term_small_p = ( _create_polynomial(1. / z, p2) / _create_polynomial(1. / z, q2) / z) second_term_otherwise = ( _create_polynomial(1. / z, p1) / _create_polynomial(1. / z, q1) / z) x_for_small_p = first_term - second_term_small_p x_otherwise = first_term - second_term_otherwise x = tf.where(sanitized_mcp > np.exp(-2.), x_for_big_p, tf.where(z >= 8.0, x_for_small_p, x_otherwise)) x = tf.where(p > 1. - np.exp(-2.), x, -x) infinity_scalar = tf.constant(np.inf, dtype=p.dtype) infinity = tf.fill(tf.shape(input=p), infinity_scalar) x_nan_replaced = tf.where( p <= 0.0, -infinity, tf.where(p >= 1.0, infinity, x)) return x_nan_replaced
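The rational approximations above target the inverse of the standard normal CDF, so a reference implementation makes a convenient sanity check; this uses SciPy's ndtri rather than the TensorFlow code itself.

from scipy.special import ndtri

print(ndtri(0.5))    # 0.0
print(ndtri(0.975))  # about 1.959964 (the familiar 97.5% quantile)
print(ndtri(0.025))  # about -1.959964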
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_kms_hook.py#L58-L83
def encrypt(self, key_name, plaintext, authenticated_data=None): """ """ keys = self.get_conn().projects().locations().keyRings().cryptoKeys() body = {'plaintext': _b64encode(plaintext)} if authenticated_data: body['additionalAuthenticatedData'] = _b64encode(authenticated_data) request = keys.encrypt(name=key_name, body=body) response = request.execute(num_retries=self.num_retries) ciphertext = response['ciphertext'] return ciphertext
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/edward2/interceptor.py#L175-L195
def interceptable(func): """ """ @functools.wraps(func) def func_wrapped(*args, **kwargs): with get_next_interceptor() as interceptor: return interceptor(func, *args, **kwargs) return func_wrapped
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/file.py#L42-L59
def mkdirs(path, mode): """ """ try: o_umask = os.umask(0) os.makedirs(path, mode) except OSError: if not os.path.isdir(path): raise finally: os.umask(o_umask)
https://github.com/pytorch/vision/blob/3afcf3cd49661c466c75ea536b0b2a7ff57f9a05/torchvision/datasets/utils.py#L139-L171
def download_file_from_google_drive(file_id, root, filename=None, md5=None): """ """ # Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url import requests url = "https://docs.google.com/uc?export=download" root = os.path.expanduser(root) if not filename: filename = file_id fpath = os.path.join(root, filename) makedir_exist_ok(root) if os.path.isfile(fpath) and check_integrity(fpath, md5): print('Using downloaded and verified file: ' + fpath) else: session = requests.Session() response = session.get(url, params={'id': file_id}, stream=True) token = _get_confirm_token(response) if token: params = {'id': file_id, 'confirm': token} response = session.get(url, params=params, stream=True) _save_response_content(response, fpath)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/mssql_to_gcs.py#L201-L211
def _upload_to_gcs(self, files_to_upload):
    """ """
    hook = GoogleCloudStorageHook(
        google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
        delegate_to=self.delegate_to)
    for object_name, tmp_file_handle in files_to_upload.items():
        hook.upload(self.bucket, object_name, tmp_file_handle.name,
                    'application/json',
                    (self.gzip if object_name != self.schema_filename else False))
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/pool.py#L60-L69
def open_slots(self, session):
    """ """
    from airflow.models.taskinstance import \
        TaskInstance as TI  # Avoid circular import

    used_slots = session.query(func.count()).filter(TI.pool == self.pool).filter(
        TI.state.in_([State.RUNNING, State.QUEUED])).scalar()
    return self.slots - used_slots
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/backend/numpy/math.py#L191-L202
def _reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None):  # pylint: disable=unused-argument
  """"""
  try:
    return scipy_special.logsumexp(
        input_tensor, axis=_astuple(axis), keepdims=keepdims)
  except NotImplementedError:
    # Fall back to a pure-NumPy implementation in case SciPy isn't installed,
    # because logsumexp is used often.
    m = _max_mask_non_finite(input_tensor, axis=axis, keepdims=True)
    y = input_tensor - m
    y = np.exp(y, out=y)
    return m + np.log(np.sum(y, axis=_astuple(axis), keepdims=keepdims))
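A minimal sketch, in plain NumPy, of the max-shift trick the fallback relies on: logsumexp(x) = m + log(sum(exp(x - m))) with m = max(x), which stays finite where the naive formula overflows.

import numpy as np

x = np.array([1000.0, 1000.5, 999.0])
m = np.max(x)
print(m + np.log(np.sum(np.exp(x - m))))  # finite, approx 1001.1
print(np.log(np.sum(np.exp(x))))          # naive version overflows to inf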
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/wanmen.py#L69-L84
def wanmen_download_by_course_topic_part(json_api_content, tIndex, pIndex,
                                         output_dir='.', merge=True,
                                         info_only=False, **kwargs):
    """"""
    html = json_api_content

    title = _wanmen_get_title_by_json_topic_part(html, tIndex, pIndex)
    bokeccID = _wanmen_get_boke_id_by_json_topic_part(html, tIndex, pIndex)

    bokecc_download_by_id(vid=bokeccID, title=title, output_dir=output_dir,
                          merge=merge, info_only=info_only, **kwargs)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L134-L155
def tar_and_s3_upload(self, path, key, bucket):
    """ """
    with tempfile.TemporaryFile() as temp_file:
        if os.path.isdir(path):
            files = [os.path.join(path, name) for name in os.listdir(path)]
        else:
            files = [path]
        with tarfile.open(mode='w:gz', fileobj=temp_file) as tar_file:
            for f in files:
                tar_file.add(f, arcname=os.path.basename(f))
        temp_file.seek(0)
        self.s3_hook.load_file_obj(temp_file, key, bucket, replace=True)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/wasb_hook.py#L84-L100
def load_file(self, file_path, container_name, blob_name, **kwargs):
    """ """
    # Note the argument order differs from airflow.hooks.S3_hook.load_file.
    self.connection.create_blob_from_path(container_name, blob_name,
                                          file_path, **kwargs)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/slack_webhook_operator.py#L84-L99
def execute(self, context):
    """ """
    self.hook = SlackWebhookHook(
        self.http_conn_id,
        self.webhook_token,
        self.message,
        self.attachments,
        self.channel,
        self.username,
        self.icon_emoji,
        self.link_names,
        self.proxy
    )
    self.hook.execute()
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/sensors/aws_glue_catalog_partition_sensor.py#L83-L93
def get_hook(self):
    """ """
    if not hasattr(self, 'hook'):
        from airflow.contrib.hooks.aws_glue_catalog_hook import AwsGlueCatalogHook
        self.hook = AwsGlueCatalogHook(
            aws_conn_id=self.aws_conn_id,
            region_name=self.region_name)
    return self.hook
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/executors/kubernetes_executor.py#L493-L511
def _make_safe_label_value(string):
    """ """
    MAX_LABEL_LEN = 63

    safe_label = re.sub(r'^[^a-z0-9A-Z]*|[^a-zA-Z0-9_\-\.]|[^a-z0-9A-Z]*$', '', string)

    if len(safe_label) > MAX_LABEL_LEN or string != safe_label:
        safe_hash = hashlib.md5(string.encode()).hexdigest()[:9]
        safe_label = safe_label[:MAX_LABEL_LEN - len(safe_hash) - 1] + "-" + safe_hash

    return safe_label
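An illustration of the sanitization contract, restating the same regex locally so the snippet is self-contained (the input strings are hypothetical): values that are already valid Kubernetes label strings pass through unchanged; anything else is stripped and suffixed with a 9-character md5-based hash.

import hashlib
import re

def demo_safe_label(string, max_len=63):
    safe = re.sub(r'^[^a-z0-9A-Z]*|[^a-zA-Z0-9_\-\.]|[^a-z0-9A-Z]*$', '', string)
    if len(safe) > max_len or string != safe:
        suffix = hashlib.md5(string.encode()).hexdigest()[:9]
        safe = safe[:max_len - len(suffix) - 1] + "-" + suffix
    return safe

print(demo_safe_label('my_task-1'))      # unchanged: 'my_task-1'
print(demo_safe_label('run @ 2019/01'))  # 'run201901-' plus 9 hex chars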
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/disentangled_vae.py#L887-L927
def sample_dynamic_prior(self, samples, batch_size, length, fixed=False):
  """ """
  if fixed:
    sample_batch_size = 1
  else:
    sample_batch_size = batch_size

  sample, state = self.dynamic_prior.zero_state([samples, sample_batch_size])
  locs = []
  scale_diags = []
  sample_list = []
  for _ in range(length):
    dist, state = self.dynamic_prior(sample, state)
    sample = dist.sample()
    locs.append(dist.parameters["loc"])
    scale_diags.append(dist.parameters["scale_diag"])
    sample_list.append(sample)

  sample = tf.stack(sample_list, axis=2)
  loc = tf.stack(locs, axis=2)
  scale_diag = tf.stack(scale_diags, axis=2)

  if fixed:  # tile along the batch axis
    sample = sample + tf.zeros([batch_size, 1, 1])

  return sample, tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/sts/fitting.py#L267-L282
def _minimize_in_graph(build_loss_fn, num_steps=200, optimizer=None):
  """"""
  optimizer = tf.compat.v1.train.AdamOptimizer(
      0.1) if optimizer is None else optimizer

  def train_loop_body(step):
    train_op = optimizer.minimize(
        build_loss_fn if tf.executing_eagerly() else build_loss_fn())
    return tf.tuple(tensors=[tf.add(step, 1)], control_inputs=[train_op])

  minimize_op = tf.compat.v1.while_loop(
      cond=lambda step: step < num_steps,
      body=train_loop_body,
      loop_vars=[tf.constant(0)],
      return_same_structure=True)[0]  # Always return a single op.
  return minimize_op
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L477-L513
def insert_object_acl(self, bucket_name, object_name, entity, role,
                      user_project=None):
    """ """
    self.log.info('Creating a new ACL entry for object: %s in bucket: %s',
                  object_name, bucket_name)
    client = self.get_conn()
    bucket = client.bucket(bucket_name=bucket_name)
    blob = bucket.blob(object_name)
    # Reload fetches the current ACL from Cloud Storage.
    blob.acl.reload()
    blob.acl.entity_from_dict(entity_dict={"entity": entity, "role": role})
    if user_project:
        blob.acl.user_project = user_project
    blob.acl.save()

    self.log.info('A new ACL entry created for object: %s in bucket: %s',
                  object_name, bucket_name)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/vector_exponential_linear_operator.py#L278-L287
def _mode_mean_shape(self):
  """"""
  shape = tensorshape_util.concatenate(self.batch_shape, self.event_shape)
  has_static_shape = tensorshape_util.is_fully_defined(shape)
  if not has_static_shape:
    shape = tf.concat([
        self.batch_shape_tensor(),
        self.event_shape_tensor(),
    ], 0)
  return shape
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L690-L822
def _check_and_change_state_before_execution( self, verbose=True, ignore_all_deps=False, ignore_depends_on_past=False, ignore_task_deps=False, ignore_ti_state=False, mark_success=False, test_mode=False, job_id=None, pool=None, session=None): """ """ task = self.task self.pool = pool or task.pool self.test_mode = test_mode self.refresh_from_db(session=session, lock_for_update=True) self.job_id = job_id self.hostname = get_hostname() self.operator = task.__class__.__name__ if not ignore_all_deps and not ignore_ti_state and self.state == State.SUCCESS: Stats.incr('previously_succeeded', 1, 1) queue_dep_context = DepContext( deps=QUEUE_DEPS, ignore_all_deps=ignore_all_deps, ignore_ti_state=ignore_ti_state, ignore_depends_on_past=ignore_depends_on_past, ignore_task_deps=ignore_task_deps) if not self.are_dependencies_met( dep_context=queue_dep_context, session=session, verbose=True): session.commit() return False # TODO: Logging needs cleanup, not clear what is being printed hr = "\n" + ("-" * 80) # Line break # For reporting purposes, we report based on 1-indexed, # not 0-indexed lists (i.e. Attempt 1 instead of # Attempt 0 for the first attempt). # Set the task start date. In case it was re-scheduled use the initial # start date that is recorded in task_reschedule table self.start_date = timezone.utcnow() task_reschedules = TaskReschedule.find_for_task_instance(self, session) if task_reschedules: self.start_date = task_reschedules[0].start_date dep_context = DepContext( deps=RUN_DEPS - QUEUE_DEPS, ignore_all_deps=ignore_all_deps, ignore_depends_on_past=ignore_depends_on_past, ignore_task_deps=ignore_task_deps, ignore_ti_state=ignore_ti_state) runnable = self.are_dependencies_met( dep_context=dep_context, session=session, verbose=True) if not runnable and not mark_success: # FIXME: we might have hit concurrency limits, which means we probably # have been running prematurely. This should be handled in the # scheduling mechanism. self.state = State.NONE self.log.warning(hr) self.log.warning( "FIXME: Rescheduling due to concurrency limits reached at task runtime. Attempt %s of " "%s. State set to NONE.", self.try_number, self.max_tries + 1 ) self.log.warning(hr) self.queued_dttm = timezone.utcnow() self.log.info("Queuing into pool %s", self.pool) session.merge(self) session.commit() return False # Another worker might have started running this task instance while # the current worker process was blocked on refresh_from_db if self.state == State.RUNNING: self.log.warning("Task Instance already running %s", self) session.commit() return False # print status message self.log.info(hr) self.log.info("Starting attempt %s of %s", self.try_number, self.max_tries + 1) self.log.info(hr) self._try_number += 1 if not test_mode: session.add(Log(State.RUNNING, self)) self.state = State.RUNNING self.pid = os.getpid() self.end_date = None if not test_mode: session.merge(self) session.commit() # Closing all pooled connections to prevent # "max number of connections reached" settings.engine.dispose() if verbose: if mark_success: self.log.info("Marking success for %s on %s", self.task, self.execution_date) else: self.log.info("Executing %s on %s", self.task, self.execution_date) return True
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/hidden_markov_model.py#L908-L911
def _log_vector_matrix(vs, ms):
  """"""
  return tf.reduce_logsumexp(input_tensor=vs[..., tf.newaxis] + ms, axis=-2)
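A small numerical check of the identity this helper relies on, written in plain NumPy with SciPy's logsumexp: logsumexp(log_v[:, None] + log_M, axis=0) equals log(v @ M).

import numpy as np
from scipy.special import logsumexp

v = np.array([0.2, 0.8])
M = np.array([[0.9, 0.1],
              [0.3, 0.7]])
lhs = logsumexp(np.log(v)[:, None] + np.log(M), axis=0)
print(np.allclose(lhs, np.log(v @ M)))  # True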
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/configuration.py#L309-L344
def getsection(self, section):
    """ """
    if (section not in self._sections and
            section not in self.airflow_defaults._sections):
        return None

    _section = copy.deepcopy(self.airflow_defaults._sections[section])

    if section in self._sections:
        _section.update(copy.deepcopy(self._sections[section]))

    section_prefix = 'AIRFLOW__{S}__'.format(S=section.upper())
    for env_var in sorted(os.environ.keys()):
        if env_var.startswith(section_prefix):
            key = env_var.replace(section_prefix, '').lower()
            _section[key] = self._get_env_var_option(section, key)

    for key, val in iteritems(_section):
        try:
            val = int(val)
        except ValueError:
            try:
                val = float(val)
            except ValueError:
                if val.lower() in ('t', 'true'):
                    val = True
                elif val.lower() in ('f', 'false'):
                    val = False
        _section[key] = val
    return _section
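The method layers AIRFLOW__<SECTION>__<KEY> environment variables over the file-based config and then coerces string values. A self-contained restatement of that coercion step, with illustrative values (not taken from any real airflow.cfg):

def demo_coerce(val):
    try:
        return int(val)
    except ValueError:
        try:
            return float(val)
        except ValueError:
            if val.lower() in ('t', 'true'):
                return True
            if val.lower() in ('f', 'false'):
                return False
            return val

print([demo_coerce(v) for v in ['64', '0.5', 'True', 'sqlite:///airflow.db']])
# [64, 0.5, True, 'sqlite:///airflow.db']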
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/distribution_layer.py#L1309-L1349
def _make_kl_divergence_fn(
    distribution_b,
    use_exact_kl=False,
    test_points_reduce_axis=(),  # `None` == "all"; () == "none".
    test_points_fn=tf.convert_to_tensor,
    weight=None):
  """"""

  if use_exact_kl:
    kl_divergence_fn = tfd.kl_divergence
  else:
    # Closure over: test_points_fn, test_points_reduce_axis.
    def kl_divergence_fn(distribution_a, distribution_b):
      z = test_points_fn(distribution_a)
      return tf.reduce_mean(
          input_tensor=distribution_a.log_prob(z) - distribution_b.log_prob(z),
          axis=test_points_reduce_axis)

  # Closure over: distribution_b, kl_divergence_fn, weight.
  def _fn(distribution_a):
    """Closure that computes KLDiv as a function of `a` as in `KL[a, b]`."""
    with tf.compat.v1.name_scope('kldivergence_loss'):
      # TODO(b/119756336): Due to eager/graph Jacobian graph caching bug
      # we add here the capability for deferred construction of the prior.
      # This capability can probably be removed once b/119756336 is resolved.
      distribution_b_ = (distribution_b() if callable(distribution_b)
                         else distribution_b)
      kl = kl_divergence_fn(distribution_a, distribution_b_)
      if weight is not None:
        kl = tf.cast(weight, dtype=kl.dtype) * kl
      # Losses appended with model.add_loss are expected to be a single
      # scalar, unlike model.loss, which is expected to be the loss per sample.
      # Therefore, we reduce over all dimensions, regardless of the shape.
      # We take the sum because (apparently) Keras will add this to the *post*
      # `reduce_sum` (total) loss.
      # TODO(b/126259176): Add end-to-end Keras/TFP test to ensure the API's
      # align, particularly wrt how losses are aggregated (across batch
      # members).
      return tf.reduce_sum(input_tensor=kl, name='batch_total_kl_divergence')

  return _fn
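A minimal sketch of the Monte Carlo estimator used when the exact KL is not requested, written with SciPy distributions rather than TFP ones: KL[a||b] is approximated by mean(log a(z) - log b(z)) with z drawn from a.

import numpy as np
from scipy import stats

a = stats.norm(0.0, 1.0)
b = stats.norm(1.0, 1.0)
z = a.rvs(size=100000, random_state=0)
# Close to the exact KL between N(0,1) and N(1,1), which is 0.5.
print(np.mean(a.logpdf(z) - b.logpdf(z)))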
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/slice_sampler_kernel.py#L381-L537
def _sample_next(target_log_prob_fn, current_state_parts, step_sizes, max_doublings, current_target_log_prob, batch_rank, seed=None, name=None): """ """ with tf.compat.v1.name_scope(name, 'sample_next', [ current_state_parts, step_sizes, max_doublings, current_target_log_prob, batch_rank ]): # First step: Choose a random direction. # Direction is a list of tensors. The i'th tensor should have the same shape # as the i'th state part. direction = _choose_random_direction(current_state_parts, batch_rank=batch_rank, seed=seed) # Interpolates the step sizes for the chosen direction. # Applies an ellipsoidal interpolation to compute the step direction for # the chosen direction. Suppose we are given step sizes for each direction. # Label these s_1, s_2, ... s_k. These are the step sizes to use if moving # in a direction parallel to one of the axes. Consider an ellipsoid which # intercepts the i'th axis at s_i. The step size for a direction specified # by the unit vector (n_1, n_2 ...n_k) is then defined as the intersection # of the line through this vector with this ellipsoid. # # One can show that the length of the vector from the origin to the # intersection point is given by: # 1 / sqrt(n_1^2 / s_1^2 + n_2^2 / s_2^2 + ...). # # Proof: # The equation of the ellipsoid is: # Sum_i [x_i^2 / s_i^2 ] = 1. Let n be a unit direction vector. Points # along the line given by n may be parameterized as alpha*n where alpha is # the distance along the vector. Plugging this into the equation for the # ellipsoid, we get: # alpha^2 ( n_1^2 / s_1^2 + n_2^2 / s_2^2 + ...) = 1 # so alpha = \sqrt { \frac{1} { ( n_1^2 / s_1^2 + n_2^2 / s_2^2 + ...) } } reduce_axes = [tf.range(batch_rank, tf.rank(dirn_part)) for dirn_part in direction] components = [ tf.reduce_sum( input_tensor=(dirn_part / step_size)**2, axis=reduce_axes[i]) for i, (step_size, dirn_part) in enumerate(zip(step_sizes, direction)) ] step_size = tf.math.rsqrt(tf.add_n(components)) # Computes the rank of a tensor. Uses the static rank if possible. def _get_rank(x): return (len(x.shape.as_list()) if x.shape.dims is not None else tf.rank(x)) state_part_ranks = [_get_rank(part) for part in current_state_parts] def _step_along_direction(alpha): """Converts the scalar alpha into an n-dim vector with full state info. Computes x_0 + alpha * direction where x_0 is the current state and direction is the direction chosen above. Args: alpha: A tensor of shape equal to the batch dimensions of `current_state_parts`. Returns: state_parts: Tensor or Python list of `Tensor`s representing the state(s) of the Markov chain(s) for a given alpha and a given chosen direction. Has the same shape as `current_state_parts`. """ padded_alphas = [_right_pad(alpha, final_rank=part_rank) for part_rank in state_part_ranks] state_parts = [state_part + padded_alpha * direction_part for state_part, direction_part, padded_alpha in zip(current_state_parts, direction, padded_alphas)] return state_parts def projected_target_log_prob_fn(alpha): """The target log density projected along the chosen direction. Args: alpha: A tensor of shape equal to the batch dimensions of `current_state_parts`. Returns: Target log density evaluated at x_0 + alpha * direction where x_0 is the current state and direction is the direction chosen above. Has the same shape as `alpha`. 
""" return target_log_prob_fn(*_step_along_direction(alpha)) alpha_init = tf.zeros_like(current_target_log_prob, dtype=current_state_parts[0].dtype.base_dtype) [ next_alpha, next_target_log_prob, bounds_satisfied, upper_bounds, lower_bounds ] = ssu.slice_sampler_one_dim(projected_target_log_prob_fn, x_initial=alpha_init, max_doublings=max_doublings, step_size=step_size, seed=seed) return [ _step_along_direction(next_alpha), next_target_log_prob, bounds_satisfied, direction, upper_bounds, lower_bounds ]
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/disentangled_vae.py#L1055-L1078
def summarize_mean_in_nats_and_bits(inputs, units, name,
                                    nats_name_scope="nats",
                                    bits_name_scope="bits_per_dim"):
  """ """
  mean = tf.reduce_mean(input_tensor=inputs)
  with tf.compat.v1.name_scope(nats_name_scope):
    tf.compat.v2.summary.scalar(
        name,
        mean,
        step=tf.compat.v1.train.get_or_create_global_step())
  with tf.compat.v1.name_scope(bits_name_scope):
    tf.compat.v2.summary.scalar(
        name,
        mean / units / tf.math.log(2.),
        step=tf.compat.v1.train.get_or_create_global_step())
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/webhdfs_hook.py#L56-L79
def get_conn(self):
    """ """
    connections = self.get_connections(self.webhdfs_conn_id)

    for connection in connections:
        try:
            self.log.debug('Trying namenode %s', connection.host)
            client = self._get_client(connection)
            client.status('/')
            self.log.debug('Using namenode %s for hook', connection.host)
            return client
        except HdfsError as hdfs_error:
            self.log.debug('Read operation on namenode %s failed with error: %s',
                           connection.host, hdfs_error)

    hosts = [connection.host for connection in connections]
    error_message = 'Read operations failed on the namenodes below:\n{hosts}'.format(
        hosts='\n'.join(hosts))
    raise AirflowWebHDFSHookException(error_message)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_sql_hook.py#L205-L226
def create_database(self, instance, body, project_id=None):
    """ """
    response = self.get_conn().databases().insert(
        project=project_id,
        instance=instance,
        body=body
    ).execute(num_retries=self.num_retries)
    operation_name = response["name"]
    self._wait_for_operation_to_complete(project_id=project_id,
                                         operation_name=operation_name)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/slack_webhook_hook.py#L121-L134
def execute(self):
    """ """
    proxies = {}
    if self.proxy:
        # we only need https proxy for Slack, as the endpoint is https
        proxies = {'https': self.proxy}

    slack_message = self._build_slack_message()
    self.run(endpoint=self.webhook_token,
             data=slack_message,
             headers={'Content-type': 'application/json'},
             extra_options={'proxies': proxies})
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/gcp_transfer_operator.py#L113-L117
def _convert_time_to_dict(time):
    """ """
    return {HOURS: time.hour, MINUTES: time.minute, SECONDS: time.second}
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L182-L207
def update_product_set(
    self,
    product_set,
    location=None,
    product_set_id=None,
    update_mask=None,
    project_id=None,
    retry=None,
    timeout=None,
    metadata=None,
):
    """ """
    client = self.get_conn()
    product_set = self.product_set_name_determiner.get_entity_with_name(
        product_set, product_set_id, location, project_id
    )
    self.log.info('Updating ProductSet: %s', product_set.name)
    response = client.update_product_set(
        product_set=product_set,
        update_mask=update_mask,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    self.log.info('ProductSet updated: %s', response.name if response else '')
    self.log.debug('ProductSet updated:\n%s', response)
    return MessageToDict(response)
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sagemaker_hook.py#L325-L366
def create_training_job(self, config, wait_for_completion=True, print_log=True,
                        check_interval=30, max_ingestion_time=None):
    """ """
    self.check_training_config(config)

    response = self.get_conn().create_training_job(**config)
    if print_log:
        self.check_training_status_with_log(config['TrainingJobName'],
                                            self.non_terminal_states,
                                            self.failed_states,
                                            wait_for_completion,
                                            check_interval,
                                            max_ingestion_time)
    elif wait_for_completion:
        describe_response = self.check_status(config['TrainingJobName'],
                                              'TrainingJobStatus',
                                              self.describe_training_job,
                                              check_interval,
                                              max_ingestion_time)

        billable_time = \
            (describe_response['TrainingEndTime'] -
             describe_response['TrainingStartTime']) * \
            describe_response['ResourceConfig']['InstanceCount']
        self.log.info('Billable seconds:{}'.format(int(billable_time.total_seconds()) + 1))

    return response
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1341-L1353
def get_schema(self, dataset_id, table_id):
    """ """
    tables_resource = self.service.tables() \
        .get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
        .execute(num_retries=self.num_retries)
    return tables_resource['schema']
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/aws_athena_hook.py#L74-L89
def check_query_status(self, query_execution_id):
    """ """
    response = self.conn.get_query_execution(QueryExecutionId=query_execution_id)
    state = None
    try:
        state = response['QueryExecution']['Status']['State']
    except Exception as ex:
        # Include the exception in the log format string; passing it as a bare
        # extra argument would make the log record fail to format.
        self.log.error('Exception while getting query state: %s', ex)
    finally:
        return state
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/util/docstring.py#L30-L51
def expand_docstring(**kwargs):
  """ """
  def _fn_wrapped(fn):
    """Original function with modified `__doc__` attribute."""
    doc = inspect.cleandoc(fn.__doc__)
    for k, v in six.iteritems(kwargs):
      # Capture each ${k} reference to replace with v.
      # We wrap the replacement in a function so no backslash escapes
      # are processed.
      pattern = r'\$\{' + str(k) + r'\}'
      doc = re.sub(pattern, lambda match: v, doc)  # pylint: disable=cell-var-from-loop
    fn.__doc__ = doc
    return fn
  return _fn_wrapped
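A minimal usage sketch (the decorated function and its docstring text are hypothetical): every ${key} reference in the wrapped function's docstring is replaced by the corresponding keyword value.

@expand_docstring(dist='Normal')
def sample(n):
  """Draws `n` samples from the ${dist} distribution."""

print(sample.__doc__)  # "Draws `n` samples from the Normal distribution."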
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/experimental/fun_mcmc/fun_mcmc_lib.py#L181-L239
def transform_log_prob_fn(log_prob_fn: PotentialFn,
                          bijector: BijectorNest,
                          init_state: State = None
                         ) -> Union[PotentialFn, Tuple[PotentialFn, State]]:
  """ """

  def wrapper(*args):
    """Transformed wrapper."""
    bijector_ = bijector

    args = tf.nest.map_structure(lambda x: 0. + x, args)
    if len(args) == 1:
      args = args[0]
    elif isinstance(bijector_, list):
      bijector_ = tuple(bijector_)

    original_space_args = tf.nest.map_structure(lambda b, x: b.forward(x),
                                                bijector_, args)
    original_space_args = original_space_args  # type: Tuple[Any]

    original_space_log_prob, extra = call_fn(log_prob_fn, original_space_args)
    event_ndims = tf.nest.map_structure(
        lambda x: tf.rank(x) - tf.rank(original_space_log_prob), args)

    return original_space_log_prob + sum(
        tf.nest.flatten(
            tf.nest.map_structure(
                lambda b, x, e: b.forward_log_det_jacobian(x, event_ndims=e),
                bijector_, args, event_ndims))), [original_space_args, extra]

  if init_state is None:
    return wrapper
  else:
    return wrapper, tf.nest.map_structure(lambda b, s: b.inverse(s), bijector,
                                          init_state)
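A small numerical check of the change-of-variables identity the wrapper implements, done with SciPy distributions and an exp bijector for concreteness: if x = exp(y), the transformed log density is log p_X(exp(y)) plus the forward log-det-Jacobian, which for exp is simply y.

import numpy as np
from scipy import stats

y = 0.3
lhs = stats.lognorm(s=1.0).logpdf(np.exp(y)) + y  # log p_X(exp(y)) + log|d exp(y)/dy|
rhs = stats.norm(0.0, 1.0).logpdf(y)              # density of Y = log(X) for X ~ LogNormal(0, 1)
print(np.allclose(lhs, rhs))  # True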
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/executors/kubernetes_executor.py#L458-L470
def _strip_unsafe_kubernetes_special_chars(string):
    """ """
    return ''.join(ch.lower()
                   for ind, ch in enumerate(string)
                   if ch.isalnum())
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/disentangled_vae.py#L216-L229
def call(self, inputs):
  """ """
  del inputs  # unused
  with tf.compat.v1.name_scope(self._name):
    return tfd.MultivariateNormalDiag(self.loc, self.scale_diag)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/lkj.py#L371-L410
def _log_unnorm_prob(self, x, name=None):
  """ """
  with tf.name_scope(name or 'log_unnorm_prob_lkj'):
    x = tf.convert_to_tensor(value=x, name='x')
    # The density is det(matrix) ** (concentration - 1).
    # Computing the determinant with `logdet` is usually fine, since
    # correlation matrices are Hermitian and PSD. But in some cases, for a
    # PSD matrix whose eigenvalues are close to zero, `logdet` raises an error
    # complaining that it is not PSD. The root cause is the computation of the
    # cholesky decomposition in `logdet`. Hence, we use the less efficient but
    # more robust `slogdet` which does not use `cholesky`.
    #
    # An alternative would have been to check allow_nan_stats and use
    #   eigenvalues = tf.linalg.self_adjoint_eigvals(x)
    #   psd_mask = tf.cast(
    #       tf.reduce_min(eigenvalues, axis=-1) >= 0, dtype=x.dtype)
    #   tf.where(psd_mask, answer, float('-inf'))
    # to emit probability 0 for inputs that are not PSD, without ever raising
    # an error. More care must be taken, as due to numerical stability issues,
    # self_adjoint_eigvals can return slightly negative eigenvalues even for
    # a PSD matrix.
    if self.input_output_cholesky:
      logdet = 2.0 * tf.reduce_sum(
          input_tensor=tf.math.log(tf.linalg.diag_part(x)), axis=[-1])
    else:
      _, logdet = tf.linalg.slogdet(x)
    answer = (self.concentration - 1.) * logdet
    return answer
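A quick numerical check, in plain NumPy, of the Cholesky shortcut used in the input_output_cholesky branch: if x = L @ L.T with L lower-triangular, then log det(x) = 2 * sum(log(diag(L))).

import numpy as np

corr = np.array([[1.0, 0.6],
                 [0.6, 1.0]])
L = np.linalg.cholesky(corr)
print(np.isclose(2.0 * np.sum(np.log(np.diag(L))),
                 np.linalg.slogdet(corr)[1]))  # True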
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcs_hook.py#L340-L358
def get_crc32c(self, bucket_name, object_name):
    """ """
    self.log.info('Retrieving the crc32c checksum of '
                  'object_name: %s in bucket_name: %s',
                  object_name, bucket_name)
    client = self.get_conn()
    bucket = client.get_bucket(bucket_name=bucket_name)
    blob = bucket.get_blob(blob_name=object_name)
    blob.reload()
    blob_crc32c = blob.crc32c
    self.log.info('The crc32c checksum of %s is %s', object_name, blob_crc32c)
    return blob_crc32c
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/dagbag.py#L341-L396
def collect_dags( self, dag_folder=None, only_if_updated=True, include_examples=configuration.conf.getboolean('core', 'LOAD_EXAMPLES'), safe_mode=configuration.conf.getboolean('core', 'DAG_DISCOVERY_SAFE_MODE')): """ """ start_dttm = timezone.utcnow() dag_folder = dag_folder or self.dag_folder # Used to store stats around DagBag processing stats = [] FileLoadStat = namedtuple( 'FileLoadStat', "file duration dag_num task_num dags") dag_folder = correct_maybe_zipped(dag_folder) for filepath in list_py_file_paths(dag_folder, safe_mode=safe_mode, include_examples=include_examples): try: ts = timezone.utcnow() found_dags = self.process_file( filepath, only_if_updated=only_if_updated, safe_mode=safe_mode) td = timezone.utcnow() - ts td = td.total_seconds() + ( float(td.microseconds) / 1000000) stats.append(FileLoadStat( filepath.replace(dag_folder, ''), td, len(found_dags), sum([len(dag.tasks) for dag in found_dags]), str([dag.dag_id for dag in found_dags]), )) except Exception as e: self.log.exception(e) Stats.gauge( 'collect_dags', (timezone.utcnow() - start_dttm).total_seconds(), 1) Stats.gauge( 'dagbag_size', len(self.dags), 1) Stats.gauge( 'dagbag_import_errors', len(self.import_errors), 1) self.dagbag_stats = sorted( stats, key=lambda x: x.duration, reverse=True)
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/gumbel.py#L201-L224
def _kl_gumbel_gumbel(a, b, name=None):
  """ """
  with tf.name_scope(name or "kl_gumbel_gumbel"):
    # Consistent with
    # http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf, page 64
    # The paper uses beta to refer to scale and mu to refer to loc.
    # There is actually an error in the solution as printed; this is based on
    # the second-to-last step of the derivation. The value as printed would be
    # off by (a.loc - b.loc) / b.scale.
    return (tf.math.log(b.scale) - tf.math.log(a.scale) +
            np.euler_gamma * (a.scale / b.scale - 1.) +
            tf.math.expm1((b.loc - a.loc) / b.scale +
                          tf.math.lgamma(a.scale / b.scale + 1.)) +
            (a.loc - b.loc) / b.scale)
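A quick consistency check of the closed form above using plain Python math: with a == b every term vanishes (note lgamma(2) == log(1!) == 0 and expm1(0) == 0), so KL(a || a) == 0 as required. The loc/scale values are arbitrary.

import math

loc, scale = 1.5, 2.0
kl = (math.log(scale) - math.log(scale) +
      0.5772156649015329 * (scale / scale - 1.) +
      math.expm1((loc - loc) / scale + math.lgamma(scale / scale + 1.)) +
      (loc - loc) / scale)
print(kl)  # 0.0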
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_sql_hook.py#L601-L615
def get_proxy_version(self):
    """ """
    self._download_sql_proxy_if_needed()
    command_to_run = [self.sql_proxy_path]
    command_to_run.extend(['--version'])
    command_to_run.extend(self._get_credential_parameters())
    result = subprocess.check_output(command_to_run).decode('utf-8')
    pattern = re.compile("^.*[V|v]ersion ([^;]*);.*$")
    m = pattern.match(result)
    if m:
        return m.group(1)
    else:
        return None
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dates.py#L114-L188
def round_time(dt, delta, start_date=timezone.make_aware(datetime.min)): """ """ if isinstance(delta, six.string_types): # It's cron based, so it's easy tz = start_date.tzinfo start_date = timezone.make_naive(start_date, tz) cron = croniter(delta, start_date) prev = cron.get_prev(datetime) if prev == start_date: return timezone.make_aware(start_date, tz) else: return timezone.make_aware(prev, tz) # Ignore the microseconds of dt dt -= timedelta(microseconds=dt.microsecond) # We are looking for a datetime in the form start_date + i * delta # which is as close as possible to dt. Since delta could be a relative # delta we don't know its exact length in seconds so we cannot rely on # division to find i. Instead we employ a binary search algorithm, first # finding an upper and lower limit and then disecting the interval until # we have found the closest match. # We first search an upper limit for i for which start_date + upper * delta # exceeds dt. upper = 1 while start_date + upper * delta < dt: # To speed up finding an upper limit we grow this exponentially by a # factor of 2 upper *= 2 # Since upper is the first value for which start_date + upper * delta # exceeds dt, upper // 2 is below dt and therefore forms a lower limited # for the i we are looking for lower = upper // 2 # We now continue to intersect the interval between # start_date + lower * delta and start_date + upper * delta # until we find the closest value while True: # Invariant: start + lower * delta < dt <= start + upper * delta # If start_date + (lower + 1)*delta exceeds dt, then either lower or # lower+1 has to be the solution we are searching for if start_date + (lower + 1) * delta >= dt: # Check if start_date + (lower + 1)*delta or # start_date + lower*delta is closer to dt and return the solution if (start_date + (lower + 1) * delta) - dt <= dt - (start_date + lower * delta): return start_date + (lower + 1) * delta else: return start_date + lower * delta # We intersect the interval and either replace the lower or upper # limit with the candidate candidate = lower + (upper - lower) // 2 if start_date + candidate * delta >= dt: upper = candidate else: lower = candidate
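A minimal usage sketch of the timedelta branch, assuming Airflow is importable and the default UTC timezone settings (the dates are hypothetical): the binary search snaps dt to the nearest point on the start_date + i * delta grid.

from datetime import timedelta
from airflow.utils import dates, timezone

dt = timezone.datetime(2019, 1, 15, 1, 0)
# Nearest daily grid point; 2019-01-15 00:00:00+00:00 with default UTC settings.
print(dates.round_time(dt, timedelta(days=1)))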
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/jobs.py#L2118-L2366
def _process_backfill_task_instances(self, ti_status, executor, pickle_id, start_date=None, session=None): """ """ executed_run_dates = [] while ((len(ti_status.to_run) > 0 or len(ti_status.running) > 0) and len(ti_status.deadlocked) == 0): self.log.debug("*** Clearing out not_ready list ***") ti_status.not_ready.clear() # we need to execute the tasks bottom to top # or leaf to root, as otherwise tasks might be # determined deadlocked while they are actually # waiting for their upstream to finish @provide_session def _per_task_process(task, key, ti, session=None): ti.refresh_from_db() task = self.dag.get_task(ti.task_id) ti.task = task ignore_depends_on_past = ( self.ignore_first_depends_on_past and ti.execution_date == (start_date or ti.start_date)) self.log.debug( "Task instance to run %s state %s", ti, ti.state) # The task was already marked successful or skipped by a # different Job. Don't rerun it. if ti.state == State.SUCCESS: ti_status.succeeded.add(key) self.log.debug("Task instance %s succeeded. Don't rerun.", ti) ti_status.to_run.pop(key) if key in ti_status.running: ti_status.running.pop(key) return elif ti.state == State.SKIPPED: ti_status.skipped.add(key) self.log.debug("Task instance %s skipped. Don't rerun.", ti) ti_status.to_run.pop(key) if key in ti_status.running: ti_status.running.pop(key) return # guard against externally modified tasks instances or # in case max concurrency has been reached at task runtime elif ti.state == State.NONE: self.log.warning( "FIXME: task instance {} state was set to None " "externally. This should not happen" ) ti.set_state(State.SCHEDULED, session=session) if self.rerun_failed_tasks: # Rerun failed tasks or upstreamed failed tasks if ti.state in (State.FAILED, State.UPSTREAM_FAILED): self.log.error("Task instance {ti} " "with state {state}".format(ti=ti, state=ti.state)) if key in ti_status.running: ti_status.running.pop(key) # Reset the failed task in backfill to scheduled state ti.set_state(State.SCHEDULED, session=session) else: # Default behaviour which works for subdag. if ti.state in (State.FAILED, State.UPSTREAM_FAILED): self.log.error("Task instance {ti} " "with {state} state".format(ti=ti, state=ti.state)) ti_status.failed.add(key) ti_status.to_run.pop(key) if key in ti_status.running: ti_status.running.pop(key) return backfill_context = DepContext( deps=RUN_DEPS, ignore_depends_on_past=ignore_depends_on_past, ignore_task_deps=self.ignore_task_deps, flag_upstream_failed=True) # Is the task runnable? 
-- then run it # the dependency checker can change states of tis if ti.are_dependencies_met( dep_context=backfill_context, session=session, verbose=self.verbose): ti.refresh_from_db(lock_for_update=True, session=session) if ti.state in (State.SCHEDULED, State.UP_FOR_RETRY, State.UP_FOR_RESCHEDULE): if executor.has_task(ti): self.log.debug( "Task Instance %s already in executor " "waiting for queue to clear", ti ) else: self.log.debug('Sending %s to executor', ti) # Skip scheduled state, we are executing immediately ti.state = State.QUEUED ti.queued_dttm = timezone.utcnow() if not ti.queued_dttm else ti.queued_dttm session.merge(ti) cfg_path = None if executor.__class__ in (executors.LocalExecutor, executors.SequentialExecutor): cfg_path = tmp_configuration_copy() executor.queue_task_instance( ti, mark_success=self.mark_success, pickle_id=pickle_id, ignore_task_deps=self.ignore_task_deps, ignore_depends_on_past=ignore_depends_on_past, pool=self.pool, cfg_path=cfg_path) ti_status.running[key] = ti ti_status.to_run.pop(key) session.commit() return if ti.state == State.UPSTREAM_FAILED: self.log.error("Task instance %s upstream failed", ti) ti_status.failed.add(key) ti_status.to_run.pop(key) if key in ti_status.running: ti_status.running.pop(key) return # special case if ti.state == State.UP_FOR_RETRY: self.log.debug( "Task instance %s retry period not " "expired yet", ti) if key in ti_status.running: ti_status.running.pop(key) ti_status.to_run[key] = ti return # special case if ti.state == State.UP_FOR_RESCHEDULE: self.log.debug( "Task instance %s reschedule period not " "expired yet", ti) if key in ti_status.running: ti_status.running.pop(key) ti_status.to_run[key] = ti return # all remaining tasks self.log.debug('Adding %s to not_ready', ti) ti_status.not_ready.add(key) non_pool_slots = conf.getint('core', 'non_pooled_backfill_task_slot_count') try: for task in self.dag.topological_sort(): for key, ti in list(ti_status.to_run.items()): if task.task_id != ti.task_id: continue if task.pool: pool = session.query(models.Pool) \ .filter(models.Pool.pool == task.pool) \ .first() if not pool: raise PoolNotFound('Unknown pool: {}'.format(task.pool)) open_slots = pool.open_slots(session=session) if open_slots <= 0: raise NoAvailablePoolSlot( "Not scheduling since there are " "%s open slots in pool %s".format( open_slots, task.pool)) else: if non_pool_slots <= 0: raise NoAvailablePoolSlot( "Not scheduling since there are no " "non_pooled_backfill_task_slot_count.") non_pool_slots -= 1 num_running_tasks = DAG.get_num_task_instances( self.dag_id, states=(State.QUEUED, State.RUNNING)) if num_running_tasks >= self.dag.concurrency: raise DagConcurrencyLimitReached( "Not scheduling since concurrency limit " "is reached." 
) _per_task_process(task, key, ti) except (NoAvailablePoolSlot, DagConcurrencyLimitReached) as e: self.log.debug(e) # execute the tasks in the queue self.heartbeat() executor.heartbeat() # If the set of tasks that aren't ready ever equals the set of # tasks to run and there are no running tasks then the backfill # is deadlocked if (ti_status.not_ready and ti_status.not_ready == set(ti_status.to_run) and len(ti_status.running) == 0): self.log.warning( "Deadlock discovered for ti_status.to_run=%s", ti_status.to_run.values() ) ti_status.deadlocked.update(ti_status.to_run.values()) ti_status.to_run.clear() # check executor state self._manage_executor_state(ti_status.running) # update the task counters self._update_counters(ti_status=ti_status) # update dag run state _dag_runs = ti_status.active_runs[:] for run in _dag_runs: run.update_state(session=session) if run.state in State.finished(): ti_status.finished_runs += 1 ti_status.active_runs.remove(run) executed_run_dates.append(run.execution_date) self._log_progress(ti_status) # return updated status return executed_run_dates