Dataset columns:

  text           string    (lengths 75 to 104k)
  code_tokens    sequence
  avg_line_len   float64   (7.91 to 980)
  score          float64   (0 to 0.18)
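Each record below pairs a raw function string (`text`) with its token sequence (`code_tokens`) and two per-record statistics (`avg_line_len`, `score`). As a rough illustration only — the file path, JSON-lines storage format, and the filter thresholds are assumptions for the sketch, not part of the original data — records with this schema could be loaded and filtered like so:

```python
import json

# Hypothetical loader: assumes the records are stored as JSON lines with the
# four fields described above (text, code_tokens, avg_line_len, score).
def load_records(path):
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            yield json.loads(line)

# Keep only short-line, higher-scoring samples (thresholds are illustrative).
selected = [
    r for r in load_records("samples.jsonl")
    if r["avg_line_len"] < 50 and r["score"] > 0.01
]
print(len(selected), "records selected")
```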
def heatmap(n_x=5,n_y=10):
    """
    Returns a DataFrame with the required format for a heatmap plot

    Parameters:
    -----------
    n_x : int
        Number of x categories
    n_y : int
        Number of y categories
    """
    x=['x_'+str(_) for _ in range(n_x)]
    y=['y_'+str(_) for _ in range(n_y)]
    return pd.DataFrame(surface(n_x-1,n_y-1).values,index=x,columns=y)
[ "def", "heatmap", "(", "n_x", "=", "5", ",", "n_y", "=", "10", ")", ":", "x", "=", "[", "'x_'", "+", "str", "(", "_", ")", "for", "_", "in", "range", "(", "n_x", ")", "]", "y", "=", "[", "'y_'", "+", "str", "(", "_", ")", "for", "_", "in", "range", "(", "n_y", ")", "]", "return", "pd", ".", "DataFrame", "(", "surface", "(", "n_x", "-", "1", ",", "n_y", "-", "1", ")", ".", "values", ",", "index", "=", "x", ",", "columns", "=", "y", ")" ]
22.333333
0.063037
def convert_batchnorm(builder, layer, input_names, output_names, keras_layer): """ Parameters keras_layer: layer A keras layer object. builder: NeuralNetworkBuilder A neural network builder object. """ # Get input and output names input_name, output_name = (input_names[0], output_names[0]) # Currently CoreML supports only per-channel batch-norm if keras_layer.mode != 0: raise NotImplementedError( 'Currently supports only per-feature normalization') axis = keras_layer.axis nb_channels = keras_layer.input_shape[axis] # Set parameters # Parameter arrangement in Keras: gamma, beta, mean, variance gamma = keras_layer.get_weights()[0] beta = keras_layer.get_weights()[1] mean = keras_layer.get_weights()[2] std = keras_layer.get_weights()[3] # compute adjusted parameters variance = std * std f = 1.0 / np.sqrt(std + keras_layer.epsilon) gamma1 = gamma*f beta1 = beta - gamma*mean*f mean[:] = 0.0 #mean variance[:] = 1.0 - .00001 #stddev builder.add_batchnorm( name = layer, channels = nb_channels, gamma = gamma1, beta = beta1, mean = mean, variance = variance, input_name = input_name, output_name = output_name)
[ "def", "convert_batchnorm", "(", "builder", ",", "layer", ",", "input_names", ",", "output_names", ",", "keras_layer", ")", ":", "# Get input and output names", "input_name", ",", "output_name", "=", "(", "input_names", "[", "0", "]", ",", "output_names", "[", "0", "]", ")", "# Currently CoreML supports only per-channel batch-norm", "if", "keras_layer", ".", "mode", "!=", "0", ":", "raise", "NotImplementedError", "(", "'Currently supports only per-feature normalization'", ")", "axis", "=", "keras_layer", ".", "axis", "nb_channels", "=", "keras_layer", ".", "input_shape", "[", "axis", "]", "# Set parameters", "# Parameter arrangement in Keras: gamma, beta, mean, variance", "gamma", "=", "keras_layer", ".", "get_weights", "(", ")", "[", "0", "]", "beta", "=", "keras_layer", ".", "get_weights", "(", ")", "[", "1", "]", "mean", "=", "keras_layer", ".", "get_weights", "(", ")", "[", "2", "]", "std", "=", "keras_layer", ".", "get_weights", "(", ")", "[", "3", "]", "# compute adjusted parameters", "variance", "=", "std", "*", "std", "f", "=", "1.0", "/", "np", ".", "sqrt", "(", "std", "+", "keras_layer", ".", "epsilon", ")", "gamma1", "=", "gamma", "*", "f", "beta1", "=", "beta", "-", "gamma", "*", "mean", "*", "f", "mean", "[", ":", "]", "=", "0.0", "#mean", "variance", "[", ":", "]", "=", "1.0", "-", ".00001", "#stddev", "builder", ".", "add_batchnorm", "(", "name", "=", "layer", ",", "channels", "=", "nb_channels", ",", "gamma", "=", "gamma1", ",", "beta", "=", "beta1", ",", "mean", "=", "mean", ",", "variance", "=", "variance", ",", "input_name", "=", "input_name", ",", "output_name", "=", "output_name", ")" ]
28.422222
0.016629
def update_offset(self, offset):
    # type: (int) -> None
    '''
    Update the offset for this CE record.

    Parameters:
     extent - The new offset for this CE record.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('CE record not yet initialized!')

    self.offset_cont_area = offset
[ "def", "update_offset", "(", "self", ",", "offset", ")", ":", "# type: (int) -> None", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'CE record not yet initialized!'", ")", "self", ".", "offset_cont_area", "=", "offset" ]
28
0.009877
def get_param(self):
    """Method to get current optimizer's parameter value
    """
    lr_list = self.lr_scheduler.get_lr()
    if len(lr_list) > 1:
        raise ValueError("Optimizer passed to lr_scheduler should have a single param group, "
                         "but currently there are {} param groups".format(len(lr_list)))
    return lr_list[0]
[ "def", "get_param", "(", "self", ")", ":", "lr_list", "=", "self", ".", "lr_scheduler", ".", "get_lr", "(", ")", "if", "len", "(", "lr_list", ")", ">", "1", ":", "raise", "ValueError", "(", "\"Optimizer passed to lr_scheduler should have a single param group, \"", "\"but currently there are {} param groups\"", ".", "format", "(", "len", "(", "lr_list", ")", ")", ")", "return", "lr_list", "[", "0", "]" ]
47.25
0.01039
def get_url(path, dest='', saltenv='base', makedirs=False, source_hash=None): ''' .. versionchanged:: 2018.3.0 ``dest`` can now be a directory Used to get a single file from a URL. path A URL to download a file from. Supported URL schemes are: ``salt://``, ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and ``file://`` (local filesystem). If no scheme was specified, this is equivalent of using ``file://``. If a ``file://`` URL is given, the function just returns absolute path to that file on a local filesystem. The function returns ``False`` if Salt was unable to fetch a file from a ``salt://`` URL. dest The default behaviour is to write the fetched file to the given destination path. If this parameter is omitted or set as empty string (``''``), the function places the remote file on the local filesystem inside the Minion cache directory and returns the path to that file. .. note:: To simply return the file contents instead, set destination to ``None``. This works with ``salt://``, ``http://``, ``https://`` and ``file://`` URLs. The files fetched by ``http://`` and ``https://`` will not be cached. saltenv : base Salt fileserver envrionment from which to retrieve the file. Ignored if ``path`` is not a ``salt://`` URL. source_hash If ``path`` is an http(s) or ftp URL and the file exists in the minion's file cache, this option can be passed to keep the minion from re-downloading the file if the cached copy matches the specified hash. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: bash salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine salt '*' cp.get_url http://www.slashdot.org /tmp/index.html ''' if isinstance(dest, six.string_types): result = _client().get_url( path, dest, makedirs, saltenv, source_hash=source_hash) else: result = _client().get_url( path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash) if not result: log.error('Unable to fetch file %s from saltenv %s.', salt.utils.url.redact_http_basic_auth(path), saltenv) return result
[ "def", "get_url", "(", "path", ",", "dest", "=", "''", ",", "saltenv", "=", "'base'", ",", "makedirs", "=", "False", ",", "source_hash", "=", "None", ")", ":", "if", "isinstance", "(", "dest", ",", "six", ".", "string_types", ")", ":", "result", "=", "_client", "(", ")", ".", "get_url", "(", "path", ",", "dest", ",", "makedirs", ",", "saltenv", ",", "source_hash", "=", "source_hash", ")", "else", ":", "result", "=", "_client", "(", ")", ".", "get_url", "(", "path", ",", "None", ",", "makedirs", ",", "saltenv", ",", "no_cache", "=", "True", ",", "source_hash", "=", "source_hash", ")", "if", "not", "result", ":", "log", ".", "error", "(", "'Unable to fetch file %s from saltenv %s.'", ",", "salt", ".", "utils", ".", "url", ".", "redact_http_basic_auth", "(", "path", ")", ",", "saltenv", ")", "return", "result" ]
39.559322
0.000836
def write_aims(filename, atoms):
    """Method to write FHI-aims geometry files in phonopy context."""

    lines = ""
    lines += "# geometry.in for FHI-aims \n"
    lines += "# | generated by phonopy.FHIaims.write_aims() \n"

    lattice_vector_line = "lattice_vector " + "%16.16f "*3 + "\n"
    for vec in atoms.get_cell():
        lines += lattice_vector_line % tuple(vec)

    N = atoms.get_number_of_atoms()

    atom_line = "atom " + "%16.16f "*3 + "%s \n"
    positions = atoms.get_positions()
    symbols = atoms.get_chemical_symbols()

    initial_moment_line = "initial_moment %16.6f\n"
    magmoms = atoms.get_magnetic_moments()

    for n in range(N):
        lines += atom_line % (tuple(positions[n]) + (symbols[n],))
        if magmoms is not None:
            lines += initial_moment_line % magmoms[n]

    with open(filename, 'w') as f:
        f.write(lines)
[ "def", "write_aims", "(", "filename", ",", "atoms", ")", ":", "lines", "=", "\"\"", "lines", "+=", "\"# geometry.in for FHI-aims \\n\"", "lines", "+=", "\"# | generated by phonopy.FHIaims.write_aims() \\n\"", "lattice_vector_line", "=", "\"lattice_vector \"", "+", "\"%16.16f \"", "*", "3", "+", "\"\\n\"", "for", "vec", "in", "atoms", ".", "get_cell", "(", ")", ":", "lines", "+=", "lattice_vector_line", "%", "tuple", "(", "vec", ")", "N", "=", "atoms", ".", "get_number_of_atoms", "(", ")", "atom_line", "=", "\"atom \"", "+", "\"%16.16f \"", "*", "3", "+", "\"%s \\n\"", "positions", "=", "atoms", ".", "get_positions", "(", ")", "symbols", "=", "atoms", ".", "get_chemical_symbols", "(", ")", "initial_moment_line", "=", "\"initial_moment %16.6f\\n\"", "magmoms", "=", "atoms", ".", "get_magnetic_moments", "(", ")", "for", "n", "in", "range", "(", "N", ")", ":", "lines", "+=", "atom_line", "%", "(", "tuple", "(", "positions", "[", "n", "]", ")", "+", "(", "symbols", "[", "n", "]", ",", ")", ")", "if", "magmoms", "is", "not", "None", ":", "lines", "+=", "initial_moment_line", "%", "magmoms", "[", "n", "]", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "lines", ")" ]
31.518519
0.00114
def set_client_ca_list(self, certificate_authorities): """ Set the list of preferred client certificate signers for this server context. This list of certificate authorities will be sent to the client when the server requests a client certificate. :param certificate_authorities: a sequence of X509Names. :return: None .. versionadded:: 0.10 """ name_stack = _lib.sk_X509_NAME_new_null() _openssl_assert(name_stack != _ffi.NULL) try: for ca_name in certificate_authorities: if not isinstance(ca_name, X509Name): raise TypeError( "client CAs must be X509Name objects, not %s " "objects" % ( type(ca_name).__name__, ) ) copy = _lib.X509_NAME_dup(ca_name._name) _openssl_assert(copy != _ffi.NULL) push_result = _lib.sk_X509_NAME_push(name_stack, copy) if not push_result: _lib.X509_NAME_free(copy) _raise_current_error() except Exception: _lib.sk_X509_NAME_free(name_stack) raise _lib.SSL_CTX_set_client_CA_list(self._context, name_stack)
[ "def", "set_client_ca_list", "(", "self", ",", "certificate_authorities", ")", ":", "name_stack", "=", "_lib", ".", "sk_X509_NAME_new_null", "(", ")", "_openssl_assert", "(", "name_stack", "!=", "_ffi", ".", "NULL", ")", "try", ":", "for", "ca_name", "in", "certificate_authorities", ":", "if", "not", "isinstance", "(", "ca_name", ",", "X509Name", ")", ":", "raise", "TypeError", "(", "\"client CAs must be X509Name objects, not %s \"", "\"objects\"", "%", "(", "type", "(", "ca_name", ")", ".", "__name__", ",", ")", ")", "copy", "=", "_lib", ".", "X509_NAME_dup", "(", "ca_name", ".", "_name", ")", "_openssl_assert", "(", "copy", "!=", "_ffi", ".", "NULL", ")", "push_result", "=", "_lib", ".", "sk_X509_NAME_push", "(", "name_stack", ",", "copy", ")", "if", "not", "push_result", ":", "_lib", ".", "X509_NAME_free", "(", "copy", ")", "_raise_current_error", "(", ")", "except", "Exception", ":", "_lib", ".", "sk_X509_NAME_free", "(", "name_stack", ")", "raise", "_lib", ".", "SSL_CTX_set_client_CA_list", "(", "self", ".", "_context", ",", "name_stack", ")" ]
36.472222
0.001484
def mean_if_greater_than_zero(vals):
    """ Calculate mean over numerical values, ignoring values less than zero.
    E.g. used for mean time over coincident triggers when timestamps are
    set to -1 for ifos not included in the coincidence.

    Parameters
    ----------
    vals: iterator of numerical values
        values to be mean averaged

    Returns
    -------
    mean: float
        The mean of the values in the original vector which are
        greater than zero
    num_above_zero: int
        The number of entries in the vector which are above zero
    """
    vals = numpy.array(vals)
    above_zero = vals > 0
    return vals[above_zero].mean(), above_zero.sum()
[ "def", "mean_if_greater_than_zero", "(", "vals", ")", ":", "vals", "=", "numpy", ".", "array", "(", "vals", ")", "above_zero", "=", "vals", ">", "0", "return", "vals", "[", "above_zero", "]", ".", "mean", "(", ")", ",", "above_zero", ".", "sum", "(", ")" ]
31.619048
0.001462
def normalizeBoolean(value):
    """
    Normalizes a boolean.

    * **value** must be an ``int`` with value of 0 or 1, or a ``bool``.
    * Returned value will be a boolean.
    """
    if isinstance(value, int) and value in (0, 1):
        value = bool(value)
    if not isinstance(value, bool):
        raise ValueError("Boolean values must be True or False, not '%s'." % value)
    return value
[ "def", "normalizeBoolean", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "int", ")", "and", "value", "in", "(", "0", ",", "1", ")", ":", "value", "=", "bool", "(", "value", ")", "if", "not", "isinstance", "(", "value", ",", "bool", ")", ":", "raise", "ValueError", "(", "\"Boolean values must be True or False, not '%s'.\"", "%", "value", ")", "return", "value" ]
31.692308
0.002358
def which(cmd, safe=False):
    """https://github.com/jc0n/python-which"""
    from autopaths.file_path import FilePath
    def is_executable(path):
        return os.path.exists(path) and os.access(path, os.X_OK) and not os.path.isdir(path)
    path, name = os.path.split(cmd)
    if path:
        if is_executable(cmd):
            return FilePath(cmd)
    else:
        for path in os.environ['PATH'].split(os.pathsep):
            candidate = os.path.join(path, cmd)
            if is_executable(candidate):
                return FilePath(candidate)
    if not safe:
        raise Exception('which failed to locate a proper command path "%s"' % cmd)
[ "def", "which", "(", "cmd", ",", "safe", "=", "False", ")", ":", "from", "autopaths", ".", "file_path", "import", "FilePath", "def", "is_executable", "(", "path", ")", ":", "return", "os", ".", "path", ".", "exists", "(", "path", ")", "and", "os", ".", "access", "(", "path", ",", "os", ".", "X_OK", ")", "and", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", "path", ",", "name", "=", "os", ".", "path", ".", "split", "(", "cmd", ")", "if", "path", ":", "if", "is_executable", "(", "cmd", ")", ":", "return", "FilePath", "(", "cmd", ")", "else", ":", "for", "path", "in", "os", ".", "environ", "[", "'PATH'", "]", ".", "split", "(", "os", ".", "pathsep", ")", ":", "candidate", "=", "os", ".", "path", ".", "join", "(", "path", ",", "cmd", ")", "if", "is_executable", "(", "candidate", ")", ":", "return", "FilePath", "(", "candidate", ")", "if", "not", "safe", ":", "raise", "Exception", "(", "'which failed to locate a proper command path \"%s\"'", "%", "cmd", ")" ]
46.615385
0.011327
def set_power(self, state):
    """Sets the power state of the smart plug."""
    packet = bytearray(16)
    packet[0] = 2
    if self.check_nightlight():
        packet[4] = 3 if state else 2
    else:
        packet[4] = 1 if state else 0
    self.send_packet(0x6a, packet)
[ "def", "set_power", "(", "self", ",", "state", ")", ":", "packet", "=", "bytearray", "(", "16", ")", "packet", "[", "0", "]", "=", "2", "if", "self", ".", "check_nightlight", "(", ")", ":", "packet", "[", "4", "]", "=", "3", "if", "state", "else", "2", "else", ":", "packet", "[", "4", "]", "=", "1", "if", "state", "else", "0", "self", ".", "send_packet", "(", "0x6a", ",", "packet", ")" ]
29.222222
0.01107
def compare_schemas(one, two): """Compare two structures that represents JSON schemas. For comparison you can't use normal comparison, because in JSON schema lists DO NOT keep order (and Python lists do), so this must be taken into account during comparison. Note this wont check all configurations, only first one that seems to match, which can lead to wrong results. :param one: First schema to compare. :param two: Second schema to compare. :rtype: `bool` """ one = _normalize_string_type(one) two = _normalize_string_type(two) _assert_same_types(one, two) if isinstance(one, list): return _compare_lists(one, two) elif isinstance(one, dict): return _compare_dicts(one, two) elif isinstance(one, SCALAR_TYPES): return one == two elif one is None: return one is two else: raise RuntimeError('Not allowed type "{type}"'.format( type=type(one).__name__))
[ "def", "compare_schemas", "(", "one", ",", "two", ")", ":", "one", "=", "_normalize_string_type", "(", "one", ")", "two", "=", "_normalize_string_type", "(", "two", ")", "_assert_same_types", "(", "one", ",", "two", ")", "if", "isinstance", "(", "one", ",", "list", ")", ":", "return", "_compare_lists", "(", "one", ",", "two", ")", "elif", "isinstance", "(", "one", ",", "dict", ")", ":", "return", "_compare_dicts", "(", "one", ",", "two", ")", "elif", "isinstance", "(", "one", ",", "SCALAR_TYPES", ")", ":", "return", "one", "==", "two", "elif", "one", "is", "None", ":", "return", "one", "is", "two", "else", ":", "raise", "RuntimeError", "(", "'Not allowed type \"{type}\"'", ".", "format", "(", "type", "=", "type", "(", "one", ")", ".", "__name__", ")", ")" ]
30.774194
0.001016
def _handle_timeout(self) -> None: """Called by IOLoop when the requested timeout has passed.""" self._timeout = None while True: try: ret, num_handles = self._multi.socket_action(pycurl.SOCKET_TIMEOUT, 0) except pycurl.error as e: ret = e.args[0] if ret != pycurl.E_CALL_MULTI_PERFORM: break self._finish_pending_requests() # In theory, we shouldn't have to do this because curl will # call _set_timeout whenever the timeout changes. However, # sometimes after _handle_timeout we will need to reschedule # immediately even though nothing has changed from curl's # perspective. This is because when socket_action is # called with SOCKET_TIMEOUT, libcurl decides internally which # timeouts need to be processed by using a monotonic clock # (where available) while tornado uses python's time.time() # to decide when timeouts have occurred. When those clocks # disagree on elapsed time (as they will whenever there is an # NTP adjustment), tornado might call _handle_timeout before # libcurl is ready. After each timeout, resync the scheduled # timeout with libcurl's current state. new_timeout = self._multi.timeout() if new_timeout >= 0: self._set_timeout(new_timeout)
[ "def", "_handle_timeout", "(", "self", ")", "->", "None", ":", "self", ".", "_timeout", "=", "None", "while", "True", ":", "try", ":", "ret", ",", "num_handles", "=", "self", ".", "_multi", ".", "socket_action", "(", "pycurl", ".", "SOCKET_TIMEOUT", ",", "0", ")", "except", "pycurl", ".", "error", "as", "e", ":", "ret", "=", "e", ".", "args", "[", "0", "]", "if", "ret", "!=", "pycurl", ".", "E_CALL_MULTI_PERFORM", ":", "break", "self", ".", "_finish_pending_requests", "(", ")", "# In theory, we shouldn't have to do this because curl will", "# call _set_timeout whenever the timeout changes. However,", "# sometimes after _handle_timeout we will need to reschedule", "# immediately even though nothing has changed from curl's", "# perspective. This is because when socket_action is", "# called with SOCKET_TIMEOUT, libcurl decides internally which", "# timeouts need to be processed by using a monotonic clock", "# (where available) while tornado uses python's time.time()", "# to decide when timeouts have occurred. When those clocks", "# disagree on elapsed time (as they will whenever there is an", "# NTP adjustment), tornado might call _handle_timeout before", "# libcurl is ready. After each timeout, resync the scheduled", "# timeout with libcurl's current state.", "new_timeout", "=", "self", ".", "_multi", ".", "timeout", "(", ")", "if", "new_timeout", ">=", "0", ":", "self", ".", "_set_timeout", "(", "new_timeout", ")" ]
49.785714
0.002111
def _get_context(center_idx, sentence_boundaries, window_size, random_window_size, seed): """Compute the context with respect to a center word in a sentence. Takes an numpy array of sentences boundaries. """ random.seed(seed + center_idx) sentence_index = np.searchsorted(sentence_boundaries, center_idx) sentence_start, sentence_end = _get_sentence_start_end( sentence_boundaries, sentence_index) if random_window_size: window_size = random.randint(1, window_size) start_idx = max(sentence_start, center_idx - window_size) end_idx = min(sentence_end, center_idx + window_size + 1) if start_idx != center_idx and center_idx + 1 != end_idx: context = np.concatenate((np.arange(start_idx, center_idx), np.arange(center_idx + 1, end_idx))) elif start_idx != center_idx: context = np.arange(start_idx, center_idx) elif center_idx + 1 != end_idx: context = np.arange(center_idx + 1, end_idx) else: context = None return context
[ "def", "_get_context", "(", "center_idx", ",", "sentence_boundaries", ",", "window_size", ",", "random_window_size", ",", "seed", ")", ":", "random", ".", "seed", "(", "seed", "+", "center_idx", ")", "sentence_index", "=", "np", ".", "searchsorted", "(", "sentence_boundaries", ",", "center_idx", ")", "sentence_start", ",", "sentence_end", "=", "_get_sentence_start_end", "(", "sentence_boundaries", ",", "sentence_index", ")", "if", "random_window_size", ":", "window_size", "=", "random", ".", "randint", "(", "1", ",", "window_size", ")", "start_idx", "=", "max", "(", "sentence_start", ",", "center_idx", "-", "window_size", ")", "end_idx", "=", "min", "(", "sentence_end", ",", "center_idx", "+", "window_size", "+", "1", ")", "if", "start_idx", "!=", "center_idx", "and", "center_idx", "+", "1", "!=", "end_idx", ":", "context", "=", "np", ".", "concatenate", "(", "(", "np", ".", "arange", "(", "start_idx", ",", "center_idx", ")", ",", "np", ".", "arange", "(", "center_idx", "+", "1", ",", "end_idx", ")", ")", ")", "elif", "start_idx", "!=", "center_idx", ":", "context", "=", "np", ".", "arange", "(", "start_idx", ",", "center_idx", ")", "elif", "center_idx", "+", "1", "!=", "end_idx", ":", "context", "=", "np", ".", "arange", "(", "center_idx", "+", "1", ",", "end_idx", ")", "else", ":", "context", "=", "None", "return", "context" ]
36.37931
0.000923
def character_embedding_network(char_placeholder: tf.Tensor, n_characters: int = None, emb_mat: np.array = None, char_embedding_dim: int = None, filter_widths=(3, 4, 5, 7), highway_on_top=False): """ Characters to vector. Every sequence of characters (token) is embedded to vector space with dimensionality char_embedding_dim Convolution plus max_pooling is used to obtain vector representations of words. Args: char_placeholder: placeholder of int32 type with dimensionality [B, T, C] B - batch size (can be None) T - Number of tokens (can be None) C - number of characters (can be None) n_characters: total number of unique characters emb_mat: if n_characters is not provided the emb_mat should be provided it is a numpy array with dimensions [V, E], where V - vocabulary size and E - embeddings dimension char_embedding_dim: dimensionality of characters embeddings filter_widths: array of width of kernel in convolutional embedding network used in parallel Returns: embeddings: tf.Tensor with dimensionality [B, T, F], where F is dimensionality of embeddings """ if emb_mat is None: emb_mat = np.random.randn(n_characters, char_embedding_dim).astype(np.float32) / np.sqrt(char_embedding_dim) else: char_embedding_dim = emb_mat.shape[1] char_emb_var = tf.Variable(emb_mat, trainable=True) with tf.variable_scope('Char_Emb_Network'): # Character embedding layer c_emb = tf.nn.embedding_lookup(char_emb_var, char_placeholder) # Character embedding network conv_results_list = [] for filter_width in filter_widths: conv_results_list.append(tf.layers.conv2d(c_emb, char_embedding_dim, (1, filter_width), padding='same', kernel_initializer=INITIALIZER)) units = tf.concat(conv_results_list, axis=3) units = tf.reduce_max(units, axis=2) if highway_on_top: sigmoid_gate = tf.layers.dense(units, 1, activation=tf.sigmoid, kernel_initializer=INITIALIZER, kernel_regularizer=tf.nn.l2_loss) deeper_units = tf.layers.dense(units, tf.shape(units)[-1], kernel_initializer=INITIALIZER, kernel_regularizer=tf.nn.l2_loss) units = sigmoid_gate * units + (1 - sigmoid_gate) * deeper_units units = tf.nn.relu(units) return units
[ "def", "character_embedding_network", "(", "char_placeholder", ":", "tf", ".", "Tensor", ",", "n_characters", ":", "int", "=", "None", ",", "emb_mat", ":", "np", ".", "array", "=", "None", ",", "char_embedding_dim", ":", "int", "=", "None", ",", "filter_widths", "=", "(", "3", ",", "4", ",", "5", ",", "7", ")", ",", "highway_on_top", "=", "False", ")", ":", "if", "emb_mat", "is", "None", ":", "emb_mat", "=", "np", ".", "random", ".", "randn", "(", "n_characters", ",", "char_embedding_dim", ")", ".", "astype", "(", "np", ".", "float32", ")", "/", "np", ".", "sqrt", "(", "char_embedding_dim", ")", "else", ":", "char_embedding_dim", "=", "emb_mat", ".", "shape", "[", "1", "]", "char_emb_var", "=", "tf", ".", "Variable", "(", "emb_mat", ",", "trainable", "=", "True", ")", "with", "tf", ".", "variable_scope", "(", "'Char_Emb_Network'", ")", ":", "# Character embedding layer", "c_emb", "=", "tf", ".", "nn", ".", "embedding_lookup", "(", "char_emb_var", ",", "char_placeholder", ")", "# Character embedding network", "conv_results_list", "=", "[", "]", "for", "filter_width", "in", "filter_widths", ":", "conv_results_list", ".", "append", "(", "tf", ".", "layers", ".", "conv2d", "(", "c_emb", ",", "char_embedding_dim", ",", "(", "1", ",", "filter_width", ")", ",", "padding", "=", "'same'", ",", "kernel_initializer", "=", "INITIALIZER", ")", ")", "units", "=", "tf", ".", "concat", "(", "conv_results_list", ",", "axis", "=", "3", ")", "units", "=", "tf", ".", "reduce_max", "(", "units", ",", "axis", "=", "2", ")", "if", "highway_on_top", ":", "sigmoid_gate", "=", "tf", ".", "layers", ".", "dense", "(", "units", ",", "1", ",", "activation", "=", "tf", ".", "sigmoid", ",", "kernel_initializer", "=", "INITIALIZER", ",", "kernel_regularizer", "=", "tf", ".", "nn", ".", "l2_loss", ")", "deeper_units", "=", "tf", ".", "layers", ".", "dense", "(", "units", ",", "tf", ".", "shape", "(", "units", ")", "[", "-", "1", "]", ",", "kernel_initializer", "=", "INITIALIZER", ",", "kernel_regularizer", "=", "tf", ".", "nn", ".", "l2_loss", ")", "units", "=", "sigmoid_gate", "*", "units", "+", "(", "1", "-", "sigmoid_gate", ")", "*", "deeper_units", "units", "=", "tf", ".", "nn", ".", "relu", "(", "units", ")", "return", "units" ]
51.05
0.001922
def analyze(self, mode=None, timesteps=None): """Analyzes the grid by power flow analysis Analyze the grid for violations of hosting capacity. Means, perform a power flow analysis and obtain voltages at nodes (load, generator, stations/transformers and branch tees) and active/reactive power at lines. The power flow analysis can currently only be performed for both grid levels MV and LV. See ToDos section for more information. A static `non-linear power flow analysis is performed using PyPSA <https://www.pypsa.org/doc/power_flow.html#full-non-linear-power-flow>`_. The high-voltage to medium-voltage transformer are not included in the analysis. The slack bus is defined at secondary side of these transformers assuming an ideal tap changer. Hence, potential overloading of the transformers is not studied here. Parameters ---------- mode : str Allows to toggle between power flow analysis (PFA) on the whole grid topology (MV + LV), only MV or only LV. Defaults to None which equals power flow analysis for MV + LV which is the only implemented option at the moment. See ToDos section for more information. timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>` Timesteps specifies for which time steps to conduct the power flow analysis. It defaults to None in which case the time steps in timeseries.timeindex (see :class:`~.grid.network.TimeSeries`) are used. Notes ----- The current implementation always translates the grid topology representation to the PyPSA format and stores it to :attr:`self.network.pypsa`. ToDos ------ The option to export only the edisgo MV grid (mode = 'mv') to conduct a power flow analysis is implemented in :func:`~.tools.pypsa_io.to_pypsa` but NotImplementedError is raised since the rest of edisgo does not handle this option yet. The analyze function will throw an error since :func:`~.tools.pypsa_io.process_pfa_results` does not handle aggregated loads and generators in the LV grids. Also, grid reinforcement, pypsa update of time series, and probably other functionalities do not work when only the MV grid is analysed. Further ToDos are: * explain how power plants are modeled, if possible use a link * explain where to find and adjust power flow analysis defining parameters See Also -------- :func:`~.tools.pypsa_io.to_pypsa` Translator to PyPSA data format """ if timesteps is None: timesteps = self.network.timeseries.timeindex # check if timesteps is array-like, otherwise convert to list if not hasattr(timesteps, "__len__"): timesteps = [timesteps] if self.network.pypsa is None: # Translate eDisGo grid topology representation to PyPSA format self.network.pypsa = pypsa_io.to_pypsa( self.network, mode, timesteps) else: if self.network.pypsa.edisgo_mode is not mode: # Translate eDisGo grid topology representation to PyPSA format self.network.pypsa = pypsa_io.to_pypsa( self.network, mode, timesteps) # check if all timesteps are in pypsa.snapshots, if not update time # series if False in [True if _ in self.network.pypsa.snapshots else False for _ in timesteps]: pypsa_io.update_pypsa_timeseries(self.network, timesteps=timesteps) # run power flow analysis pf_results = self.network.pypsa.pf(timesteps) if all(pf_results['converged']['0'].tolist()): pypsa_io.process_pfa_results( self.network, self.network.pypsa, timesteps) else: raise ValueError("Power flow analysis did not converge.")
[ "def", "analyze", "(", "self", ",", "mode", "=", "None", ",", "timesteps", "=", "None", ")", ":", "if", "timesteps", "is", "None", ":", "timesteps", "=", "self", ".", "network", ".", "timeseries", ".", "timeindex", "# check if timesteps is array-like, otherwise convert to list", "if", "not", "hasattr", "(", "timesteps", ",", "\"__len__\"", ")", ":", "timesteps", "=", "[", "timesteps", "]", "if", "self", ".", "network", ".", "pypsa", "is", "None", ":", "# Translate eDisGo grid topology representation to PyPSA format", "self", ".", "network", ".", "pypsa", "=", "pypsa_io", ".", "to_pypsa", "(", "self", ".", "network", ",", "mode", ",", "timesteps", ")", "else", ":", "if", "self", ".", "network", ".", "pypsa", ".", "edisgo_mode", "is", "not", "mode", ":", "# Translate eDisGo grid topology representation to PyPSA format", "self", ".", "network", ".", "pypsa", "=", "pypsa_io", ".", "to_pypsa", "(", "self", ".", "network", ",", "mode", ",", "timesteps", ")", "# check if all timesteps are in pypsa.snapshots, if not update time", "# series", "if", "False", "in", "[", "True", "if", "_", "in", "self", ".", "network", ".", "pypsa", ".", "snapshots", "else", "False", "for", "_", "in", "timesteps", "]", ":", "pypsa_io", ".", "update_pypsa_timeseries", "(", "self", ".", "network", ",", "timesteps", "=", "timesteps", ")", "# run power flow analysis", "pf_results", "=", "self", ".", "network", ".", "pypsa", ".", "pf", "(", "timesteps", ")", "if", "all", "(", "pf_results", "[", "'converged'", "]", "[", "'0'", "]", ".", "tolist", "(", ")", ")", ":", "pypsa_io", ".", "process_pfa_results", "(", "self", ".", "network", ",", "self", ".", "network", ".", "pypsa", ",", "timesteps", ")", "else", ":", "raise", "ValueError", "(", "\"Power flow analysis did not converge.\"", ")" ]
45.1
0.000723
def set_trace(host=None, port=None, patch_stdstreams=False):
    """
    Opens a remote PDB on first available port.
    """
    if host is None:
        host = os.environ.get('REMOTE_PDB_HOST', '127.0.0.1')
    if port is None:
        port = int(os.environ.get('REMOTE_PDB_PORT', '0'))
    rdb = RemotePdb(host=host, port=port, patch_stdstreams=patch_stdstreams)
    rdb.set_trace(frame=sys._getframe().f_back)
[ "def", "set_trace", "(", "host", "=", "None", ",", "port", "=", "None", ",", "patch_stdstreams", "=", "False", ")", ":", "if", "host", "is", "None", ":", "host", "=", "os", ".", "environ", ".", "get", "(", "'REMOTE_PDB_HOST'", ",", "'127.0.0.1'", ")", "if", "port", "is", "None", ":", "port", "=", "int", "(", "os", ".", "environ", ".", "get", "(", "'REMOTE_PDB_PORT'", ",", "'0'", ")", ")", "rdb", "=", "RemotePdb", "(", "host", "=", "host", ",", "port", "=", "port", ",", "patch_stdstreams", "=", "patch_stdstreams", ")", "rdb", ".", "set_trace", "(", "frame", "=", "sys", ".", "_getframe", "(", ")", ".", "f_back", ")" ]
40.3
0.002427
def truncate_string(value, max_width=None):
    """Truncate string values."""
    if isinstance(value, text_type) and max_width is not None and len(value) > max_width:
        return value[:max_width]
    return value
[ "def", "truncate_string", "(", "value", ",", "max_width", "=", "None", ")", ":", "if", "isinstance", "(", "value", ",", "text_type", ")", "and", "max_width", "is", "not", "None", "and", "len", "(", "value", ")", ">", "max_width", ":", "return", "value", "[", ":", "max_width", "]", "return", "value" ]
42.6
0.009217
def plan_results(self, project_key, plan_key, expand=None, favourite=False, clover_enabled=False, label=None, issue_key=None, start_index=0, max_results=25): """ Get Plan results :param project_key: :param plan_key: :param expand: :param favourite: :param clover_enabled: :param label: :param issue_key: :param start_index: :param max_results: :return: """ return self.results(project_key, plan_key, expand=expand, favourite=favourite, clover_enabled=clover_enabled, label=label, issue_key=issue_key, start_index=start_index, max_results=max_results)
[ "def", "plan_results", "(", "self", ",", "project_key", ",", "plan_key", ",", "expand", "=", "None", ",", "favourite", "=", "False", ",", "clover_enabled", "=", "False", ",", "label", "=", "None", ",", "issue_key", "=", "None", ",", "start_index", "=", "0", ",", "max_results", "=", "25", ")", ":", "return", "self", ".", "results", "(", "project_key", ",", "plan_key", ",", "expand", "=", "expand", ",", "favourite", "=", "favourite", ",", "clover_enabled", "=", "clover_enabled", ",", "label", "=", "label", ",", "issue_key", "=", "issue_key", ",", "start_index", "=", "start_index", ",", "max_results", "=", "max_results", ")" ]
40.882353
0.008439
def cylinder(target, throat_length='throat.length',
             throat_diameter='throat.diameter'):
    r"""
    Calculate throat volume assuing a cylindrical shape

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls the
        length of the calculated array, and also provides access to other
        necessary properties.

    throat_length and throat_diameter : strings
        The dictionary keys containing the arrays with the throat diameter
        and length values.

    Notes
    -----
    At present this models does NOT account for the volume reprsented by the
    intersection of the throat with a spherical pore body.
    """
    leng = target[throat_length]
    diam = target[throat_diameter]
    value = _sp.pi/4*leng*diam**2
    return value
[ "def", "cylinder", "(", "target", ",", "throat_length", "=", "'throat.length'", ",", "throat_diameter", "=", "'throat.diameter'", ")", ":", "leng", "=", "target", "[", "throat_length", "]", "diam", "=", "target", "[", "throat_diameter", "]", "value", "=", "_sp", ".", "pi", "/", "4", "*", "leng", "*", "diam", "**", "2", "return", "value" ]
32.52
0.001195
def merge_webhooks_runset(runset):
    """Make some statistics on the run set.
    """
    min_started_at = min([w['started_at'] for w in runset])
    max_ended_at = max([w['ended_at'] for w in runset])
    ellapse = max_ended_at - min_started_at
    errors_count = sum(1 for w in runset if 'error' in w)
    total_count = len(runset)
    data = dict(
        ellapse=ellapse,
        errors_count=errors_count,
        total_count=total_count,
    )
    return data
[ "def", "merge_webhooks_runset", "(", "runset", ")", ":", "min_started_at", "=", "min", "(", "[", "w", "[", "'started_at'", "]", "for", "w", "in", "runset", "]", ")", "max_ended_at", "=", "max", "(", "[", "w", "[", "'ended_at'", "]", "for", "w", "in", "runset", "]", ")", "ellapse", "=", "max_ended_at", "-", "min_started_at", "errors_count", "=", "sum", "(", "1", "for", "w", "in", "runset", "if", "'error'", "in", "w", ")", "total_count", "=", "len", "(", "runset", ")", "data", "=", "dict", "(", "ellapse", "=", "ellapse", ",", "errors_count", "=", "errors_count", ",", "total_count", "=", "total_count", ",", ")", "return", "data" ]
26.647059
0.002132
def _string(self):
    """:return: the string from a :class:`io.StringIO`"""
    file = StringIO()
    self.__dump_to_file(file)
    file.seek(0)
    return file.read()
[ "def", "_string", "(", "self", ")", ":", "file", "=", "StringIO", "(", ")", "self", ".", "__dump_to_file", "(", "file", ")", "file", ".", "seek", "(", "0", ")", "return", "file", ".", "read", "(", ")" ]
30.5
0.010638
def idaunpack(buf): """ Special data packing format, used in struct definitions, and .id2 files sdk functions: pack_dd etc. """ buf = bytearray(buf) def nextval(o): val = buf[o] ; o += 1 if val == 0xff: # 32 bit value val, = struct.unpack_from(">L", buf, o) o += 4 return val, o if val < 0x80: # 7 bit value return val, o val <<= 8 val |= buf[o] ; o += 1 if val < 0xc000: # 14 bit value return val & 0x3fff, o # 29 bit value val <<= 8 val |= buf[o] ; o += 1 val <<= 8 val |= buf[o] ; o += 1 return val & 0x1fffffff, o values = [] o = 0 while o < len(buf): val, o = nextval(o) values.append(val) return values
[ "def", "idaunpack", "(", "buf", ")", ":", "buf", "=", "bytearray", "(", "buf", ")", "def", "nextval", "(", "o", ")", ":", "val", "=", "buf", "[", "o", "]", "o", "+=", "1", "if", "val", "==", "0xff", ":", "# 32 bit value\r", "val", ",", "=", "struct", ".", "unpack_from", "(", "\">L\"", ",", "buf", ",", "o", ")", "o", "+=", "4", "return", "val", ",", "o", "if", "val", "<", "0x80", ":", "# 7 bit value\r", "return", "val", ",", "o", "val", "<<=", "8", "val", "|=", "buf", "[", "o", "]", "o", "+=", "1", "if", "val", "<", "0xc000", ":", "# 14 bit value\r", "return", "val", "&", "0x3fff", ",", "o", "# 29 bit value\r", "val", "<<=", "8", "val", "|=", "buf", "[", "o", "]", "o", "+=", "1", "val", "<<=", "8", "val", "|=", "buf", "[", "o", "]", "o", "+=", "1", "return", "val", "&", "0x1fffffff", ",", "o", "values", "=", "[", "]", "o", "=", "0", "while", "o", "<", "len", "(", "buf", ")", ":", "val", ",", "o", "=", "nextval", "(", "o", ")", "values", ".", "append", "(", "val", ")", "return", "values" ]
24.323529
0.010465
def get_all_requisite_objectives(self, objective_id=None): """Gets a list of Objectives that are the requisites for the given Objective including the requistes of the requisites, and so on. In plenary mode, the returned list contains all of the immediate requisites, or an error results if an Objective is not found or inaccessible. Otherwise, inaccessible Objectives may be omitted from the list and may present the elements in any order including returning a unique set. arg: objective_id (osid.id.Id): Id of the Objective return: (osid.learning.ObjectiveList) - the returned Objective list raise: NotFound - objective_id not found raise: NullArgument - objective_id is null raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure compliance: mandatory - This method must be implemented. """ # This should be re-implemented if and when handcar supports # getting all requisites directly requisites = list() requisite_ids = list() all_requisites = self._get_requisites_recursively(objective_id, requisites, requisite_ids) return objects.ObjectiveList(all_requisites)
[ "def", "get_all_requisite_objectives", "(", "self", ",", "objective_id", "=", "None", ")", ":", "# This should be re-implemented if and when handcar supports", "# getting all requisites directly", "requisites", "=", "list", "(", ")", "requisite_ids", "=", "list", "(", ")", "all_requisites", "=", "self", ".", "_get_requisites_recursively", "(", "objective_id", ",", "requisites", ",", "requisite_ids", ")", "return", "objects", ".", "ObjectiveList", "(", "all_requisites", ")" ]
49.230769
0.002299
def _updateParamsFrom(self, *args, **kwargs):
    """
    :note: doc in :func:`~hwt.synthesizer.interfaceLevel.propDeclCollector._updateParamsFrom`
    """
    for o in self:
        o._updateParamsFrom(*args, **kwargs)
[ "def", "_updateParamsFrom", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "o", "in", "self", ":", "o", ".", "_updateParamsFrom", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
39
0.012552
def compute_mem_overhead(self):
    """Returns memory overhead."""
    self.mem_overhead = (self._process.memory_info().rss -
                         builtins.initial_rss_size)
[ "def", "compute_mem_overhead", "(", "self", ")", ":", "self", ".", "mem_overhead", "=", "(", "self", ".", "_process", ".", "memory_info", "(", ")", ".", "rss", "-", "builtins", ".", "initial_rss_size", ")" ]
46.5
0.010582
def colour_rgb(self):
    """Return colour as RGB value"""
    hexvalue = self.status()[self.DPS][self.DPS_INDEX_COLOUR]
    return BulbDevice._hexvalue_to_rgb(hexvalue)
[ "def", "colour_rgb", "(", "self", ")", ":", "hexvalue", "=", "self", ".", "status", "(", ")", "[", "self", ".", "DPS", "]", "[", "self", ".", "DPS_INDEX_COLOUR", "]", "return", "BulbDevice", ".", "_hexvalue_to_rgb", "(", "hexvalue", ")" ]
44.5
0.01105
def EQ106(T, Tc, A, B, C=0, D=0, E=0): r'''DIPPR Equation #106. Often used in calculating liquid surface tension, and heat of vaporization. Only parameters A and B parameters are required; many fits include no further parameters. Critical temperature is also required. .. math:: Y = A(1-T_r)^{B + C T_r + D T_r^2 + E T_r^3} Tr = \frac{T}{Tc} Parameters ---------- T : float Temperature, [K] Tc : float Critical temperature, [K] A-D : float Parameter for the equation; chemical and property specific [-] Returns ------- Y : float Property [constant-specific] Notes ----- The integral could not be found, but the integral over T actually could, again in terms of hypergeometric functions. Examples -------- Water surface tension; DIPPR coefficients normally in Pa*s. >>> EQ106(300, 647.096, 0.17766, 2.567, -3.3377, 1.9699) 0.07231499373541 References ---------- .. [1] Design Institute for Physical Properties, 1996. DIPPR Project 801 DIPPR/AIChE ''' Tr = T/Tc return A*(1. - Tr)**(B + Tr*(C + Tr*(D + E*Tr)))
[ "def", "EQ106", "(", "T", ",", "Tc", ",", "A", ",", "B", ",", "C", "=", "0", ",", "D", "=", "0", ",", "E", "=", "0", ")", ":", "Tr", "=", "T", "/", "Tc", "return", "A", "*", "(", "1.", "-", "Tr", ")", "**", "(", "B", "+", "Tr", "*", "(", "C", "+", "Tr", "*", "(", "D", "+", "E", "*", "Tr", ")", ")", ")" ]
25.909091
0.000845
def append_position_to_token_list(token_list):
    """Converts a list of Token into a list of Token, asuming size == 1"""
    return [PositionToken(value.content, value.gd, index, index+1) for (index, value) in enumerate(token_list)]
[ "def", "append_position_to_token_list", "(", "token_list", ")", ":", "return", "[", "PositionToken", "(", "value", ".", "content", ",", "value", ".", "gd", ",", "index", ",", "index", "+", "1", ")", "for", "(", "index", ",", "value", ")", "in", "enumerate", "(", "token_list", ")", "]" ]
77
0.008584
def delete_mappings_in_network(network_id, network_2_id=None, **kwargs): """ Delete all the resource attribute mappings in a network. If another network is specified, only delete the mappings between the two networks. """ qry = db.DBSession.query(ResourceAttrMap).filter(or_(ResourceAttrMap.network_a_id == network_id, ResourceAttrMap.network_b_id == network_id)) if network_2_id is not None: qry = qry.filter(or_(ResourceAttrMap.network_a_id==network_2_id, ResourceAttrMap.network_b_id==network_2_id)) mappings = qry.all() for m in mappings: db.DBSession.delete(m) db.DBSession.flush() return 'OK'
[ "def", "delete_mappings_in_network", "(", "network_id", ",", "network_2_id", "=", "None", ",", "*", "*", "kwargs", ")", ":", "qry", "=", "db", ".", "DBSession", ".", "query", "(", "ResourceAttrMap", ")", ".", "filter", "(", "or_", "(", "ResourceAttrMap", ".", "network_a_id", "==", "network_id", ",", "ResourceAttrMap", ".", "network_b_id", "==", "network_id", ")", ")", "if", "network_2_id", "is", "not", "None", ":", "qry", "=", "qry", ".", "filter", "(", "or_", "(", "ResourceAttrMap", ".", "network_a_id", "==", "network_2_id", ",", "ResourceAttrMap", ".", "network_b_id", "==", "network_2_id", ")", ")", "mappings", "=", "qry", ".", "all", "(", ")", "for", "m", "in", "mappings", ":", "db", ".", "DBSession", ".", "delete", "(", "m", ")", "db", ".", "DBSession", ".", "flush", "(", ")", "return", "'OK'" ]
38.235294
0.009009
def register_new_suffix_tree(case_insensitive=False):
    """Factory method, returns new suffix tree object.
    """
    assert isinstance(case_insensitive, bool)
    root_node = register_new_node()
    suffix_tree_id = uuid4()
    event = SuffixTree.Created(
        originator_id=suffix_tree_id,
        root_node_id=root_node.id,
        case_insensitive=case_insensitive,
    )
    entity = SuffixTree.mutate(event=event)
    assert isinstance(entity, SuffixTree)
    entity.nodes[root_node.id] = root_node
    publish(event)
    return entity
[ "def", "register_new_suffix_tree", "(", "case_insensitive", "=", "False", ")", ":", "assert", "isinstance", "(", "case_insensitive", ",", "bool", ")", "root_node", "=", "register_new_node", "(", ")", "suffix_tree_id", "=", "uuid4", "(", ")", "event", "=", "SuffixTree", ".", "Created", "(", "originator_id", "=", "suffix_tree_id", ",", "root_node_id", "=", "root_node", ".", "id", ",", "case_insensitive", "=", "case_insensitive", ",", ")", "entity", "=", "SuffixTree", ".", "mutate", "(", "event", "=", "event", ")", "assert", "isinstance", "(", "entity", ",", "SuffixTree", ")", "entity", ".", "nodes", "[", "root_node", ".", "id", "]", "=", "root_node", "publish", "(", "event", ")", "return", "entity" ]
25.333333
0.001812
def get_or_create_in_transaction_wrapper(tsession, model, values, missing_columns = [], variable_columns = [], updatable_columns = [], only_use_supplied_columns = False, read_only = False): '''This function can be used to determine which calling method is spending time in get_or_create_in_transaction when profiling the database API. Switch out calls to get_or_create_in_transaction to get_or_create_in_transaction_wrapper in the suspected functions to determine where the pain lies.''' return get_or_create_in_transaction(tsession, model, values, missing_columns = missing_columns, variable_columns = variable_columns, updatable_columns = updatable_columns, only_use_supplied_columns = only_use_supplied_columns, read_only = read_only)
[ "def", "get_or_create_in_transaction_wrapper", "(", "tsession", ",", "model", ",", "values", ",", "missing_columns", "=", "[", "]", ",", "variable_columns", "=", "[", "]", ",", "updatable_columns", "=", "[", "]", ",", "only_use_supplied_columns", "=", "False", ",", "read_only", "=", "False", ")", ":", "return", "get_or_create_in_transaction", "(", "tsession", ",", "model", ",", "values", ",", "missing_columns", "=", "missing_columns", ",", "variable_columns", "=", "variable_columns", ",", "updatable_columns", "=", "updatable_columns", ",", "only_use_supplied_columns", "=", "only_use_supplied_columns", ",", "read_only", "=", "read_only", ")" ]
187.25
0.033245
def download_url(url, save_as, iter_size=_default_iter_size, enable_verbose=True): """A simple url binary content download function with progress info. Warning: this function will silently overwrite existing file. """ msg = Messenger() if enable_verbose: msg.on() else: msg.off() msg.show("Downloading %s from %s..." % (save_as, url)) with open(save_as, "wb") as f: response = requests.get(url, stream=True) if not response.ok: print("http get error!") return start_time = time.clock() downloaded_size = 0 for block in response.iter_content(iter_size): if not block: break f.write(block) elapse = datetime.timedelta(seconds=(time.clock() - start_time)) downloaded_size += sys.getsizeof(block) msg.show(" Finished %s, elapse %s." % ( string_SizeInBytes(downloaded_size), elapse )) msg.show(" Complete!")
[ "def", "download_url", "(", "url", ",", "save_as", ",", "iter_size", "=", "_default_iter_size", ",", "enable_verbose", "=", "True", ")", ":", "msg", "=", "Messenger", "(", ")", "if", "enable_verbose", ":", "msg", ".", "on", "(", ")", "else", ":", "msg", ".", "off", "(", ")", "msg", ".", "show", "(", "\"Downloading %s from %s...\"", "%", "(", "save_as", ",", "url", ")", ")", "with", "open", "(", "save_as", ",", "\"wb\"", ")", "as", "f", ":", "response", "=", "requests", ".", "get", "(", "url", ",", "stream", "=", "True", ")", "if", "not", "response", ".", "ok", ":", "print", "(", "\"http get error!\"", ")", "return", "start_time", "=", "time", ".", "clock", "(", ")", "downloaded_size", "=", "0", "for", "block", "in", "response", ".", "iter_content", "(", "iter_size", ")", ":", "if", "not", "block", ":", "break", "f", ".", "write", "(", "block", ")", "elapse", "=", "datetime", ".", "timedelta", "(", "seconds", "=", "(", "time", ".", "clock", "(", ")", "-", "start_time", ")", ")", "downloaded_size", "+=", "sys", ".", "getsizeof", "(", "block", ")", "msg", ".", "show", "(", "\" Finished %s, elapse %s.\"", "%", "(", "string_SizeInBytes", "(", "downloaded_size", ")", ",", "elapse", ")", ")", "msg", ".", "show", "(", "\" Complete!\"", ")" ]
30.363636
0.001934
def create_dvportgroup(portgroup_dict, portgroup_name, dvs, service_instance=None): ''' Creates a distributed virtual portgroup. Note: The ``portgroup_name`` param will override any name already set in ``portgroup_dict``. portgroup_dict Dictionary with the config values the portgroup should be created with (example in salt.states.dvs). portgroup_name Name of the portgroup to be created. dvs Name of the DVS that will contain the portgroup. service_instance Service instance (vim.ServiceInstance) of the vCenter. Default is None. .. code-block:: bash salt '*' vsphere.create_dvportgroup portgroup_dict=<dict> portgroup_name=pg1 dvs=dvs1 ''' log.trace('Creating portgroup\'%s\' in dvs \'%s\' ' 'with dict = %s', portgroup_name, dvs, portgroup_dict) proxy_type = get_proxy_type() if proxy_type == 'esxdatacenter': datacenter = __salt__['esxdatacenter.get_details']()['datacenter'] dc_ref = _get_proxy_target(service_instance) elif proxy_type == 'esxcluster': datacenter = __salt__['esxcluster.get_details']()['datacenter'] dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter) dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs]) if not dvs_refs: raise VMwareObjectRetrievalError('DVS \'{0}\' was not ' 'retrieved'.format(dvs)) # Make the name of the dvportgroup consistent with the parameter portgroup_dict['name'] = portgroup_name spec = vim.DVPortgroupConfigSpec() _apply_dvportgroup_config(portgroup_name, spec, portgroup_dict) salt.utils.vmware.create_dvportgroup(dvs_refs[0], spec) return True
[ "def", "create_dvportgroup", "(", "portgroup_dict", ",", "portgroup_name", ",", "dvs", ",", "service_instance", "=", "None", ")", ":", "log", ".", "trace", "(", "'Creating portgroup\\'%s\\' in dvs \\'%s\\' '", "'with dict = %s'", ",", "portgroup_name", ",", "dvs", ",", "portgroup_dict", ")", "proxy_type", "=", "get_proxy_type", "(", ")", "if", "proxy_type", "==", "'esxdatacenter'", ":", "datacenter", "=", "__salt__", "[", "'esxdatacenter.get_details'", "]", "(", ")", "[", "'datacenter'", "]", "dc_ref", "=", "_get_proxy_target", "(", "service_instance", ")", "elif", "proxy_type", "==", "'esxcluster'", ":", "datacenter", "=", "__salt__", "[", "'esxcluster.get_details'", "]", "(", ")", "[", "'datacenter'", "]", "dc_ref", "=", "salt", ".", "utils", ".", "vmware", ".", "get_datacenter", "(", "service_instance", ",", "datacenter", ")", "dvs_refs", "=", "salt", ".", "utils", ".", "vmware", ".", "get_dvss", "(", "dc_ref", ",", "dvs_names", "=", "[", "dvs", "]", ")", "if", "not", "dvs_refs", ":", "raise", "VMwareObjectRetrievalError", "(", "'DVS \\'{0}\\' was not '", "'retrieved'", ".", "format", "(", "dvs", ")", ")", "# Make the name of the dvportgroup consistent with the parameter", "portgroup_dict", "[", "'name'", "]", "=", "portgroup_name", "spec", "=", "vim", ".", "DVPortgroupConfigSpec", "(", ")", "_apply_dvportgroup_config", "(", "portgroup_name", ",", "spec", ",", "portgroup_dict", ")", "salt", ".", "utils", ".", "vmware", ".", "create_dvportgroup", "(", "dvs_refs", "[", "0", "]", ",", "spec", ")", "return", "True" ]
38.326087
0.000553
def _onLeftButtonDClick(self, evt):
    """Start measuring on an axis."""
    x = evt.GetX()
    y = self.figure.bbox.height - evt.GetY()
    evt.Skip()
    self.CaptureMouse()
    FigureCanvasBase.button_press_event(self, x, y, 1, dblclick=True, guiEvent=evt)
[ "def", "_onLeftButtonDClick", "(", "self", ",", "evt", ")", ":", "x", "=", "evt", ".", "GetX", "(", ")", "y", "=", "self", ".", "figure", ".", "bbox", ".", "height", "-", "evt", ".", "GetY", "(", ")", "evt", ".", "Skip", "(", ")", "self", ".", "CaptureMouse", "(", ")", "FigureCanvasBase", ".", "button_press_event", "(", "self", ",", "x", ",", "y", ",", "1", ",", "dblclick", "=", "True", ",", "guiEvent", "=", "evt", ")" ]
39.714286
0.010563
def _position_in_feature(pos_a, pos_b): """return distance to 3' and 5' end of the feature""" strd = "-" if pos_a[2] in pos_b[2]: strd = "+" if pos_a[2] in "+" and pos_b[2] in "+": lento5 = pos_a[0] - pos_b[1] + 1 lento3 = pos_a[1] - pos_b[1] + 1 if pos_a[2] in "+" and pos_b[2] in "-": lento5 = pos_a[1] - pos_b[0] + 1 lento3 = pos_a[0] - pos_b[1] + 1 if pos_a[2] in "-" and pos_b[2] in "+": lento5 = pos_a[0] - pos_b[1] + 1 lento3 = pos_a[1] - pos_b[0] + 1 if pos_a[2] in "-" and pos_b[2] in "-": lento3 = pos_a[0] - pos_b[0] + 1 lento5 = pos_a[1] - pos_b[1] + 1 else: lento5 = pos_a[0] - pos_b[0] + 1 lento3 = pos_a[1] - pos_b[1] + 1 return lento5, lento3, strd
[ "def", "_position_in_feature", "(", "pos_a", ",", "pos_b", ")", ":", "strd", "=", "\"-\"", "if", "pos_a", "[", "2", "]", "in", "pos_b", "[", "2", "]", ":", "strd", "=", "\"+\"", "if", "pos_a", "[", "2", "]", "in", "\"+\"", "and", "pos_b", "[", "2", "]", "in", "\"+\"", ":", "lento5", "=", "pos_a", "[", "0", "]", "-", "pos_b", "[", "1", "]", "+", "1", "lento3", "=", "pos_a", "[", "1", "]", "-", "pos_b", "[", "1", "]", "+", "1", "if", "pos_a", "[", "2", "]", "in", "\"+\"", "and", "pos_b", "[", "2", "]", "in", "\"-\"", ":", "lento5", "=", "pos_a", "[", "1", "]", "-", "pos_b", "[", "0", "]", "+", "1", "lento3", "=", "pos_a", "[", "0", "]", "-", "pos_b", "[", "1", "]", "+", "1", "if", "pos_a", "[", "2", "]", "in", "\"-\"", "and", "pos_b", "[", "2", "]", "in", "\"+\"", ":", "lento5", "=", "pos_a", "[", "0", "]", "-", "pos_b", "[", "1", "]", "+", "1", "lento3", "=", "pos_a", "[", "1", "]", "-", "pos_b", "[", "0", "]", "+", "1", "if", "pos_a", "[", "2", "]", "in", "\"-\"", "and", "pos_b", "[", "2", "]", "in", "\"-\"", ":", "lento3", "=", "pos_a", "[", "0", "]", "-", "pos_b", "[", "0", "]", "+", "1", "lento5", "=", "pos_a", "[", "1", "]", "-", "pos_b", "[", "1", "]", "+", "1", "else", ":", "lento5", "=", "pos_a", "[", "0", "]", "-", "pos_b", "[", "0", "]", "+", "1", "lento3", "=", "pos_a", "[", "1", "]", "-", "pos_b", "[", "1", "]", "+", "1", "return", "lento5", ",", "lento3", ",", "strd" ]
36.571429
0.001269
def remove_password_from_url(url):
    # type: (S) -> S
    """
    Given a url, remove the password and insert 4 dashes

    :param url: The url to replace the authentication in
    :type url: S

    :return: The new URL without authentication
    :rtype: S
    """
    parsed = _get_parsed_url(url)
    if parsed.auth:
        auth, _, _ = parsed.auth.partition(":")
        return parsed._replace(auth="{auth}:----".format(auth=auth)).url
    return parsed.url
[ "def", "remove_password_from_url", "(", "url", ")", ":", "# type: (S) -> S", "parsed", "=", "_get_parsed_url", "(", "url", ")", "if", "parsed", ".", "auth", ":", "auth", ",", "_", ",", "_", "=", "parsed", ".", "auth", ".", "partition", "(", "\":\"", ")", "return", "parsed", ".", "_replace", "(", "auth", "=", "\"{auth}:----\"", ".", "format", "(", "auth", "=", "auth", ")", ")", ".", "url", "return", "parsed", ".", "url" ]
28
0.00216
def from_zipfile(cls, path, filename, encoding, dialect, fields, converters):
    """Read delimited text from zipfile."""
    stream = ZipReader(path, filename).readlines(encoding)
    return cls(stream, dialect, fields, converters)
[ "def", "from_zipfile", "(", "cls", ",", "path", ",", "filename", ",", "encoding", ",", "dialect", ",", "fields", ",", "converters", ")", ":", "stream", "=", "ZipReader", "(", "path", ",", "filename", ")", ".", "readlines", "(", "encoding", ")", "return", "cls", "(", "stream", ",", "dialect", ",", "fields", ",", "converters", ")" ]
48.2
0.008163
def get_checklist(self, id, name=None):
    '''
    Get a checklist

    Returns:
        Checklist: The checklist with the given `id`
    '''
    return self.create_checklist(dict(id=id, name=name))
[ "def", "get_checklist", "(", "self", ",", "id", ",", "name", "=", "None", ")", ":", "return", "self", ".", "create_checklist", "(", "dict", "(", "id", "=", "id", ",", "name", "=", "name", ")", ")" ]
27
0.008969
def _get_ssl(self):
    """Get an SMTP session with SSL."""
    return smtplib.SMTP_SSL(
        self.server, self.port, context=ssl.create_default_context()
    )
[ "def", "_get_ssl", "(", "self", ")", ":", "return", "smtplib", ".", "SMTP_SSL", "(", "self", ".", "server", ",", "self", ".", "port", ",", "context", "=", "ssl", ".", "create_default_context", "(", ")", ")" ]
35
0.011173
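A hedged sketch of how such a helper is typically used, assuming `mailer` is an instance of the class this method belongs to (with `server` and `port` set) and `message` is an email.message.EmailMessage built elsewhere:

with mailer._get_ssl() as smtp:
    smtp.login("user@example.com", "app-password")  # illustrative credentials
    smtp.send_message(message)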
def get_turnover(positions, transactions, denominator='AGB'):
    """
    - Value of purchases and sales divided by either the actual gross book
      or the portfolio value for the time step.

    Parameters
    ----------
    positions : pd.DataFrame
        Contains daily position values including cash.
        - See full explanation in tears.create_full_tear_sheet
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
        - See full explanation in tears.create_full_tear_sheet
    denominator : str, optional
        Either 'AGB' or 'portfolio_value', default AGB.
        - AGB (Actual gross book) is the gross market value (GMV) of the
          specific algo being analyzed. Swapping out an entire portfolio of
          stocks for another will yield 200% turnover, not 100%, since
          transactions are being made for both sides.
        - We use average of the previous and the current end-of-period AGB
          to avoid singularities when trading only into or out of an entire
          book in one trading period.
        - portfolio_value is the total value of the algo's positions
          end-of-period, including cash.

    Returns
    -------
    turnover_rate : pd.Series
        timeseries of portfolio turnover rates.
    """
    txn_vol = get_txn_vol(transactions)
    traded_value = txn_vol.txn_volume

    if denominator == 'AGB':
        # Actual gross book is the same thing as the algo's GMV
        # We want our denom to be avg(AGB previous, AGB current)
        AGB = positions.drop('cash', axis=1).abs().sum(axis=1)
        denom = AGB.rolling(2).mean()

        # Since the first value of pd.rolling returns NaN, we
        # set our "day 0" AGB to 0.
        denom.iloc[0] = AGB.iloc[0] / 2
    elif denominator == 'portfolio_value':
        denom = positions.sum(axis=1)
    else:
        raise ValueError(
            "Unexpected value for denominator '{}'. The "
            "denominator parameter must be either 'AGB'"
            " or 'portfolio_value'.".format(denominator)
        )

    denom.index = denom.index.normalize()
    turnover = traded_value.div(denom, axis='index')
    turnover = turnover.fillna(0)
    return turnover
[ "def", "get_turnover", "(", "positions", ",", "transactions", ",", "denominator", "=", "'AGB'", ")", ":", "txn_vol", "=", "get_txn_vol", "(", "transactions", ")", "traded_value", "=", "txn_vol", ".", "txn_volume", "if", "denominator", "==", "'AGB'", ":", "# Actual gross book is the same thing as the algo's GMV", "# We want our denom to be avg(AGB previous, AGB current)", "AGB", "=", "positions", ".", "drop", "(", "'cash'", ",", "axis", "=", "1", ")", ".", "abs", "(", ")", ".", "sum", "(", "axis", "=", "1", ")", "denom", "=", "AGB", ".", "rolling", "(", "2", ")", ".", "mean", "(", ")", "# Since the first value of pd.rolling returns NaN, we", "# set our \"day 0\" AGB to 0.", "denom", ".", "iloc", "[", "0", "]", "=", "AGB", ".", "iloc", "[", "0", "]", "/", "2", "elif", "denominator", "==", "'portfolio_value'", ":", "denom", "=", "positions", ".", "sum", "(", "axis", "=", "1", ")", "else", ":", "raise", "ValueError", "(", "\"Unexpected value for denominator '{}'. The \"", "\"denominator parameter must be either 'AGB'\"", "\" or 'portfolio_value'.\"", ".", "format", "(", "denominator", ")", ")", "denom", ".", "index", "=", "denom", ".", "index", ".", "normalize", "(", ")", "turnover", "=", "traded_value", ".", "div", "(", "denom", ",", "axis", "=", "'index'", ")", "turnover", "=", "turnover", ".", "fillna", "(", "0", ")", "return", "turnover" ]
37.448276
0.000449
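The AGB denominator is the subtle step above; a standalone sketch of just that arithmetic, with illustrative values that are not taken from the record:

import pandas as pd

AGB = pd.Series([100.0, 300.0])      # gross exposure per day, cash excluded
denom = AGB.rolling(2).mean()        # [NaN, 200.0]
denom.iloc[0] = AGB.iloc[0] / 2      # day 0 uses half of its own AGB, as in the code above
print(denom.tolist())                # [50.0, 200.0]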
def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
                              public_port, expected):
    """Validate endpoint data.

    Validate actual endpoint data vs expected endpoint data. The ports
    are used to find the matching endpoint.
    """
    self.log.debug('Validating endpoint data...')
    self.log.debug('actual: {}'.format(repr(endpoints)))
    found = False
    for ep in endpoints:
        self.log.debug('endpoint: {}'.format(repr(ep)))
        if (admin_port in ep.adminurl and
                internal_port in ep.internalurl and
                public_port in ep.publicurl):
            found = True
            actual = {'id': ep.id,
                      'region': ep.region,
                      'adminurl': ep.adminurl,
                      'internalurl': ep.internalurl,
                      'publicurl': ep.publicurl,
                      'service_id': ep.service_id}
            ret = self._validate_dict_data(expected, actual)
            if ret:
                return 'unexpected endpoint data - {}'.format(ret)

    if not found:
        return 'endpoint not found'
[ "def", "validate_v2_endpoint_data", "(", "self", ",", "endpoints", ",", "admin_port", ",", "internal_port", ",", "public_port", ",", "expected", ")", ":", "self", ".", "log", ".", "debug", "(", "'Validating endpoint data...'", ")", "self", ".", "log", ".", "debug", "(", "'actual: {}'", ".", "format", "(", "repr", "(", "endpoints", ")", ")", ")", "found", "=", "False", "for", "ep", "in", "endpoints", ":", "self", ".", "log", ".", "debug", "(", "'endpoint: {}'", ".", "format", "(", "repr", "(", "ep", ")", ")", ")", "if", "(", "admin_port", "in", "ep", ".", "adminurl", "and", "internal_port", "in", "ep", ".", "internalurl", "and", "public_port", "in", "ep", ".", "publicurl", ")", ":", "found", "=", "True", "actual", "=", "{", "'id'", ":", "ep", ".", "id", ",", "'region'", ":", "ep", ".", "region", ",", "'adminurl'", ":", "ep", ".", "adminurl", ",", "'internalurl'", ":", "ep", ".", "internalurl", ",", "'publicurl'", ":", "ep", ".", "publicurl", ",", "'service_id'", ":", "ep", ".", "service_id", "}", "ret", "=", "self", ".", "_validate_dict_data", "(", "expected", ",", "actual", ")", "if", "ret", ":", "return", "'unexpected endpoint data - {}'", ".", "format", "(", "ret", ")", "if", "not", "found", ":", "return", "'endpoint not found'" ]
43.428571
0.002414
def add_config(self, key, type_, default=NOT_SET, env_var=None):
    """Add a configuration setting.

    Parameters
    ----------
    key : str
        The name of the configuration setting. This must be a valid
        Python attribute name i.e. alphanumeric with underscores.
    type_ : function
        A function such as ``float``, ``int`` or ``str`` which takes
        the configuration value and returns an object of the correct
        type. Note that the values retrieved from environment variables
        are always strings, while those retrieved from the YAML file
        might already be parsed. Hence, the function provided here must
        accept both types of input.
    default : object, optional
        The default configuration to return if not set. By default none
        is set and an error is raised instead.
    env_var : str, optional
        The environment variable name that holds this configuration
        value. If not given, this configuration can only be set in the
        YAML configuration file.

    """
    self.config[key] = {'type': type_}
    if env_var is not None:
        self.config[key]['env_var'] = env_var
    if default is not NOT_SET:
        self.config[key]['default'] = default
[ "def", "add_config", "(", "self", ",", "key", ",", "type_", ",", "default", "=", "NOT_SET", ",", "env_var", "=", "None", ")", ":", "self", ".", "config", "[", "key", "]", "=", "{", "'type'", ":", "type_", "}", "if", "env_var", "is", "not", "None", ":", "self", ".", "config", "[", "key", "]", "[", "'env_var'", "]", "=", "env_var", "if", "default", "is", "not", "NOT_SET", ":", "self", ".", "config", "[", "key", "]", "[", "'default'", "]", "=", "default" ]
45.241379
0.001493
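A hedged usage sketch for `add_config`, assuming `config` is an instance of the configuration class this method belongs to; the setting names and environment variables are illustrative, not taken from the record:

config.add_config('floatX', type_=str, default='float64', env_var='MYLIB_FLOATX')
config.add_config('data_path', type_=str, env_var='MYLIB_DATA_PATH')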
def minus(self, a):
    """ Subtract. """
    return Vector(self.x-a.x, self.y-a.y, self.z-a.z)
[ "def", "minus", "(", "self", ",", "a", ")", ":", "return", "Vector", "(", "self", ".", "x", "-", "a", ".", "x", ",", "self", ".", "y", "-", "a", ".", "y", ",", "self", ".", "z", "-", "a", ".", "z", ")" ]
33.666667
0.019417
def gen_mac(prefix='AC:DE:48'):
    '''
    Generates a MAC address with the defined OUI prefix.

    Common prefixes:

     - ``00:16:3E`` -- Xen
     - ``00:18:51`` -- OpenVZ
     - ``00:50:56`` -- VMware (manually generated)
     - ``52:54:00`` -- QEMU/KVM
     - ``AC:DE:48`` -- PRIVATE

    References:

     - http://standards.ieee.org/develop/regauth/oui/oui.txt
     - https://www.wireshark.org/tools/oui-lookup.html
     - https://en.wikipedia.org/wiki/MAC_address
    '''
    return '{0}:{1:02X}:{2:02X}:{3:02X}'.format(prefix,
                                                random.randint(0, 0xff),
                                                random.randint(0, 0xff),
                                                random.randint(0, 0xff))
[ "def", "gen_mac", "(", "prefix", "=", "'AC:DE:48'", ")", ":", "return", "'{0}:{1:02X}:{2:02X}:{3:02X}'", ".", "format", "(", "prefix", ",", "random", ".", "randint", "(", "0", ",", "0xff", ")", ",", "random", ".", "randint", "(", "0", ",", "0xff", ")", ",", "random", ".", "randint", "(", "0", ",", "0xff", ")", ")" ]
33.409091
0.001323
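Usage sketch, assuming `gen_mac` is in scope (its module imports `random`); the generated octets shown in the comments are examples only:

print(gen_mac())            # e.g. AC:DE:48:3F:A1:07 -- last three octets are random
print(gen_mac('52:54:00'))  # e.g. 52:54:00:1B:9D:33 -- a QEMU/KVM-style address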
def data(self, index, role):
    '''Return data for *index* according to *role*.'''
    if not index.isValid():
        return None

    column = index.column()
    item = index.internalPointer()

    if role == self.ITEM_ROLE:
        return item

    elif role == Qt.DisplayRole:
        if column == 0:
            return item.name
        elif column == 1:
            if item.size:
                return item.size
        elif column == 2:
            return item.type
        elif column == 3:
            if item.modified is not None:
                return item.modified.strftime('%c')

    elif role == Qt.DecorationRole:
        if column == 0:
            return self.iconFactory.icon(item)

    elif role == Qt.TextAlignmentRole:
        if column == 1:
            return Qt.AlignRight
        else:
            return Qt.AlignLeft

    return None
[ "def", "data", "(", "self", ",", "index", ",", "role", ")", ":", "if", "not", "index", ".", "isValid", "(", ")", ":", "return", "None", "column", "=", "index", ".", "column", "(", ")", "item", "=", "index", ".", "internalPointer", "(", ")", "if", "role", "==", "self", ".", "ITEM_ROLE", ":", "return", "item", "elif", "role", "==", "Qt", ".", "DisplayRole", ":", "if", "column", "==", "0", ":", "return", "item", ".", "name", "elif", "column", "==", "1", ":", "if", "item", ".", "size", ":", "return", "item", ".", "size", "elif", "column", "==", "2", ":", "return", "item", ".", "type", "elif", "column", "==", "3", ":", "if", "item", ".", "modified", "is", "not", "None", ":", "return", "item", ".", "modified", ".", "strftime", "(", "'%c'", ")", "elif", "role", "==", "Qt", ".", "DecorationRole", ":", "if", "column", "==", "0", ":", "return", "self", ".", "iconFactory", ".", "icon", "(", "item", ")", "elif", "role", "==", "Qt", ".", "TextAlignmentRole", ":", "if", "column", "==", "1", ":", "return", "Qt", ".", "AlignRight", "else", ":", "return", "Qt", ".", "AlignLeft", "return", "None" ]
26.771429
0.00206
def get_membrane_xml(self, pdb_id):
    ''' Returns the <MEMBRANE> tag XML for pdb_id if the tag exists.'''
    self.tmp_string = None
    context = etree.iterparse(io.BytesIO(self.xml_contents), events=('end',), tag=self.PDBTM_entry_tag_type)
    try:
        fast_iter(context, self._get_membrane_xml, pdb_id = pdb_id.upper())
    except EarlyOut:
        pass
    return self.tmp_string
[ "def", "get_membrane_xml", "(", "self", ",", "pdb_id", ")", ":", "self", ".", "tmp_string", "=", "None", "context", "=", "etree", ".", "iterparse", "(", "io", ".", "BytesIO", "(", "self", ".", "xml_contents", ")", ",", "events", "=", "(", "'end'", ",", ")", ",", "tag", "=", "self", ".", "PDBTM_entry_tag_type", ")", "try", ":", "fast_iter", "(", "context", ",", "self", ".", "_get_membrane_xml", ",", "pdb_id", "=", "pdb_id", ".", "upper", "(", ")", ")", "except", "EarlyOut", ":", "pass", "return", "self", ".", "tmp_string" ]
50.25
0.01467
def get_all_roles(path_prefix=None, region=None, key=None, keyid=None,
                  profile=None):
    '''
    Get and return all IAM role details, starting at the optional path.

    .. versionadded:: 2016.3.0

    CLI Example:

        salt-call boto_iam.get_all_roles
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if not conn:
        return None
    _roles = conn.list_roles(path_prefix=path_prefix)
    roles = _roles.list_roles_response.list_roles_result.roles
    marker = getattr(
        _roles.list_roles_response.list_roles_result, 'marker', None
    )
    while marker:
        _roles = conn.list_roles(path_prefix=path_prefix, marker=marker)
        roles = roles + _roles.list_roles_response.list_roles_result.roles
        marker = getattr(
            _roles.list_roles_response.list_roles_result, 'marker', None
        )
    return roles
[ "def", "get_all_roles", "(", "path_prefix", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "not", "conn", ":", "return", "None", "_roles", "=", "conn", ".", "list_roles", "(", "path_prefix", "=", "path_prefix", ")", "roles", "=", "_roles", ".", "list_roles_response", ".", "list_roles_result", ".", "roles", "marker", "=", "getattr", "(", "_roles", ".", "list_roles_response", ".", "list_roles_result", ",", "'marker'", ",", "None", ")", "while", "marker", ":", "_roles", "=", "conn", ".", "list_roles", "(", "path_prefix", "=", "path_prefix", ",", "marker", "=", "marker", ")", "roles", "=", "roles", "+", "_roles", ".", "list_roles_response", ".", "list_roles_result", ".", "roles", "marker", "=", "getattr", "(", "_roles", ".", "list_roles_response", ".", "list_roles_result", ",", "'marker'", ",", "None", ")", "return", "roles" ]
33.653846
0.002222
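Beyond the CLI example in the docstring, a hedged sketch of calling the function from other Salt modules through the loader dictionary; the path prefix, profile name, and the `role_name` attribute are assumptions:

roles = __salt__['boto_iam.get_all_roles'](path_prefix='/service/', profile='my_aws_profile')
for role in roles:
    print(role.role_name)   # attribute name assumed from boto's list_roles response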
def rename_document(self, old_path, new_path):
    """
    Renames an already opened document (this will not rename the file,
    just update the file path and tab title).

    Use that function to update a file that has been renamed externally.

    :param old_path: old path (path of the widget to rename with
        ``new_path``)
    :param new_path: new path that will be used to rename the tab.
    """
    to_rename = []
    title = os.path.split(new_path)[1]
    for widget in self.widgets(include_clones=True):
        p = os.path.normpath(os.path.normcase(widget.file.path))
        old_path = os.path.normpath(os.path.normcase(old_path))
        if p == old_path:
            to_rename.append(widget)
    for widget in to_rename:
        tw = widget.parent_tab_widget
        widget.file._path = new_path
        tw.setTabText(tw.indexOf(widget), title)
[ "def", "rename_document", "(", "self", ",", "old_path", ",", "new_path", ")", ":", "to_rename", "=", "[", "]", "title", "=", "os", ".", "path", ".", "split", "(", "new_path", ")", "[", "1", "]", "for", "widget", "in", "self", ".", "widgets", "(", "include_clones", "=", "True", ")", ":", "p", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "normcase", "(", "widget", ".", "file", ".", "path", ")", ")", "old_path", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "normcase", "(", "old_path", ")", ")", "if", "p", "==", "old_path", ":", "to_rename", ".", "append", "(", "widget", ")", "for", "widget", "in", "to_rename", ":", "tw", "=", "widget", ".", "parent_tab_widget", "widget", ".", "file", ".", "_path", "=", "new_path", "tw", ".", "setTabText", "(", "tw", ".", "indexOf", "(", "widget", ")", ",", "title", ")" ]
41.727273
0.00213
def _create_svc_vm_hosting_devices(self, context, num, template): """Creates <num> or less service VM instances based on <template>. These hosting devices can be bound to a certain tenant or for shared use. A list with the created hosting device VMs is returned. """ hosting_devices = [] template_id = template['id'] credentials_id = template['default_credentials_id'] plugging_drv = self.get_hosting_device_plugging_driver(context, template_id) hosting_device_drv = self.get_hosting_device_driver(context, template_id) if plugging_drv is None or hosting_device_drv is None or num <= 0: return hosting_devices #TODO(bobmel): Determine value for max_hosted properly max_hosted = 1 # template['slot_capacity'] dev_data, mgmt_context = self._get_resources_properties_for_hd( template, credentials_id) credentials_info = self._credentials.get(credentials_id) if credentials_info is None: LOG.error('Could not find credentials for hosting device' 'template %s. Aborting VM hosting device creation.', template_id) return hosting_devices connectivity_info = self._get_mgmt_connectivity_info( context, self.mgmt_subnet_id()) for i in range(num): complementary_id = uuidutils.generate_uuid() res = plugging_drv.create_hosting_device_resources( context, complementary_id, self.l3_tenant_id(), mgmt_context, max_hosted) if res.get('mgmt_port') is None: # Required ports could not be created return hosting_devices connectivity_info['mgmt_port'] = res['mgmt_port'] vm_instance = self.svc_vm_mgr.dispatch_service_vm( context, template['name'] + '_nrouter', template['image'], template['flavor'], hosting_device_drv, credentials_info, connectivity_info, res.get('ports')) if vm_instance is not None: dev_data.update( {'id': vm_instance['id'], 'complementary_id': complementary_id, 'management_ip_address': res['mgmt_port'][ 'fixed_ips'][0]['ip_address'], 'management_port_id': res['mgmt_port']['id']}) self.create_hosting_device(context, {'hosting_device': dev_data}) hosting_devices.append(vm_instance) else: # Fundamental error like could not contact Nova # Cleanup anything we created plugging_drv.delete_hosting_device_resources( context, self.l3_tenant_id(), **res) break LOG.info('Created %(num)d hosting device VMs based on template ' '%(t_id)s', {'num': len(hosting_devices), 't_id': template_id}) return hosting_devices
[ "def", "_create_svc_vm_hosting_devices", "(", "self", ",", "context", ",", "num", ",", "template", ")", ":", "hosting_devices", "=", "[", "]", "template_id", "=", "template", "[", "'id'", "]", "credentials_id", "=", "template", "[", "'default_credentials_id'", "]", "plugging_drv", "=", "self", ".", "get_hosting_device_plugging_driver", "(", "context", ",", "template_id", ")", "hosting_device_drv", "=", "self", ".", "get_hosting_device_driver", "(", "context", ",", "template_id", ")", "if", "plugging_drv", "is", "None", "or", "hosting_device_drv", "is", "None", "or", "num", "<=", "0", ":", "return", "hosting_devices", "#TODO(bobmel): Determine value for max_hosted properly", "max_hosted", "=", "1", "# template['slot_capacity']", "dev_data", ",", "mgmt_context", "=", "self", ".", "_get_resources_properties_for_hd", "(", "template", ",", "credentials_id", ")", "credentials_info", "=", "self", ".", "_credentials", ".", "get", "(", "credentials_id", ")", "if", "credentials_info", "is", "None", ":", "LOG", ".", "error", "(", "'Could not find credentials for hosting device'", "'template %s. Aborting VM hosting device creation.'", ",", "template_id", ")", "return", "hosting_devices", "connectivity_info", "=", "self", ".", "_get_mgmt_connectivity_info", "(", "context", ",", "self", ".", "mgmt_subnet_id", "(", ")", ")", "for", "i", "in", "range", "(", "num", ")", ":", "complementary_id", "=", "uuidutils", ".", "generate_uuid", "(", ")", "res", "=", "plugging_drv", ".", "create_hosting_device_resources", "(", "context", ",", "complementary_id", ",", "self", ".", "l3_tenant_id", "(", ")", ",", "mgmt_context", ",", "max_hosted", ")", "if", "res", ".", "get", "(", "'mgmt_port'", ")", "is", "None", ":", "# Required ports could not be created", "return", "hosting_devices", "connectivity_info", "[", "'mgmt_port'", "]", "=", "res", "[", "'mgmt_port'", "]", "vm_instance", "=", "self", ".", "svc_vm_mgr", ".", "dispatch_service_vm", "(", "context", ",", "template", "[", "'name'", "]", "+", "'_nrouter'", ",", "template", "[", "'image'", "]", ",", "template", "[", "'flavor'", "]", ",", "hosting_device_drv", ",", "credentials_info", ",", "connectivity_info", ",", "res", ".", "get", "(", "'ports'", ")", ")", "if", "vm_instance", "is", "not", "None", ":", "dev_data", ".", "update", "(", "{", "'id'", ":", "vm_instance", "[", "'id'", "]", ",", "'complementary_id'", ":", "complementary_id", ",", "'management_ip_address'", ":", "res", "[", "'mgmt_port'", "]", "[", "'fixed_ips'", "]", "[", "0", "]", "[", "'ip_address'", "]", ",", "'management_port_id'", ":", "res", "[", "'mgmt_port'", "]", "[", "'id'", "]", "}", ")", "self", ".", "create_hosting_device", "(", "context", ",", "{", "'hosting_device'", ":", "dev_data", "}", ")", "hosting_devices", ".", "append", "(", "vm_instance", ")", "else", ":", "# Fundamental error like could not contact Nova", "# Cleanup anything we created", "plugging_drv", ".", "delete_hosting_device_resources", "(", "context", ",", "self", ".", "l3_tenant_id", "(", ")", ",", "*", "*", "res", ")", "break", "LOG", ".", "info", "(", "'Created %(num)d hosting device VMs based on template '", "'%(t_id)s'", ",", "{", "'num'", ":", "len", "(", "hosting_devices", ")", ",", "'t_id'", ":", "template_id", "}", ")", "return", "hosting_devices" ]
52.7
0.000931
def ensure_specification_cols_are_in_dataframe(specification, dataframe):
    """
    Checks whether each column in `specification` is in `dataframe`. Raises
    ValueError if any of the columns are not in the dataframe.

    Parameters
    ----------
    specification : OrderedDict.
        Keys are a proper subset of the columns in `data`. Values are either
        a list or a single string, "all_diff" or "all_same". If a list, the
        elements should be:
            - single objects that are in the alternative ID column of `data`
            - lists of objects that are within the alternative ID column of
              `data`. For each single object in the list, a unique column
              will be created (i.e. there will be a unique coefficient for
              that variable in the corresponding utility equation of the
              corresponding alternative). For lists within the
              `specification` values, a single column will be created for
              all the alternatives within the iterable (i.e. there will be
              one common coefficient for the variables in the iterable).
    dataframe : pandas DataFrame.
        Dataframe containing the data for the choice model to be estimated.

    Returns
    -------
    None.
    """
    # Make sure specification is an OrderedDict
    try:
        assert isinstance(specification, OrderedDict)
    except AssertionError:
        raise TypeError("`specification` must be an OrderedDict.")
    # Make sure dataframe is a pandas dataframe
    assert isinstance(dataframe, pd.DataFrame)

    problem_cols = []
    dataframe_cols = dataframe.columns
    for key in specification:
        if key not in dataframe_cols:
            problem_cols.append(key)
    if problem_cols != []:
        msg = "The following keys in the specification are not in 'data':\n{}"
        raise ValueError(msg.format(problem_cols))

    return None
[ "def", "ensure_specification_cols_are_in_dataframe", "(", "specification", ",", "dataframe", ")", ":", "# Make sure specification is an OrderedDict", "try", ":", "assert", "isinstance", "(", "specification", ",", "OrderedDict", ")", "except", "AssertionError", ":", "raise", "TypeError", "(", "\"`specification` must be an OrderedDict.\"", ")", "# Make sure dataframe is a pandas dataframe", "assert", "isinstance", "(", "dataframe", ",", "pd", ".", "DataFrame", ")", "problem_cols", "=", "[", "]", "dataframe_cols", "=", "dataframe", ".", "columns", "for", "key", "in", "specification", ":", "if", "key", "not", "in", "dataframe_cols", ":", "problem_cols", ".", "append", "(", "key", ")", "if", "problem_cols", "!=", "[", "]", ":", "msg", "=", "\"The following keys in the specification are not in 'data':\\n{}\"", "raise", "ValueError", "(", "msg", ".", "format", "(", "problem_cols", ")", ")", "return", "None" ]
41.666667
0.000521
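A small sketch of the check, assuming the function is in scope; column and key names are illustrative:

import pandas as pd
from collections import OrderedDict

df = pd.DataFrame({'travel_time': [10, 20], 'travel_cost': [1.5, 2.0]})
spec = OrderedDict([('travel_time', 'all_same'), ('income', 'all_diff')])
ensure_specification_cols_are_in_dataframe(spec, df)   # raises ValueError: 'income' is not a column of df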
def detect_sv(items, all_items=None, stage="standard"):
    """Top level parallel target for examining structural variation.
    """
    items = [utils.to_single_data(x) for x in items]
    items = cwlutils.unpack_tarballs(items, items[0])
    svcaller = items[0]["config"]["algorithm"].get("svcaller")
    caller_fn = _get_callers(items, stage, special_cases=True).get(svcaller)
    out = []
    if svcaller and caller_fn:
        if (all_items and svcaller in _NEEDS_BACKGROUND and
                not vcfutils.is_paired_analysis([x.get("align_bam") for x in items], items)):
            names = set([dd.get_sample_name(x) for x in items])
            background = [x for x in all_items if dd.get_sample_name(x) not in names]
            for svdata in caller_fn(items, background):
                out.append([svdata])
        else:
            for svdata in caller_fn(items):
                out.append([svdata])
    else:
        for data in items:
            out.append([data])
    # Avoid nesting of callers for CWL runs for easier extraction
    if cwlutils.is_cwl_run(items[0]):
        out_cwl = []
        for data in [utils.to_single_data(x) for x in out]:
            # Run validation directly from CWL runs since we're single stage
            data = validate.evaluate(data)
            data["svvalidate"] = {"summary": tz.get_in(["sv-validate", "csv"], data)}
            svs = data.get("sv")
            if svs:
                assert len(svs) == 1, svs
                data["sv"] = svs[0]
            else:
                data["sv"] = {}
            data = _add_supplemental(data)
            out_cwl.append([data])
        return out_cwl
    return out
[ "def", "detect_sv", "(", "items", ",", "all_items", "=", "None", ",", "stage", "=", "\"standard\"", ")", ":", "items", "=", "[", "utils", ".", "to_single_data", "(", "x", ")", "for", "x", "in", "items", "]", "items", "=", "cwlutils", ".", "unpack_tarballs", "(", "items", ",", "items", "[", "0", "]", ")", "svcaller", "=", "items", "[", "0", "]", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", ".", "get", "(", "\"svcaller\"", ")", "caller_fn", "=", "_get_callers", "(", "items", ",", "stage", ",", "special_cases", "=", "True", ")", ".", "get", "(", "svcaller", ")", "out", "=", "[", "]", "if", "svcaller", "and", "caller_fn", ":", "if", "(", "all_items", "and", "svcaller", "in", "_NEEDS_BACKGROUND", "and", "not", "vcfutils", ".", "is_paired_analysis", "(", "[", "x", ".", "get", "(", "\"align_bam\"", ")", "for", "x", "in", "items", "]", ",", "items", ")", ")", ":", "names", "=", "set", "(", "[", "dd", ".", "get_sample_name", "(", "x", ")", "for", "x", "in", "items", "]", ")", "background", "=", "[", "x", "for", "x", "in", "all_items", "if", "dd", ".", "get_sample_name", "(", "x", ")", "not", "in", "names", "]", "for", "svdata", "in", "caller_fn", "(", "items", ",", "background", ")", ":", "out", ".", "append", "(", "[", "svdata", "]", ")", "else", ":", "for", "svdata", "in", "caller_fn", "(", "items", ")", ":", "out", ".", "append", "(", "[", "svdata", "]", ")", "else", ":", "for", "data", "in", "items", ":", "out", ".", "append", "(", "[", "data", "]", ")", "# Avoid nesting of callers for CWL runs for easier extraction", "if", "cwlutils", ".", "is_cwl_run", "(", "items", "[", "0", "]", ")", ":", "out_cwl", "=", "[", "]", "for", "data", "in", "[", "utils", ".", "to_single_data", "(", "x", ")", "for", "x", "in", "out", "]", ":", "# Run validation directly from CWL runs since we're single stage", "data", "=", "validate", ".", "evaluate", "(", "data", ")", "data", "[", "\"svvalidate\"", "]", "=", "{", "\"summary\"", ":", "tz", ".", "get_in", "(", "[", "\"sv-validate\"", ",", "\"csv\"", "]", ",", "data", ")", "}", "svs", "=", "data", ".", "get", "(", "\"sv\"", ")", "if", "svs", ":", "assert", "len", "(", "svs", ")", "==", "1", ",", "svs", "data", "[", "\"sv\"", "]", "=", "svs", "[", "0", "]", "else", ":", "data", "[", "\"sv\"", "]", "=", "{", "}", "data", "=", "_add_supplemental", "(", "data", ")", "out_cwl", ".", "append", "(", "[", "data", "]", ")", "return", "out_cwl", "return", "out" ]
43
0.002394
def build_pdf(path_jinja2, template_name, path_outfile, template_kwargs=None):
    '''Helper function for building a pdf from a latex jinja2 template

    :param path_jinja2: the root directory for latex jinja2 templates
    :param template_name: the relative path, to path_jinja2, to the desired
        jinja2 Latex template
    :param path_outfile: the full path to the desired final output file
        Must contain the same file extension as files generated by
        cmd_wo_infile, otherwise the process will fail
    :param template_kwargs: a dictionary of key/values for jinja2 variables
    '''
    latex_template_object = LatexBuild(
        path_jinja2,
        template_name,
        template_kwargs,
    )
    return latex_template_object.build_pdf(path_outfile)
[ "def", "build_pdf", "(", "path_jinja2", ",", "template_name", ",", "path_outfile", ",", "template_kwargs", "=", "None", ")", ":", "latex_template_object", "=", "LatexBuild", "(", "path_jinja2", ",", "template_name", ",", "template_kwargs", ",", ")", "return", "latex_template_object", ".", "build_pdf", "(", "path_outfile", ")" ]
45.882353
0.001256
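A hedged usage sketch; the directory layout, template name, and variables are assumptions, not part of the record:

build_pdf(
    path_jinja2='./templates',
    template_name='report.tex',
    path_outfile='/tmp/report.pdf',
    template_kwargs={'title': 'Quarterly report', 'author': 'A. Nonymous'},
)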
def gapfind(model, solver, epsilon=0.001, v_max=1000, implicit_sinks=True): """Identify compounds in the model that cannot be produced. Yields all compounds that cannot be produced. This method assumes implicit sinks for all compounds in the model so the only factor that influences whether a compound can be produced is the presence of the compounds needed to produce it. Epsilon indicates the threshold amount of reaction flux for the products to be considered non-blocked. V_max indicates the maximum flux. This method is implemented as a MILP-program. Therefore it may not be efficient for larger models. Args: model: :class:`MetabolicModel` containing core reactions and reactions that can be added for gap-filling. solver: MILP solver instance. epsilon: Threshold amount of a compound produced for it to not be considered blocked. v_max: Maximum flux. implicit_sinks: Whether implicit sinks for all compounds are included when gap-filling (traditional GapFill uses implicit sinks). """ prob = solver.create_problem() # Set integrality tolerance such that w constraints are correct min_tol = prob.integrality_tolerance.min int_tol = _find_integer_tolerance(epsilon, v_max, min_tol) if int_tol < prob.integrality_tolerance.value: prob.integrality_tolerance.value = int_tol # Define flux variables v = prob.namespace() for reaction_id in model.reactions: lower, upper = model.limits[reaction_id] v.define([reaction_id], lower=lower, upper=upper) # Define constraints on production of metabolites in reaction w = prob.namespace(types=lp.VariableType.Binary) binary_cons_lhs = {compound: 0 for compound in model.compounds} for spec, value in iteritems(model.matrix): compound, reaction_id = spec if value != 0: w.define([spec]) w_var = w(spec) lower, upper = (float(x) for x in model.limits[reaction_id]) if value > 0: dv = v(reaction_id) else: dv = -v(reaction_id) lower, upper = -upper, -lower prob.add_linear_constraints( dv <= upper * w_var, dv >= epsilon + (lower - epsilon) * (1 - w_var)) binary_cons_lhs[compound] += w_var xp = prob.namespace(model.compounds, types=lp.VariableType.Binary) objective = xp.sum(model.compounds) prob.set_objective(objective) for compound, lhs in iteritems(binary_cons_lhs): prob.add_linear_constraints(lhs >= xp(compound)) # Define mass balance constraints massbalance_lhs = {compound: 0 for compound in model.compounds} for spec, value in iteritems(model.matrix): compound, reaction_id = spec massbalance_lhs[compound] += v(reaction_id) * value for compound, lhs in iteritems(massbalance_lhs): if implicit_sinks: # The constraint is merely >0 meaning that we have implicit sinks # for all compounds. prob.add_linear_constraints(lhs >= 0) else: prob.add_linear_constraints(lhs == 0) # Solve try: result = prob.solve(lp.ObjectiveSense.Maximize) except lp.SolverError as e: raise_from(GapFillError('Failed to solve gapfill: {}'.format(e), e)) for compound in model.compounds: if result.get_value(xp(compound)) < 0.5: yield compound
[ "def", "gapfind", "(", "model", ",", "solver", ",", "epsilon", "=", "0.001", ",", "v_max", "=", "1000", ",", "implicit_sinks", "=", "True", ")", ":", "prob", "=", "solver", ".", "create_problem", "(", ")", "# Set integrality tolerance such that w constraints are correct", "min_tol", "=", "prob", ".", "integrality_tolerance", ".", "min", "int_tol", "=", "_find_integer_tolerance", "(", "epsilon", ",", "v_max", ",", "min_tol", ")", "if", "int_tol", "<", "prob", ".", "integrality_tolerance", ".", "value", ":", "prob", ".", "integrality_tolerance", ".", "value", "=", "int_tol", "# Define flux variables", "v", "=", "prob", ".", "namespace", "(", ")", "for", "reaction_id", "in", "model", ".", "reactions", ":", "lower", ",", "upper", "=", "model", ".", "limits", "[", "reaction_id", "]", "v", ".", "define", "(", "[", "reaction_id", "]", ",", "lower", "=", "lower", ",", "upper", "=", "upper", ")", "# Define constraints on production of metabolites in reaction", "w", "=", "prob", ".", "namespace", "(", "types", "=", "lp", ".", "VariableType", ".", "Binary", ")", "binary_cons_lhs", "=", "{", "compound", ":", "0", "for", "compound", "in", "model", ".", "compounds", "}", "for", "spec", ",", "value", "in", "iteritems", "(", "model", ".", "matrix", ")", ":", "compound", ",", "reaction_id", "=", "spec", "if", "value", "!=", "0", ":", "w", ".", "define", "(", "[", "spec", "]", ")", "w_var", "=", "w", "(", "spec", ")", "lower", ",", "upper", "=", "(", "float", "(", "x", ")", "for", "x", "in", "model", ".", "limits", "[", "reaction_id", "]", ")", "if", "value", ">", "0", ":", "dv", "=", "v", "(", "reaction_id", ")", "else", ":", "dv", "=", "-", "v", "(", "reaction_id", ")", "lower", ",", "upper", "=", "-", "upper", ",", "-", "lower", "prob", ".", "add_linear_constraints", "(", "dv", "<=", "upper", "*", "w_var", ",", "dv", ">=", "epsilon", "+", "(", "lower", "-", "epsilon", ")", "*", "(", "1", "-", "w_var", ")", ")", "binary_cons_lhs", "[", "compound", "]", "+=", "w_var", "xp", "=", "prob", ".", "namespace", "(", "model", ".", "compounds", ",", "types", "=", "lp", ".", "VariableType", ".", "Binary", ")", "objective", "=", "xp", ".", "sum", "(", "model", ".", "compounds", ")", "prob", ".", "set_objective", "(", "objective", ")", "for", "compound", ",", "lhs", "in", "iteritems", "(", "binary_cons_lhs", ")", ":", "prob", ".", "add_linear_constraints", "(", "lhs", ">=", "xp", "(", "compound", ")", ")", "# Define mass balance constraints", "massbalance_lhs", "=", "{", "compound", ":", "0", "for", "compound", "in", "model", ".", "compounds", "}", "for", "spec", ",", "value", "in", "iteritems", "(", "model", ".", "matrix", ")", ":", "compound", ",", "reaction_id", "=", "spec", "massbalance_lhs", "[", "compound", "]", "+=", "v", "(", "reaction_id", ")", "*", "value", "for", "compound", ",", "lhs", "in", "iteritems", "(", "massbalance_lhs", ")", ":", "if", "implicit_sinks", ":", "# The constraint is merely >0 meaning that we have implicit sinks", "# for all compounds.", "prob", ".", "add_linear_constraints", "(", "lhs", ">=", "0", ")", "else", ":", "prob", ".", "add_linear_constraints", "(", "lhs", "==", "0", ")", "# Solve", "try", ":", "result", "=", "prob", ".", "solve", "(", "lp", ".", "ObjectiveSense", ".", "Maximize", ")", "except", "lp", ".", "SolverError", "as", "e", ":", "raise_from", "(", "GapFillError", "(", "'Failed to solve gapfill: {}'", ".", "format", "(", "e", ")", ",", "e", ")", ")", "for", "compound", "in", "model", ".", "compounds", ":", "if", "result", ".", "get_value", "(", "xp", "(", "compound", 
")", ")", "<", "0.5", ":", "yield", "compound" ]
38.640449
0.000284
def predict(self, h=5, oos_data=None, intervals=False): """ Makes forecast with the estimated model Parameters ---------- h : int (default : 5) How many steps ahead would you like to forecast? oos_data : pd.DataFrame Data for the variables to be used out of sample (ys can be NaNs) intervals : boolean (default: False) Whether to return prediction intervals Returns ---------- - pd.DataFrame with predicted values """ if self.latent_variables.estimated is False: raise Exception("No latent variables estimated!") else: dep_var = self.formula.split("~")[0] oos_data[dep_var] = oos_data[dep_var].replace(np.nan, 0) _, X_oos = dmatrices(self.formula, oos_data) X_oos = np.array([X_oos])[0] X_pred = X_oos[:h] # Retrieve data, dates and (transformed) latent variables mu, Y = self._model(self.latent_variables.get_z_values()) date_index = self.shift_dates(h) if self.latent_variables.estimation_method in ['M-H']: sim_vector = self._sim_prediction_bayes(h, X_pred, 15000) forecasted_values = np.array([np.mean(i) for i in sim_vector]) prediction_01 = np.array([np.percentile(i, 1) for i in sim_vector]) prediction_05 = np.array([np.percentile(i, 5) for i in sim_vector]) prediction_95 = np.array([np.percentile(i, 95) for i in sim_vector]) prediction_99 = np.array([np.percentile(i, 99) for i in sim_vector]) else: t_z = self.transform_z() mean_values = self._mean_prediction(mu, Y, h, t_z, X_pred) if self.model_name2 == "Skewt": model_scale, model_shape, model_skewness = self._get_scale_and_shape(t_z) m1 = (np.sqrt(model_shape)*sp.gamma((model_shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(model_shape/2.0)) forecasted_values = mean_values[-h:] + (model_skewness - (1.0/model_skewness))*model_scale*m1 else: forecasted_values = mean_values[-h:] if intervals is True: sim_values = self._sim_prediction(mu, Y, h, t_z, X_pred, 15000) else: sim_values = self._sim_prediction(mu, Y, h, t_z, X_pred, 2) if intervals is False: result = pd.DataFrame(forecasted_values) result.rename(columns={0:self.data_name}, inplace=True) else: # Get mean prediction and simulations (for errors) if self.latent_variables.estimation_method not in ['M-H']: sim_values = self._sim_prediction(mu, Y, h, t_z, X_pred, 15000) prediction_01 = np.array([np.percentile(i, 1) for i in sim_values]) prediction_05 = np.array([np.percentile(i, 5) for i in sim_values]) prediction_95 = np.array([np.percentile(i, 95) for i in sim_values]) prediction_99 = np.array([np.percentile(i, 99) for i in sim_values]) result = pd.DataFrame([forecasted_values, prediction_01, prediction_05, prediction_95, prediction_99]).T result.rename(columns={0:self.data_name, 1: "1% Prediction Interval", 2: "5% Prediction Interval", 3: "95% Prediction Interval", 4: "99% Prediction Interval"}, inplace=True) result.index = date_index[-h:] return result
[ "def", "predict", "(", "self", ",", "h", "=", "5", ",", "oos_data", "=", "None", ",", "intervals", "=", "False", ")", ":", "if", "self", ".", "latent_variables", ".", "estimated", "is", "False", ":", "raise", "Exception", "(", "\"No latent variables estimated!\"", ")", "else", ":", "dep_var", "=", "self", ".", "formula", ".", "split", "(", "\"~\"", ")", "[", "0", "]", "oos_data", "[", "dep_var", "]", "=", "oos_data", "[", "dep_var", "]", ".", "replace", "(", "np", ".", "nan", ",", "0", ")", "_", ",", "X_oos", "=", "dmatrices", "(", "self", ".", "formula", ",", "oos_data", ")", "X_oos", "=", "np", ".", "array", "(", "[", "X_oos", "]", ")", "[", "0", "]", "X_pred", "=", "X_oos", "[", ":", "h", "]", "# Retrieve data, dates and (transformed) latent variables", "mu", ",", "Y", "=", "self", ".", "_model", "(", "self", ".", "latent_variables", ".", "get_z_values", "(", ")", ")", "date_index", "=", "self", ".", "shift_dates", "(", "h", ")", "if", "self", ".", "latent_variables", ".", "estimation_method", "in", "[", "'M-H'", "]", ":", "sim_vector", "=", "self", ".", "_sim_prediction_bayes", "(", "h", ",", "X_pred", ",", "15000", ")", "forecasted_values", "=", "np", ".", "array", "(", "[", "np", ".", "mean", "(", "i", ")", "for", "i", "in", "sim_vector", "]", ")", "prediction_01", "=", "np", ".", "array", "(", "[", "np", ".", "percentile", "(", "i", ",", "1", ")", "for", "i", "in", "sim_vector", "]", ")", "prediction_05", "=", "np", ".", "array", "(", "[", "np", ".", "percentile", "(", "i", ",", "5", ")", "for", "i", "in", "sim_vector", "]", ")", "prediction_95", "=", "np", ".", "array", "(", "[", "np", ".", "percentile", "(", "i", ",", "95", ")", "for", "i", "in", "sim_vector", "]", ")", "prediction_99", "=", "np", ".", "array", "(", "[", "np", ".", "percentile", "(", "i", ",", "99", ")", "for", "i", "in", "sim_vector", "]", ")", "else", ":", "t_z", "=", "self", ".", "transform_z", "(", ")", "mean_values", "=", "self", ".", "_mean_prediction", "(", "mu", ",", "Y", ",", "h", ",", "t_z", ",", "X_pred", ")", "if", "self", ".", "model_name2", "==", "\"Skewt\"", ":", "model_scale", ",", "model_shape", ",", "model_skewness", "=", "self", ".", "_get_scale_and_shape", "(", "t_z", ")", "m1", "=", "(", "np", ".", "sqrt", "(", "model_shape", ")", "*", "sp", ".", "gamma", "(", "(", "model_shape", "-", "1.0", ")", "/", "2.0", ")", ")", "/", "(", "np", ".", "sqrt", "(", "np", ".", "pi", ")", "*", "sp", ".", "gamma", "(", "model_shape", "/", "2.0", ")", ")", "forecasted_values", "=", "mean_values", "[", "-", "h", ":", "]", "+", "(", "model_skewness", "-", "(", "1.0", "/", "model_skewness", ")", ")", "*", "model_scale", "*", "m1", "else", ":", "forecasted_values", "=", "mean_values", "[", "-", "h", ":", "]", "if", "intervals", "is", "True", ":", "sim_values", "=", "self", ".", "_sim_prediction", "(", "mu", ",", "Y", ",", "h", ",", "t_z", ",", "X_pred", ",", "15000", ")", "else", ":", "sim_values", "=", "self", ".", "_sim_prediction", "(", "mu", ",", "Y", ",", "h", ",", "t_z", ",", "X_pred", ",", "2", ")", "if", "intervals", "is", "False", ":", "result", "=", "pd", ".", "DataFrame", "(", "forecasted_values", ")", "result", ".", "rename", "(", "columns", "=", "{", "0", ":", "self", ".", "data_name", "}", ",", "inplace", "=", "True", ")", "else", ":", "# Get mean prediction and simulations (for errors)", "if", "self", ".", "latent_variables", ".", "estimation_method", "not", "in", "[", "'M-H'", "]", ":", "sim_values", "=", "self", ".", "_sim_prediction", "(", "mu", ",", "Y", ",", "h", ",", "t_z", ",", "X_pred", 
",", "15000", ")", "prediction_01", "=", "np", ".", "array", "(", "[", "np", ".", "percentile", "(", "i", ",", "1", ")", "for", "i", "in", "sim_values", "]", ")", "prediction_05", "=", "np", ".", "array", "(", "[", "np", ".", "percentile", "(", "i", ",", "5", ")", "for", "i", "in", "sim_values", "]", ")", "prediction_95", "=", "np", ".", "array", "(", "[", "np", ".", "percentile", "(", "i", ",", "95", ")", "for", "i", "in", "sim_values", "]", ")", "prediction_99", "=", "np", ".", "array", "(", "[", "np", ".", "percentile", "(", "i", ",", "99", ")", "for", "i", "in", "sim_values", "]", ")", "result", "=", "pd", ".", "DataFrame", "(", "[", "forecasted_values", ",", "prediction_01", ",", "prediction_05", ",", "prediction_95", ",", "prediction_99", "]", ")", ".", "T", "result", ".", "rename", "(", "columns", "=", "{", "0", ":", "self", ".", "data_name", ",", "1", ":", "\"1% Prediction Interval\"", ",", "2", ":", "\"5% Prediction Interval\"", ",", "3", ":", "\"95% Prediction Interval\"", ",", "4", ":", "\"99% Prediction Interval\"", "}", ",", "inplace", "=", "True", ")", "result", ".", "index", "=", "date_index", "[", "-", "h", ":", "]", "return", "result" ]
45.3
0.008372
def set_bpduguard(self, name, value=False, default=False, disable=False):
    """Configures the bpduguard value for the specified interface

    Args:
        name (string): The interface identifier to configure.  The name
            must be the full interface name (eg Ethernet1, not Et1)

        value (bool): True if bpduguard is enabled otherwise False

        default (bool): Configures the bpduguard parameter to its default
            value using the EOS CLI default config command

        disable (bool): Negates the bpduguard parameter using the EOS
            CLI no config command

    Returns:
        True if the command succeeds, otherwise False

    Raises:
        ValueError: Raised if an invalid interface name is specified

        TypeError: Raised if the value keyword argument does not evaluate
            to a valid boolean

    """
    value = 'enable' if value else 'disable'
    string = 'spanning-tree bpduguard'
    cmds = self.command_builder(string, value=value, default=default,
                                disable=disable)
    return self.configure_interface(name, cmds)
[ "def", "set_bpduguard", "(", "self", ",", "name", ",", "value", "=", "False", ",", "default", "=", "False", ",", "disable", "=", "False", ")", ":", "value", "=", "'enable'", "if", "value", "else", "'disable'", "string", "=", "'spanning-tree bpduguard'", "cmds", "=", "self", ".", "command_builder", "(", "string", ",", "value", "=", "value", ",", "default", "=", "default", ",", "disable", "=", "disable", ")", "return", "self", ".", "configure_interface", "(", "name", ",", "cmds", ")" ]
38.933333
0.001671
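A hedged usage sketch; `intf_api` stands in for whatever object (e.g. a switchport/STP API instance) exposes this method:

intf_api.set_bpduguard('Ethernet1', value=True)    # intended to emit 'spanning-tree bpduguard enable'
intf_api.set_bpduguard('Ethernet2', disable=True)  # intended to negate the setting with a 'no' form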
def _validate_string(self, input_string, path_to_root, object_title=''): ''' a helper method for validating properties of a string :return: input_string ''' rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root) input_criteria = self.keyMap[rules_path_to_root] error_dict = { 'object_title': object_title, 'model_schema': self.schema, 'input_criteria': input_criteria, 'failed_test': 'value_datatype', 'input_path': path_to_root, 'error_value': input_string, 'error_code': 4001 } if 'byte_data' in input_criteria.keys(): if input_criteria['byte_data']: error_dict['failed_test'] = 'byte_data' error_dict['error_code'] = 4011 try: decoded_bytes = b64decode(input_string) except: raise InputValidationError(error_dict) if not isinstance(decoded_bytes, bytes): raise InputValidationError(error_dict) if 'min_value' in input_criteria.keys(): if input_string < input_criteria['min_value']: error_dict['failed_test'] = 'min_value' error_dict['error_code'] = 4022 raise InputValidationError(error_dict) if 'max_value' in input_criteria.keys(): if input_string > input_criteria['max_value']: error_dict['failed_test'] = 'max_value' error_dict['error_code'] = 4023 raise InputValidationError(error_dict) if 'greater_than' in input_criteria.keys(): if input_string <= input_criteria['greater_than']: error_dict['failed_test'] = 'greater_than' error_dict['error_code'] = 4024 raise InputValidationError(error_dict) if 'less_than' in input_criteria.keys(): if input_string >= input_criteria['less_than']: error_dict['failed_test'] = 'less_than' error_dict['error_code'] = 4025 raise InputValidationError(error_dict) if 'equal_to' in input_criteria.keys(): if input_string != input_criteria['equal_to']: error_dict['failed_test'] = 'equal_to' error_dict['error_code'] = 4026 raise InputValidationError(error_dict) if 'min_length' in input_criteria.keys(): if len(input_string) < input_criteria['min_length']: error_dict['failed_test'] = 'min_length' error_dict['error_code'] = 4012 raise InputValidationError(error_dict) if 'max_length' in input_criteria.keys(): if len(input_string) > input_criteria['max_length']: error_dict['failed_test'] = 'max_length' error_dict['error_code'] = 4013 raise InputValidationError(error_dict) if 'must_not_contain' in input_criteria.keys(): for regex in input_criteria['must_not_contain']: regex_pattern = re.compile(regex) if regex_pattern.findall(input_string): error_dict['failed_test'] = 'must_not_contain' error_dict['error_code'] = 4014 raise InputValidationError(error_dict) if 'must_contain' in input_criteria.keys(): for regex in input_criteria['must_contain']: regex_pattern = re.compile(regex) if not regex_pattern.findall(input_string): error_dict['failed_test'] = 'must_contain' error_dict['error_code'] = 4015 raise InputValidationError(error_dict) if 'contains_either' in input_criteria.keys(): regex_match = False for regex in input_criteria['contains_either']: regex_pattern = re.compile(regex) if regex_pattern.findall(input_string): regex_match = True if not regex_match: error_dict['failed_test'] = 'contains_either' error_dict['error_code'] = 4016 raise InputValidationError(error_dict) if 'discrete_values' in input_criteria.keys(): if input_string not in input_criteria['discrete_values']: error_dict['failed_test'] = 'discrete_values' error_dict['error_code'] = 4041 raise InputValidationError(error_dict) if 'excluded_values' in input_criteria.keys(): if input_string in input_criteria['excluded_values']: error_dict['failed_test'] = 'excluded_values' error_dict['error_code'] = 4042 raise 
InputValidationError(error_dict) # TODO: validate string against identical to reference # TODO: run lambda function and call validation url return input_string
[ "def", "_validate_string", "(", "self", ",", "input_string", ",", "path_to_root", ",", "object_title", "=", "''", ")", ":", "rules_path_to_root", "=", "re", ".", "sub", "(", "'\\[\\d+\\]'", ",", "'[0]'", ",", "path_to_root", ")", "input_criteria", "=", "self", ".", "keyMap", "[", "rules_path_to_root", "]", "error_dict", "=", "{", "'object_title'", ":", "object_title", ",", "'model_schema'", ":", "self", ".", "schema", ",", "'input_criteria'", ":", "input_criteria", ",", "'failed_test'", ":", "'value_datatype'", ",", "'input_path'", ":", "path_to_root", ",", "'error_value'", ":", "input_string", ",", "'error_code'", ":", "4001", "}", "if", "'byte_data'", "in", "input_criteria", ".", "keys", "(", ")", ":", "if", "input_criteria", "[", "'byte_data'", "]", ":", "error_dict", "[", "'failed_test'", "]", "=", "'byte_data'", "error_dict", "[", "'error_code'", "]", "=", "4011", "try", ":", "decoded_bytes", "=", "b64decode", "(", "input_string", ")", "except", ":", "raise", "InputValidationError", "(", "error_dict", ")", "if", "not", "isinstance", "(", "decoded_bytes", ",", "bytes", ")", ":", "raise", "InputValidationError", "(", "error_dict", ")", "if", "'min_value'", "in", "input_criteria", ".", "keys", "(", ")", ":", "if", "input_string", "<", "input_criteria", "[", "'min_value'", "]", ":", "error_dict", "[", "'failed_test'", "]", "=", "'min_value'", "error_dict", "[", "'error_code'", "]", "=", "4022", "raise", "InputValidationError", "(", "error_dict", ")", "if", "'max_value'", "in", "input_criteria", ".", "keys", "(", ")", ":", "if", "input_string", ">", "input_criteria", "[", "'max_value'", "]", ":", "error_dict", "[", "'failed_test'", "]", "=", "'max_value'", "error_dict", "[", "'error_code'", "]", "=", "4023", "raise", "InputValidationError", "(", "error_dict", ")", "if", "'greater_than'", "in", "input_criteria", ".", "keys", "(", ")", ":", "if", "input_string", "<=", "input_criteria", "[", "'greater_than'", "]", ":", "error_dict", "[", "'failed_test'", "]", "=", "'greater_than'", "error_dict", "[", "'error_code'", "]", "=", "4024", "raise", "InputValidationError", "(", "error_dict", ")", "if", "'less_than'", "in", "input_criteria", ".", "keys", "(", ")", ":", "if", "input_string", ">=", "input_criteria", "[", "'less_than'", "]", ":", "error_dict", "[", "'failed_test'", "]", "=", "'less_than'", "error_dict", "[", "'error_code'", "]", "=", "4025", "raise", "InputValidationError", "(", "error_dict", ")", "if", "'equal_to'", "in", "input_criteria", ".", "keys", "(", ")", ":", "if", "input_string", "!=", "input_criteria", "[", "'equal_to'", "]", ":", "error_dict", "[", "'failed_test'", "]", "=", "'equal_to'", "error_dict", "[", "'error_code'", "]", "=", "4026", "raise", "InputValidationError", "(", "error_dict", ")", "if", "'min_length'", "in", "input_criteria", ".", "keys", "(", ")", ":", "if", "len", "(", "input_string", ")", "<", "input_criteria", "[", "'min_length'", "]", ":", "error_dict", "[", "'failed_test'", "]", "=", "'min_length'", "error_dict", "[", "'error_code'", "]", "=", "4012", "raise", "InputValidationError", "(", "error_dict", ")", "if", "'max_length'", "in", "input_criteria", ".", "keys", "(", ")", ":", "if", "len", "(", "input_string", ")", ">", "input_criteria", "[", "'max_length'", "]", ":", "error_dict", "[", "'failed_test'", "]", "=", "'max_length'", "error_dict", "[", "'error_code'", "]", "=", "4013", "raise", "InputValidationError", "(", "error_dict", ")", "if", "'must_not_contain'", "in", "input_criteria", ".", "keys", "(", ")", ":", "for", "regex", "in", 
"input_criteria", "[", "'must_not_contain'", "]", ":", "regex_pattern", "=", "re", ".", "compile", "(", "regex", ")", "if", "regex_pattern", ".", "findall", "(", "input_string", ")", ":", "error_dict", "[", "'failed_test'", "]", "=", "'must_not_contain'", "error_dict", "[", "'error_code'", "]", "=", "4014", "raise", "InputValidationError", "(", "error_dict", ")", "if", "'must_contain'", "in", "input_criteria", ".", "keys", "(", ")", ":", "for", "regex", "in", "input_criteria", "[", "'must_contain'", "]", ":", "regex_pattern", "=", "re", ".", "compile", "(", "regex", ")", "if", "not", "regex_pattern", ".", "findall", "(", "input_string", ")", ":", "error_dict", "[", "'failed_test'", "]", "=", "'must_contain'", "error_dict", "[", "'error_code'", "]", "=", "4015", "raise", "InputValidationError", "(", "error_dict", ")", "if", "'contains_either'", "in", "input_criteria", ".", "keys", "(", ")", ":", "regex_match", "=", "False", "for", "regex", "in", "input_criteria", "[", "'contains_either'", "]", ":", "regex_pattern", "=", "re", ".", "compile", "(", "regex", ")", "if", "regex_pattern", ".", "findall", "(", "input_string", ")", ":", "regex_match", "=", "True", "if", "not", "regex_match", ":", "error_dict", "[", "'failed_test'", "]", "=", "'contains_either'", "error_dict", "[", "'error_code'", "]", "=", "4016", "raise", "InputValidationError", "(", "error_dict", ")", "if", "'discrete_values'", "in", "input_criteria", ".", "keys", "(", ")", ":", "if", "input_string", "not", "in", "input_criteria", "[", "'discrete_values'", "]", ":", "error_dict", "[", "'failed_test'", "]", "=", "'discrete_values'", "error_dict", "[", "'error_code'", "]", "=", "4041", "raise", "InputValidationError", "(", "error_dict", ")", "if", "'excluded_values'", "in", "input_criteria", ".", "keys", "(", ")", ":", "if", "input_string", "in", "input_criteria", "[", "'excluded_values'", "]", ":", "error_dict", "[", "'failed_test'", "]", "=", "'excluded_values'", "error_dict", "[", "'error_code'", "]", "=", "4042", "raise", "InputValidationError", "(", "error_dict", ")", "# TODO: validate string against identical to reference", "# TODO: run lambda function and call validation url", "return", "input_string" ]
47.230769
0.001196
def get_input_list(self):
    """
    Description:
        Get input list

    Returns an ordered list of all available input keys and names
    """
    inputs = [' '] * len(self.command['input'])
    for key in self.command['input']:
        inputs[self.command['input'][key]['order']] = {"key":key,
                                                       "name":self.command['input'][key]['name']}
    return inputs
[ "def", "get_input_list", "(", "self", ")", ":", "inputs", "=", "[", "' '", "]", "*", "len", "(", "self", ".", "command", "[", "'input'", "]", ")", "for", "key", "in", "self", ".", "command", "[", "'input'", "]", ":", "inputs", "[", "self", ".", "command", "[", "'input'", "]", "[", "key", "]", "[", "'order'", "]", "]", "=", "{", "\"key\"", ":", "key", ",", "\"name\"", ":", "self", ".", "command", "[", "'input'", "]", "[", "key", "]", "[", "'name'", "]", "}", "return", "inputs" ]
32.583333
0.012438
def get_mesh_dict(self):
    """Returns calculated mesh sampling phonons

    Returns
    -------
    dict
        keys: qpoints, weights, frequencies, eigenvectors, and
              group_velocities

        Each value for the corresponding key is explained as below.

        qpoints: ndarray
            q-points in reduced coordinates of reciprocal lattice
            dtype='double'
            shape=(ir-grid points, 3)
        weights: ndarray
            Geometric q-point weights. Its sum is the number of grid
            points.
            dtype='intc'
            shape=(ir-grid points,)
        frequencies: ndarray
            Phonon frequencies at ir-grid points. Imaginary frequencies
            are represented by negative real numbers.
            dtype='double'
            shape=(ir-grid points, bands)
        eigenvectors: ndarray
            Phonon eigenvectors at ir-grid points. See the data structure
            at np.linalg.eigh.
            dtype='complex'
            shape=(ir-grid points, bands, bands)
        group_velocities: ndarray
            Phonon group velocities at ir-grid points.
            dtype='double'
            shape=(ir-grid points, bands, 3)

    """
    if self._mesh is None:
        msg = ("run_mesh has to be done.")
        raise RuntimeError(msg)

    retdict = {'qpoints': self._mesh.qpoints,
               'weights': self._mesh.weights,
               'frequencies': self._mesh.frequencies,
               'eigenvectors': self._mesh.eigenvectors,
               'group_velocities': self._mesh.group_velocities}

    return retdict
[ "def", "get_mesh_dict", "(", "self", ")", ":", "if", "self", ".", "_mesh", "is", "None", ":", "msg", "=", "(", "\"run_mesh has to be done.\"", ")", "raise", "RuntimeError", "(", "msg", ")", "retdict", "=", "{", "'qpoints'", ":", "self", ".", "_mesh", ".", "qpoints", ",", "'weights'", ":", "self", ".", "_mesh", ".", "weights", ",", "'frequencies'", ":", "self", ".", "_mesh", ".", "frequencies", ",", "'eigenvectors'", ":", "self", ".", "_mesh", ".", "eigenvectors", ",", "'group_velocities'", ":", "self", ".", "_mesh", ".", "group_velocities", "}", "return", "retdict" ]
36.361702
0.00114
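A usage sketch of the mesh workflow implied by the docstring and error message, assuming `phonon` is an object exposing `run_mesh()` and `get_mesh_dict()`:

phonon.run_mesh([8, 8, 8])
mesh = phonon.get_mesh_dict()
print(mesh['qpoints'].shape, mesh['frequencies'].shape)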
def copy_dir(source, dest, vars, verbosity=1, simulate=False, indent=0, sub_vars=True, interactive=False, overwrite=True, template_renderer=None, out_=sys.stdout): """ Copies the ``source`` directory to the ``dest`` directory. ``vars``: A dictionary of variables to use in any substitutions. ``verbosity``: Higher numbers will show more about what is happening. ``simulate``: If true, then don't actually *do* anything. ``indent``: Indent any messages by this amount. ``sub_vars``: If true, variables in ``_tmpl`` files and ``+var+`` in filenames will be substituted. ``overwrite``: If false, then don't ever overwrite anything. ``interactive``: If you are overwriting a file and interactive is true, then ask before overwriting. ``template_renderer``: This is a function for rendering templates (if you don't want to use string.Template). It should have the signature ``template_renderer(content_as_string, vars_as_dict, filename=filename)``. """ def out(msg): out_.write(msg) out_.write('\n') out_.flush() # This allows you to use a leading +dot+ in filenames which would # otherwise be skipped because leading dots make the file hidden: vars.setdefault('dot', '.') vars.setdefault('plus', '+') use_pkg_resources = isinstance(source, tuple) if use_pkg_resources: names = sorted(pkg_resources.resource_listdir(source[0], source[1])) else: names = sorted(os.listdir(source)) pad = ' '*(indent*2) if not os.path.exists(dest): if verbosity >= 1: out('%sCreating %s/' % (pad, dest)) if not simulate: makedirs(dest, verbosity=verbosity, pad=pad) elif verbosity >= 2: out('%sDirectory %s exists' % (pad, dest)) for name in names: if use_pkg_resources: full = '/'.join([source[1], name]) else: full = os.path.join(source, name) reason = should_skip_file(name) if reason: if verbosity >= 2: reason = pad + reason % {'filename': full} out(reason) continue # pragma: no cover if sub_vars: dest_full = os.path.join(dest, substitute_filename(name, vars)) sub_file = False if dest_full.endswith('_tmpl'): dest_full = dest_full[:-5] sub_file = sub_vars if use_pkg_resources and pkg_resources.resource_isdir(source[0], full): if verbosity: out('%sRecursing into %s' % (pad, os.path.basename(full))) copy_dir((source[0], full), dest_full, vars, verbosity, simulate, indent=indent+1, sub_vars=sub_vars, interactive=interactive, template_renderer=template_renderer, out_=out_) continue elif not use_pkg_resources and os.path.isdir(full): if verbosity: out('%sRecursing into %s' % (pad, os.path.basename(full))) copy_dir(full, dest_full, vars, verbosity, simulate, indent=indent+1, sub_vars=sub_vars, interactive=interactive, template_renderer=template_renderer, out_=out_) continue elif use_pkg_resources: content = pkg_resources.resource_string(source[0], full) else: f = open(full, 'rb') content = f.read() f.close() if sub_file: try: content = substitute_content( content, vars, filename=full, template_renderer=template_renderer ) except SkipTemplate: continue # pragma: no cover if content is None: continue # pragma: no cover already_exists = os.path.exists(dest_full) if already_exists: f = open(dest_full, 'rb') old_content = f.read() f.close() if old_content == content: if verbosity: out('%s%s already exists (same content)' % (pad, dest_full)) continue # pragma: no cover if interactive: if not query_interactive( native_(full, fsenc), native_(dest_full, fsenc), native_(content, fsenc), native_(old_content, fsenc), simulate=simulate, out_=out_): continue elif not overwrite: continue # pragma: no cover if verbosity and use_pkg_resources: out('%sCopying %s to %s' % (pad, full, dest_full)) elif verbosity: out( '%sCopying %s to %s' % (pad, os.path.basename(full), dest_full)) if not simulate: f = open(dest_full, 'wb') f.write(content) f.close()
[ "def", "copy_dir", "(", "source", ",", "dest", ",", "vars", ",", "verbosity", "=", "1", ",", "simulate", "=", "False", ",", "indent", "=", "0", ",", "sub_vars", "=", "True", ",", "interactive", "=", "False", ",", "overwrite", "=", "True", ",", "template_renderer", "=", "None", ",", "out_", "=", "sys", ".", "stdout", ")", ":", "def", "out", "(", "msg", ")", ":", "out_", ".", "write", "(", "msg", ")", "out_", ".", "write", "(", "'\\n'", ")", "out_", ".", "flush", "(", ")", "# This allows you to use a leading +dot+ in filenames which would", "# otherwise be skipped because leading dots make the file hidden:", "vars", ".", "setdefault", "(", "'dot'", ",", "'.'", ")", "vars", ".", "setdefault", "(", "'plus'", ",", "'+'", ")", "use_pkg_resources", "=", "isinstance", "(", "source", ",", "tuple", ")", "if", "use_pkg_resources", ":", "names", "=", "sorted", "(", "pkg_resources", ".", "resource_listdir", "(", "source", "[", "0", "]", ",", "source", "[", "1", "]", ")", ")", "else", ":", "names", "=", "sorted", "(", "os", ".", "listdir", "(", "source", ")", ")", "pad", "=", "' '", "*", "(", "indent", "*", "2", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dest", ")", ":", "if", "verbosity", ">=", "1", ":", "out", "(", "'%sCreating %s/'", "%", "(", "pad", ",", "dest", ")", ")", "if", "not", "simulate", ":", "makedirs", "(", "dest", ",", "verbosity", "=", "verbosity", ",", "pad", "=", "pad", ")", "elif", "verbosity", ">=", "2", ":", "out", "(", "'%sDirectory %s exists'", "%", "(", "pad", ",", "dest", ")", ")", "for", "name", "in", "names", ":", "if", "use_pkg_resources", ":", "full", "=", "'/'", ".", "join", "(", "[", "source", "[", "1", "]", ",", "name", "]", ")", "else", ":", "full", "=", "os", ".", "path", ".", "join", "(", "source", ",", "name", ")", "reason", "=", "should_skip_file", "(", "name", ")", "if", "reason", ":", "if", "verbosity", ">=", "2", ":", "reason", "=", "pad", "+", "reason", "%", "{", "'filename'", ":", "full", "}", "out", "(", "reason", ")", "continue", "# pragma: no cover", "if", "sub_vars", ":", "dest_full", "=", "os", ".", "path", ".", "join", "(", "dest", ",", "substitute_filename", "(", "name", ",", "vars", ")", ")", "sub_file", "=", "False", "if", "dest_full", ".", "endswith", "(", "'_tmpl'", ")", ":", "dest_full", "=", "dest_full", "[", ":", "-", "5", "]", "sub_file", "=", "sub_vars", "if", "use_pkg_resources", "and", "pkg_resources", ".", "resource_isdir", "(", "source", "[", "0", "]", ",", "full", ")", ":", "if", "verbosity", ":", "out", "(", "'%sRecursing into %s'", "%", "(", "pad", ",", "os", ".", "path", ".", "basename", "(", "full", ")", ")", ")", "copy_dir", "(", "(", "source", "[", "0", "]", ",", "full", ")", ",", "dest_full", ",", "vars", ",", "verbosity", ",", "simulate", ",", "indent", "=", "indent", "+", "1", ",", "sub_vars", "=", "sub_vars", ",", "interactive", "=", "interactive", ",", "template_renderer", "=", "template_renderer", ",", "out_", "=", "out_", ")", "continue", "elif", "not", "use_pkg_resources", "and", "os", ".", "path", ".", "isdir", "(", "full", ")", ":", "if", "verbosity", ":", "out", "(", "'%sRecursing into %s'", "%", "(", "pad", ",", "os", ".", "path", ".", "basename", "(", "full", ")", ")", ")", "copy_dir", "(", "full", ",", "dest_full", ",", "vars", ",", "verbosity", ",", "simulate", ",", "indent", "=", "indent", "+", "1", ",", "sub_vars", "=", "sub_vars", ",", "interactive", "=", "interactive", ",", "template_renderer", "=", "template_renderer", ",", "out_", "=", "out_", ")", "continue", "elif", "use_pkg_resources", ":", 
"content", "=", "pkg_resources", ".", "resource_string", "(", "source", "[", "0", "]", ",", "full", ")", "else", ":", "f", "=", "open", "(", "full", ",", "'rb'", ")", "content", "=", "f", ".", "read", "(", ")", "f", ".", "close", "(", ")", "if", "sub_file", ":", "try", ":", "content", "=", "substitute_content", "(", "content", ",", "vars", ",", "filename", "=", "full", ",", "template_renderer", "=", "template_renderer", ")", "except", "SkipTemplate", ":", "continue", "# pragma: no cover", "if", "content", "is", "None", ":", "continue", "# pragma: no cover", "already_exists", "=", "os", ".", "path", ".", "exists", "(", "dest_full", ")", "if", "already_exists", ":", "f", "=", "open", "(", "dest_full", ",", "'rb'", ")", "old_content", "=", "f", ".", "read", "(", ")", "f", ".", "close", "(", ")", "if", "old_content", "==", "content", ":", "if", "verbosity", ":", "out", "(", "'%s%s already exists (same content)'", "%", "(", "pad", ",", "dest_full", ")", ")", "continue", "# pragma: no cover", "if", "interactive", ":", "if", "not", "query_interactive", "(", "native_", "(", "full", ",", "fsenc", ")", ",", "native_", "(", "dest_full", ",", "fsenc", ")", ",", "native_", "(", "content", ",", "fsenc", ")", ",", "native_", "(", "old_content", ",", "fsenc", ")", ",", "simulate", "=", "simulate", ",", "out_", "=", "out_", ")", ":", "continue", "elif", "not", "overwrite", ":", "continue", "# pragma: no cover", "if", "verbosity", "and", "use_pkg_resources", ":", "out", "(", "'%sCopying %s to %s'", "%", "(", "pad", ",", "full", ",", "dest_full", ")", ")", "elif", "verbosity", ":", "out", "(", "'%sCopying %s to %s'", "%", "(", "pad", ",", "os", ".", "path", ".", "basename", "(", "full", ")", ",", "dest_full", ")", ")", "if", "not", "simulate", ":", "f", "=", "open", "(", "dest_full", ",", "'wb'", ")", "f", ".", "write", "(", "content", ")", "f", ".", "close", "(", ")" ]
38.984
0.002401
def close(self): """ Closes this cloud. """ if not (yield from super().close()): return False for nio in self._nios.values(): if nio and isinstance(nio, NIOUDP): self.manager.port_manager.release_udp_port(nio.lport, self._project) yield from self._stop_ubridge() log.info('Cloud "{name}" [{id}] has been closed'.format(name=self._name, id=self._id))
[ "def", "close", "(", "self", ")", ":", "if", "not", "(", "yield", "from", "super", "(", ")", ".", "close", "(", ")", ")", ":", "return", "False", "for", "nio", "in", "self", ".", "_nios", ".", "values", "(", ")", ":", "if", "nio", "and", "isinstance", "(", "nio", ",", "NIOUDP", ")", ":", "self", ".", "manager", ".", "port_manager", ".", "release_udp_port", "(", "nio", ".", "lport", ",", "self", ".", "_project", ")", "yield", "from", "self", ".", "_stop_ubridge", "(", ")", "log", ".", "info", "(", "'Cloud \"{name}\" [{id}] has been closed'", ".", "format", "(", "name", "=", "self", ".", "_name", ",", "id", "=", "self", ".", "_id", ")", ")" ]
31.071429
0.008929
def _check_voSet(orb,kwargs,funcName): """Function to check whether vo is set, because it's required for funcName""" if not orb._voSet and kwargs.get('vo',None) is None: warnings.warn("Method %s(.) requires vo to be given at Orbit initialization or at method evaluation; using default vo which is %f km/s" % (funcName,orb._vo), galpyWarning)
[ "def", "_check_voSet", "(", "orb", ",", "kwargs", ",", "funcName", ")", ":", "if", "not", "orb", ".", "_voSet", "and", "kwargs", ".", "get", "(", "'vo'", ",", "None", ")", "is", "None", ":", "warnings", ".", "warn", "(", "\"Method %s(.) requires vo to be given at Orbit initialization or at method evaluation; using default vo which is %f km/s\"", "%", "(", "funcName", ",", "orb", ".", "_vo", ")", ",", "galpyWarning", ")" ]
75
0.01847
def findExtname(fimg, extname, extver=None): """ Returns the list number of the extension corresponding to EXTNAME given. """ i = 0 extnum = None for chip in fimg: hdr = chip.header if 'EXTNAME' in hdr: if hdr['EXTNAME'].strip() == extname.upper(): if extver is None or hdr['EXTVER'] == extver: extnum = i break i += 1 return extnum
[ "def", "findExtname", "(", "fimg", ",", "extname", ",", "extver", "=", "None", ")", ":", "i", "=", "0", "extnum", "=", "None", "for", "chip", "in", "fimg", ":", "hdr", "=", "chip", ".", "header", "if", "'EXTNAME'", "in", "hdr", ":", "if", "hdr", "[", "'EXTNAME'", "]", ".", "strip", "(", ")", "==", "extname", ".", "upper", "(", ")", ":", "if", "extver", "is", "None", "or", "hdr", "[", "'EXTVER'", "]", "==", "extver", ":", "extnum", "=", "i", "break", "i", "+=", "1", "return", "extnum" ]
27.375
0.002208
def _EvaluateNumberOfElements(self, context): """Evaluates number of elements. Args: context (DataTypeMapContext): data type map context. Returns: int: number of elements. Raises: MappingError: if the number of elements cannot be determined. """ number_of_elements = None if self._data_type_definition.number_of_elements: number_of_elements = self._data_type_definition.number_of_elements elif self._data_type_definition.number_of_elements_expression: expression = self._data_type_definition.number_of_elements_expression namespace = {} if context and context.values: namespace.update(context.values) # Make sure __builtins__ contains an empty dictionary. namespace['__builtins__'] = {} try: number_of_elements = eval(expression, namespace) # pylint: disable=eval-used except Exception as exception: raise errors.MappingError( 'Unable to determine number of elements with error: {0!s}'.format( exception)) if number_of_elements is None or number_of_elements < 0: raise errors.MappingError( 'Invalid number of elements: {0!s}'.format(number_of_elements)) return number_of_elements
[ "def", "_EvaluateNumberOfElements", "(", "self", ",", "context", ")", ":", "number_of_elements", "=", "None", "if", "self", ".", "_data_type_definition", ".", "number_of_elements", ":", "number_of_elements", "=", "self", ".", "_data_type_definition", ".", "number_of_elements", "elif", "self", ".", "_data_type_definition", ".", "number_of_elements_expression", ":", "expression", "=", "self", ".", "_data_type_definition", ".", "number_of_elements_expression", "namespace", "=", "{", "}", "if", "context", "and", "context", ".", "values", ":", "namespace", ".", "update", "(", "context", ".", "values", ")", "# Make sure __builtins__ contains an empty dictionary.", "namespace", "[", "'__builtins__'", "]", "=", "{", "}", "try", ":", "number_of_elements", "=", "eval", "(", "expression", ",", "namespace", ")", "# pylint: disable=eval-used", "except", "Exception", "as", "exception", ":", "raise", "errors", ".", "MappingError", "(", "'Unable to determine number of elements with error: {0!s}'", ".", "format", "(", "exception", ")", ")", "if", "number_of_elements", "is", "None", "or", "number_of_elements", "<", "0", ":", "raise", "errors", ".", "MappingError", "(", "'Invalid number of elements: {0!s}'", ".", "format", "(", "number_of_elements", ")", ")", "return", "number_of_elements" ]
34.055556
0.008723
def _ensure_data(values, dtype=None): """ routine to ensure that our data is of the correct input dtype for lower-level routines This will coerce: - ints -> int64 - uint -> uint64 - bool -> uint64 (TODO this should be uint8) - datetimelike -> i8 - datetime64tz -> i8 (in local tz) - categorical -> codes Parameters ---------- values : array-like dtype : pandas_dtype, optional coerce to this dtype Returns ------- (ndarray, pandas_dtype, algo dtype as a string) """ # we check some simple dtypes first try: if is_object_dtype(dtype): return ensure_object(np.asarray(values)), 'object', 'object' if is_bool_dtype(values) or is_bool_dtype(dtype): # we are actually coercing to uint64 # until our algos support uint8 directly (see TODO) return np.asarray(values).astype('uint64'), 'bool', 'uint64' elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype): return ensure_int64(values), 'int64', 'int64' elif (is_unsigned_integer_dtype(values) or is_unsigned_integer_dtype(dtype)): return ensure_uint64(values), 'uint64', 'uint64' elif is_float_dtype(values) or is_float_dtype(dtype): return ensure_float64(values), 'float64', 'float64' elif is_object_dtype(values) and dtype is None: return ensure_object(np.asarray(values)), 'object', 'object' elif is_complex_dtype(values) or is_complex_dtype(dtype): # ignore the fact that we are casting to float # which discards complex parts with catch_warnings(): simplefilter("ignore", np.ComplexWarning) values = ensure_float64(values) return values, 'float64', 'float64' except (TypeError, ValueError, OverflowError): # if we are trying to coerce to a dtype # and it is incompat this will fall thru to here return ensure_object(values), 'object', 'object' # datetimelike if (needs_i8_conversion(values) or is_period_dtype(dtype) or is_datetime64_any_dtype(dtype) or is_timedelta64_dtype(dtype)): if is_period_dtype(values) or is_period_dtype(dtype): from pandas import PeriodIndex values = PeriodIndex(values) dtype = values.dtype elif is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype): from pandas import TimedeltaIndex values = TimedeltaIndex(values) dtype = values.dtype else: # Datetime from pandas import DatetimeIndex values = DatetimeIndex(values) dtype = values.dtype return values.asi8, dtype, 'int64' elif (is_categorical_dtype(values) and (is_categorical_dtype(dtype) or dtype is None)): values = getattr(values, 'values', values) values = values.codes dtype = 'category' # we are actually coercing to int64 # until our algos support int* directly (not all do) values = ensure_int64(values) return values, dtype, 'int64' # we have failed, return object values = np.asarray(values, dtype=np.object) return ensure_object(values), 'object', 'object'
[ "def", "_ensure_data", "(", "values", ",", "dtype", "=", "None", ")", ":", "# we check some simple dtypes first", "try", ":", "if", "is_object_dtype", "(", "dtype", ")", ":", "return", "ensure_object", "(", "np", ".", "asarray", "(", "values", ")", ")", ",", "'object'", ",", "'object'", "if", "is_bool_dtype", "(", "values", ")", "or", "is_bool_dtype", "(", "dtype", ")", ":", "# we are actually coercing to uint64", "# until our algos support uint8 directly (see TODO)", "return", "np", ".", "asarray", "(", "values", ")", ".", "astype", "(", "'uint64'", ")", ",", "'bool'", ",", "'uint64'", "elif", "is_signed_integer_dtype", "(", "values", ")", "or", "is_signed_integer_dtype", "(", "dtype", ")", ":", "return", "ensure_int64", "(", "values", ")", ",", "'int64'", ",", "'int64'", "elif", "(", "is_unsigned_integer_dtype", "(", "values", ")", "or", "is_unsigned_integer_dtype", "(", "dtype", ")", ")", ":", "return", "ensure_uint64", "(", "values", ")", ",", "'uint64'", ",", "'uint64'", "elif", "is_float_dtype", "(", "values", ")", "or", "is_float_dtype", "(", "dtype", ")", ":", "return", "ensure_float64", "(", "values", ")", ",", "'float64'", ",", "'float64'", "elif", "is_object_dtype", "(", "values", ")", "and", "dtype", "is", "None", ":", "return", "ensure_object", "(", "np", ".", "asarray", "(", "values", ")", ")", ",", "'object'", ",", "'object'", "elif", "is_complex_dtype", "(", "values", ")", "or", "is_complex_dtype", "(", "dtype", ")", ":", "# ignore the fact that we are casting to float", "# which discards complex parts", "with", "catch_warnings", "(", ")", ":", "simplefilter", "(", "\"ignore\"", ",", "np", ".", "ComplexWarning", ")", "values", "=", "ensure_float64", "(", "values", ")", "return", "values", ",", "'float64'", ",", "'float64'", "except", "(", "TypeError", ",", "ValueError", ",", "OverflowError", ")", ":", "# if we are trying to coerce to a dtype", "# and it is incompat this will fall thru to here", "return", "ensure_object", "(", "values", ")", ",", "'object'", ",", "'object'", "# datetimelike", "if", "(", "needs_i8_conversion", "(", "values", ")", "or", "is_period_dtype", "(", "dtype", ")", "or", "is_datetime64_any_dtype", "(", "dtype", ")", "or", "is_timedelta64_dtype", "(", "dtype", ")", ")", ":", "if", "is_period_dtype", "(", "values", ")", "or", "is_period_dtype", "(", "dtype", ")", ":", "from", "pandas", "import", "PeriodIndex", "values", "=", "PeriodIndex", "(", "values", ")", "dtype", "=", "values", ".", "dtype", "elif", "is_timedelta64_dtype", "(", "values", ")", "or", "is_timedelta64_dtype", "(", "dtype", ")", ":", "from", "pandas", "import", "TimedeltaIndex", "values", "=", "TimedeltaIndex", "(", "values", ")", "dtype", "=", "values", ".", "dtype", "else", ":", "# Datetime", "from", "pandas", "import", "DatetimeIndex", "values", "=", "DatetimeIndex", "(", "values", ")", "dtype", "=", "values", ".", "dtype", "return", "values", ".", "asi8", ",", "dtype", ",", "'int64'", "elif", "(", "is_categorical_dtype", "(", "values", ")", "and", "(", "is_categorical_dtype", "(", "dtype", ")", "or", "dtype", "is", "None", ")", ")", ":", "values", "=", "getattr", "(", "values", ",", "'values'", ",", "values", ")", "values", "=", "values", ".", "codes", "dtype", "=", "'category'", "# we are actually coercing to int64", "# until our algos support int* directly (not all do)", "values", "=", "ensure_int64", "(", "values", ")", "return", "values", ",", "dtype", ",", "'int64'", "# we have failed, return object", "values", "=", "np", ".", "asarray", "(", "values", ",", "dtype", "=", 
"np", ".", "object", ")", "return", "ensure_object", "(", "values", ")", ",", "'object'", ",", "'object'" ]
35.673913
0.000296
def _newsToDF(n): '''internal''' df = pd.DataFrame(n) _toDatetime(df) _reindex(df, 'datetime') return df
[ "def", "_newsToDF", "(", "n", ")", ":", "df", "=", "pd", ".", "DataFrame", "(", "n", ")", "_toDatetime", "(", "df", ")", "_reindex", "(", "df", ",", "'datetime'", ")", "return", "df" ]
19.833333
0.008065
def f1_score(y_true, y_pred, average='micro', suffix=False): """Compute the F1 score. The F1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst score at 0. The relative contribution of precision and recall to the F1 score are equal. The formula for the F1 score is:: F1 = 2 * (precision * recall) / (precision + recall) Args: y_true : 2d array. Ground truth (correct) target values. y_pred : 2d array. Estimated targets as returned by a tagger. Returns: score : float. Example: >>> from seqeval.metrics import f1_score >>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] >>> f1_score(y_true, y_pred) 0.50 """ true_entities = set(get_entities(y_true, suffix)) pred_entities = set(get_entities(y_pred, suffix)) nb_correct = len(true_entities & pred_entities) nb_pred = len(pred_entities) nb_true = len(true_entities) p = nb_correct / nb_pred if nb_pred > 0 else 0 r = nb_correct / nb_true if nb_true > 0 else 0 score = 2 * p * r / (p + r) if p + r > 0 else 0 return score
[ "def", "f1_score", "(", "y_true", ",", "y_pred", ",", "average", "=", "'micro'", ",", "suffix", "=", "False", ")", ":", "true_entities", "=", "set", "(", "get_entities", "(", "y_true", ",", "suffix", ")", ")", "pred_entities", "=", "set", "(", "get_entities", "(", "y_pred", ",", "suffix", ")", ")", "nb_correct", "=", "len", "(", "true_entities", "&", "pred_entities", ")", "nb_pred", "=", "len", "(", "pred_entities", ")", "nb_true", "=", "len", "(", "true_entities", ")", "p", "=", "nb_correct", "/", "nb_pred", "if", "nb_pred", ">", "0", "else", "0", "r", "=", "nb_correct", "/", "nb_true", "if", "nb_true", ">", "0", "else", "0", "score", "=", "2", "*", "p", "*", "r", "/", "(", "p", "+", "r", ")", "if", "p", "+", "r", ">", "0", "else", "0", "return", "score" ]
36.166667
0.002244
def _query_pageant(msg): """ Communication with the Pageant process is done through a shared memory-mapped file. """ hwnd = _get_pageant_window_object() if not hwnd: # Raise a failure to connect exception, pageant isn't running anymore! return None # create a name for the mmap map_name = "PageantRequest%08x" % thread.get_ident() pymap = _winapi.MemoryMap( map_name, _AGENT_MAX_MSGLEN, _winapi.get_security_attributes_for_user() ) with pymap: pymap.write(msg) # Create an array buffer containing the mapped filename char_buffer = array.array("b", b(map_name) + zero_byte) # noqa char_buffer_address, char_buffer_size = char_buffer.buffer_info() # Create a string to use for the SendMessage function call cds = COPYDATASTRUCT( _AGENT_COPYDATA_ID, char_buffer_size, char_buffer_address ) response = ctypes.windll.user32.SendMessageA( hwnd, win32con_WM_COPYDATA, ctypes.sizeof(cds), ctypes.byref(cds) ) if response > 0: pymap.seek(0) datalen = pymap.read(4) retlen = struct.unpack(">I", datalen)[0] return datalen + pymap.read(retlen) return None
[ "def", "_query_pageant", "(", "msg", ")", ":", "hwnd", "=", "_get_pageant_window_object", "(", ")", "if", "not", "hwnd", ":", "# Raise a failure to connect exception, pageant isn't running anymore!", "return", "None", "# create a name for the mmap", "map_name", "=", "\"PageantRequest%08x\"", "%", "thread", ".", "get_ident", "(", ")", "pymap", "=", "_winapi", ".", "MemoryMap", "(", "map_name", ",", "_AGENT_MAX_MSGLEN", ",", "_winapi", ".", "get_security_attributes_for_user", "(", ")", ")", "with", "pymap", ":", "pymap", ".", "write", "(", "msg", ")", "# Create an array buffer containing the mapped filename", "char_buffer", "=", "array", ".", "array", "(", "\"b\"", ",", "b", "(", "map_name", ")", "+", "zero_byte", ")", "# noqa", "char_buffer_address", ",", "char_buffer_size", "=", "char_buffer", ".", "buffer_info", "(", ")", "# Create a string to use for the SendMessage function call", "cds", "=", "COPYDATASTRUCT", "(", "_AGENT_COPYDATA_ID", ",", "char_buffer_size", ",", "char_buffer_address", ")", "response", "=", "ctypes", ".", "windll", ".", "user32", ".", "SendMessageA", "(", "hwnd", ",", "win32con_WM_COPYDATA", ",", "ctypes", ".", "sizeof", "(", "cds", ")", ",", "ctypes", ".", "byref", "(", "cds", ")", ")", "if", "response", ">", "0", ":", "pymap", ".", "seek", "(", "0", ")", "datalen", "=", "pymap", ".", "read", "(", "4", ")", "retlen", "=", "struct", ".", "unpack", "(", "\">I\"", ",", "datalen", ")", "[", "0", "]", "return", "datalen", "+", "pymap", ".", "read", "(", "retlen", ")", "return", "None" ]
34.5
0.000783
def download(*packages, **kwargs): ''' Download packages to the local disk. refresh force a refresh if set to True. If set to False (default) it depends on zypper if a refresh is executed. root operate on a different root directory. CLI example: .. code-block:: bash salt '*' pkg.download httpd salt '*' pkg.download httpd postfix ''' if not packages: raise SaltInvocationError('No packages specified') root = kwargs.get('root', None) refresh = kwargs.get('refresh', False) if refresh: refresh_db(root) pkg_ret = {} for dld_result in __zypper__(root=root).xml.call('download', *packages).getElementsByTagName("download-result"): repo = dld_result.getElementsByTagName("repository")[0] path = dld_result.getElementsByTagName("localfile")[0].getAttribute("path") pkg_info = { 'repository-name': repo.getAttribute('name'), 'repository-alias': repo.getAttribute('alias'), 'path': path, } key = _get_first_aggregate_text( dld_result.getElementsByTagName('name') ) if __salt__['lowpkg.checksum'](pkg_info['path'], root=root): pkg_ret[key] = pkg_info if pkg_ret: failed = [pkg for pkg in packages if pkg not in pkg_ret] if failed: pkg_ret['_error'] = ('The following package(s) failed to download: {0}'.format(', '.join(failed))) return pkg_ret raise CommandExecutionError( 'Unable to download packages: {0}'.format(', '.join(packages)) )
[ "def", "download", "(", "*", "packages", ",", "*", "*", "kwargs", ")", ":", "if", "not", "packages", ":", "raise", "SaltInvocationError", "(", "'No packages specified'", ")", "root", "=", "kwargs", ".", "get", "(", "'root'", ",", "None", ")", "refresh", "=", "kwargs", ".", "get", "(", "'refresh'", ",", "False", ")", "if", "refresh", ":", "refresh_db", "(", "root", ")", "pkg_ret", "=", "{", "}", "for", "dld_result", "in", "__zypper__", "(", "root", "=", "root", ")", ".", "xml", ".", "call", "(", "'download'", ",", "*", "packages", ")", ".", "getElementsByTagName", "(", "\"download-result\"", ")", ":", "repo", "=", "dld_result", ".", "getElementsByTagName", "(", "\"repository\"", ")", "[", "0", "]", "path", "=", "dld_result", ".", "getElementsByTagName", "(", "\"localfile\"", ")", "[", "0", "]", ".", "getAttribute", "(", "\"path\"", ")", "pkg_info", "=", "{", "'repository-name'", ":", "repo", ".", "getAttribute", "(", "'name'", ")", ",", "'repository-alias'", ":", "repo", ".", "getAttribute", "(", "'alias'", ")", ",", "'path'", ":", "path", ",", "}", "key", "=", "_get_first_aggregate_text", "(", "dld_result", ".", "getElementsByTagName", "(", "'name'", ")", ")", "if", "__salt__", "[", "'lowpkg.checksum'", "]", "(", "pkg_info", "[", "'path'", "]", ",", "root", "=", "root", ")", ":", "pkg_ret", "[", "key", "]", "=", "pkg_info", "if", "pkg_ret", ":", "failed", "=", "[", "pkg", "for", "pkg", "in", "packages", "if", "pkg", "not", "in", "pkg_ret", "]", "if", "failed", ":", "pkg_ret", "[", "'_error'", "]", "=", "(", "'The following package(s) failed to download: {0}'", ".", "format", "(", "', '", ".", "join", "(", "failed", ")", ")", ")", "return", "pkg_ret", "raise", "CommandExecutionError", "(", "'Unable to download packages: {0}'", ".", "format", "(", "', '", ".", "join", "(", "packages", ")", ")", ")" ]
30.346154
0.002455
def send(MESSAGE, SOCKET, MESSAGE_ID=None, CODE_FILE=None, CODE_LINE=None, CODE_FUNC=None, **kwargs): r"""Send a message to the journal. >>> journal.send('Hello world') >>> journal.send('Hello, again, world', FIELD2='Greetings!') >>> journal.send('Binary message', BINARY=b'\xde\xad\xbe\xef') Value of the MESSAGE argument will be used for the MESSAGE= field. MESSAGE must be a string and will be sent as UTF-8 to the journal. MESSAGE_ID can be given to uniquely identify the type of message. It must be a string or a uuid.UUID object. CODE_LINE, CODE_FILE, and CODE_FUNC can be specified to identify the caller. Unless at least one of the three is given, values are extracted from the stack frame of the caller of send(). CODE_FILE and CODE_FUNC must be strings, CODE_LINE must be an integer. Additional fields for the journal entry can only be specified as keyword arguments. The payload can be either a string or bytes. A string will be sent as UTF-8, and bytes will be sent as-is to the journal. Other useful fields include PRIORITY, SYSLOG_FACILITY, SYSLOG_IDENTIFIER, SYSLOG_PID. """ args = ['MESSAGE=' + MESSAGE] if MESSAGE_ID is not None: id = getattr(MESSAGE_ID, 'hex', MESSAGE_ID) args.append('MESSAGE_ID=' + id) if CODE_LINE == CODE_FILE == CODE_FUNC == None: CODE_FILE, CODE_LINE, CODE_FUNC = \ _traceback.extract_stack(limit=2)[0][:3] if CODE_FILE is not None: args.append('CODE_FILE=' + CODE_FILE) if CODE_LINE is not None: args.append('CODE_LINE={:d}'.format(CODE_LINE)) if CODE_FUNC is not None: args.append('CODE_FUNC=' + CODE_FUNC) args.extend(_make_line(key.upper(), val) for key, val in kwargs.items()) return sendv(SOCKET, *args)
[ "def", "send", "(", "MESSAGE", ",", "SOCKET", ",", "MESSAGE_ID", "=", "None", ",", "CODE_FILE", "=", "None", ",", "CODE_LINE", "=", "None", ",", "CODE_FUNC", "=", "None", ",", "*", "*", "kwargs", ")", ":", "args", "=", "[", "'MESSAGE='", "+", "MESSAGE", "]", "if", "MESSAGE_ID", "is", "not", "None", ":", "id", "=", "getattr", "(", "MESSAGE_ID", ",", "'hex'", ",", "MESSAGE_ID", ")", "args", ".", "append", "(", "'MESSAGE_ID='", "+", "id", ")", "if", "CODE_LINE", "==", "CODE_FILE", "==", "CODE_FUNC", "==", "None", ":", "CODE_FILE", ",", "CODE_LINE", ",", "CODE_FUNC", "=", "_traceback", ".", "extract_stack", "(", "limit", "=", "2", ")", "[", "0", "]", "[", ":", "3", "]", "if", "CODE_FILE", "is", "not", "None", ":", "args", ".", "append", "(", "'CODE_FILE='", "+", "CODE_FILE", ")", "if", "CODE_LINE", "is", "not", "None", ":", "args", ".", "append", "(", "'CODE_LINE={:d}'", ".", "format", "(", "CODE_LINE", ")", ")", "if", "CODE_FUNC", "is", "not", "None", ":", "args", ".", "append", "(", "'CODE_FUNC='", "+", "CODE_FUNC", ")", "args", ".", "extend", "(", "_make_line", "(", "key", ".", "upper", "(", ")", ",", "val", ")", "for", "key", ",", "val", "in", "kwargs", ".", "items", "(", ")", ")", "return", "sendv", "(", "SOCKET", ",", "*", "args", ")" ]
36.836735
0.001079
def _jdn(self): """Return the Julian date number for the given date.""" if self._last_updated == "gdate": return conv.gdate_to_jdn(self.gdate) return conv.hdate_to_jdn(self.hdate)
[ "def", "_jdn", "(", "self", ")", ":", "if", "self", ".", "_last_updated", "==", "\"gdate\"", ":", "return", "conv", ".", "gdate_to_jdn", "(", "self", ".", "gdate", ")", "return", "conv", ".", "hdate_to_jdn", "(", "self", ".", "hdate", ")" ]
42.2
0.009302
def save(self, force_insert=False, force_update=False, using=None, update_fields=None): """ Set html field with correct iframe. """ if self.url: iframe_html = '<iframe src="{}" frameborder="0" title="{}" allowfullscreen></iframe>' self.html = iframe_html.format( self.get_embed_url(), self.title ) return super().save(force_insert, force_update, using, update_fields)
[ "def", "save", "(", "self", ",", "force_insert", "=", "False", ",", "force_update", "=", "False", ",", "using", "=", "None", ",", "update_fields", "=", "None", ")", ":", "if", "self", ".", "url", ":", "iframe_html", "=", "'<iframe src=\"{}\" frameborder=\"0\" title=\"{}\" allowfullscreen></iframe>'", "self", ".", "html", "=", "iframe_html", ".", "format", "(", "self", ".", "get_embed_url", "(", ")", ",", "self", ".", "title", ")", "return", "super", "(", ")", ".", "save", "(", "force_insert", ",", "force_update", ",", "using", ",", "update_fields", ")" ]
50.111111
0.008715
def update_generators(): '''Update the context of all generators Adds useful variables and translations into the template context and interlinks translations ''' for generator in _GENERATOR_DB.keys(): install_templates_translations(generator) add_variables_to_context(generator) interlink_static_files(generator) interlink_removed_content(generator) interlink_translated_content(generator)
[ "def", "update_generators", "(", ")", ":", "for", "generator", "in", "_GENERATOR_DB", ".", "keys", "(", ")", ":", "install_templates_translations", "(", "generator", ")", "add_variables_to_context", "(", "generator", ")", "interlink_static_files", "(", "generator", ")", "interlink_removed_content", "(", "generator", ")", "interlink_translated_content", "(", "generator", ")" ]
36.416667
0.002232
def is_extension_type(arr): """ Check whether an array-like is of a pandas extension class instance. Extension classes include categoricals, pandas sparse objects (i.e. classes represented within the pandas library and not ones external to it like scipy sparse matrices), and datetime-like arrays. Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is of a pandas extension class instance. Examples -------- >>> is_extension_type([1, 2, 3]) False >>> is_extension_type(np.array([1, 2, 3])) False >>> >>> cat = pd.Categorical([1, 2, 3]) >>> >>> is_extension_type(cat) True >>> is_extension_type(pd.Series(cat)) True >>> is_extension_type(pd.SparseArray([1, 2, 3])) True >>> is_extension_type(pd.SparseSeries([1, 2, 3])) True >>> >>> from scipy.sparse import bsr_matrix >>> is_extension_type(bsr_matrix([1, 2, 3])) False >>> is_extension_type(pd.DatetimeIndex([1, 2, 3])) False >>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True >>> >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern") >>> s = pd.Series([], dtype=dtype) >>> is_extension_type(s) True """ if is_categorical(arr): return True elif is_sparse(arr): return True elif is_datetime64tz_dtype(arr): return True return False
[ "def", "is_extension_type", "(", "arr", ")", ":", "if", "is_categorical", "(", "arr", ")", ":", "return", "True", "elif", "is_sparse", "(", "arr", ")", ":", "return", "True", "elif", "is_datetime64tz_dtype", "(", "arr", ")", ":", "return", "True", "return", "False" ]
25.245614
0.000669
def expires(duration, vary=None, currtime=time.time): """Decorator. Apply on a :class:`wsgiservice.Resource` method to set the max-age cache control parameter to the given duration. Also calculates the correct ``Expires`` response header. :param duration: Age which this resource may have before becoming stale. :type duration: :mod:`datetime.timedelta` :param vary: List of headers that should be added to the Vary response header. :type vary: list of strings :param currtime: Function used to find out the current UTC time. This is used for testing and not required in production code. :type currtime: Function returning a :mod:`time.struct_time` """ if isinstance(duration, timedelta): duration = timedelta_to_seconds(duration) @decorator def _expires(func, *args, **kwargs): "Sets the expirations header to the given duration." res = args[0].response res.cache_control.max_age = duration res.expires = currtime() + duration if vary: if res.vary is None: res.vary = vary else: # A bit completed because res.vary is usually a tuple. res.vary = list(set(list(res.vary) + list(vary))) return func(*args, **kwargs) return _expires
[ "def", "expires", "(", "duration", ",", "vary", "=", "None", ",", "currtime", "=", "time", ".", "time", ")", ":", "if", "isinstance", "(", "duration", ",", "timedelta", ")", ":", "duration", "=", "timedelta_to_seconds", "(", "duration", ")", "@", "decorator", "def", "_expires", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"Sets the expirations header to the given duration.\"", "res", "=", "args", "[", "0", "]", ".", "response", "res", ".", "cache_control", ".", "max_age", "=", "duration", "res", ".", "expires", "=", "currtime", "(", ")", "+", "duration", "if", "vary", ":", "if", "res", ".", "vary", "is", "None", ":", "res", ".", "vary", "=", "vary", "else", ":", "# A bit completed because res.vary is usually a tuple.", "res", ".", "vary", "=", "list", "(", "set", "(", "list", "(", "res", ".", "vary", ")", "+", "list", "(", "vary", ")", ")", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_expires" ]
38.911765
0.000737
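A minimal usage sketch for the expires decorator above, assuming a wsgiservice.Resource subclass as the docstring describes; the Document class and its GET handler are illustrative, only the decorator call follows the documented signature:

from datetime import timedelta

import wsgiservice


class Document(wsgiservice.Resource):
    """Illustrative resource; only the decorator usage comes from the record above."""

    @expires(timedelta(hours=1), vary=['Accept'])
    def GET(self, id):
        # The response gets Cache-Control: max-age=3600, an Expires header one
        # hour ahead of the current UTC time, and 'Accept' merged into Vary.
        return {'id': id}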
def _notify_add_at(self, index, length=1): """Notify about an AddChange at a certain index and length.""" slice_ = self._slice_at(index, length) self._notify_add(slice_)
[ "def", "_notify_add_at", "(", "self", ",", "index", ",", "length", "=", "1", ")", ":", "slice_", "=", "self", ".", "_slice_at", "(", "index", ",", "length", ")", "self", ".", "_notify_add", "(", "slice_", ")" ]
47.75
0.010309
def sky(lon=None,lat=None,size=1): """ Outputs uniform points on sphere from: [0 < lon < 360] & [-90 < lat < 90] """ if lon is None: umin,umax = 0,1 else: lon = np.asarray(lon) lon = np.radians(lon + 360.*(lon<0)) if lon.size==1: umin=umax=lon/(2*np.pi) elif lon.size==2: umin,umax=lon/(2*np.pi) else: raise Exception('...') if lat is None: vmin,vmax = -1,1 else: lat = np.asarray(lat) lat = np.radians(90 - lat) if lat.size==1: vmin=vmax=np.cos(lat) elif lat.size==2: vmin,vmax=np.cos(lat) else: raise Exception('...') phi = 2*np.pi*np.random.uniform(umin,umax,size=size) theta = np.arcsin(np.random.uniform(vmin,vmax,size=size)) return np.degrees(phi),np.degrees(theta)
[ "def", "sky", "(", "lon", "=", "None", ",", "lat", "=", "None", ",", "size", "=", "1", ")", ":", "if", "lon", "is", "None", ":", "umin", ",", "umax", "=", "0", ",", "1", "else", ":", "lon", "=", "np", ".", "asarray", "(", "lon", ")", "lon", "=", "np", ".", "radians", "(", "lon", "+", "360.", "*", "(", "lon", "<", "0", ")", ")", "if", "lon", ".", "size", "==", "1", ":", "umin", "=", "umax", "=", "lon", "/", "(", "2", "*", "np", ".", "pi", ")", "elif", "lon", ".", "size", "==", "2", ":", "umin", ",", "umax", "=", "lon", "/", "(", "2", "*", "np", ".", "pi", ")", "else", ":", "raise", "Exception", "(", "'...'", ")", "if", "lat", "is", "None", ":", "vmin", ",", "vmax", "=", "-", "1", ",", "1", "else", ":", "lat", "=", "np", ".", "asarray", "(", "lat", ")", "lat", "=", "np", ".", "radians", "(", "90", "-", "lat", ")", "if", "lat", ".", "size", "==", "1", ":", "vmin", "=", "vmax", "=", "np", ".", "cos", "(", "lat", ")", "elif", "lat", ".", "size", "==", "2", ":", "vmin", ",", "vmax", "=", "np", ".", "cos", "(", "lat", ")", "else", ":", "raise", "Exception", "(", "'...'", ")", "phi", "=", "2", "*", "np", ".", "pi", "*", "np", ".", "random", ".", "uniform", "(", "umin", ",", "umax", ",", "size", "=", "size", ")", "theta", "=", "np", ".", "arcsin", "(", "np", ".", "random", ".", "uniform", "(", "vmin", ",", "vmax", ",", "size", "=", "size", ")", ")", "return", "np", ".", "degrees", "(", "phi", ")", ",", "np", ".", "degrees", "(", "theta", ")" ]
30.769231
0.041212
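The sampler above returns longitudes and latitudes in degrees; a short call sketch (assumes sky() and numpy are importable in the current namespace):

import numpy as np

np.random.seed(0)  # reproducible draw

# 1000 points uniformly distributed over the full sphere, in degrees
lon, lat = sky(size=1000)

# Restrict the draw to a patch: 30 < lon < 60 and -10 < lat < 10
lon_p, lat_p = sky(lon=[30, 60], lat=[-10, 10], size=100)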
def validate(self, request, data): """ Validate response from OpenID server. Set identity in case of successful validation. """ client = consumer.Consumer(request.session, None) try: resp = client.complete(data, request.session['openid_return_to']) except KeyError: messages.error(request, lang.INVALID_RESPONSE_FROM_OPENID) return redirect('netauth-login') if resp.status == consumer.CANCEL: messages.warning(request, lang.OPENID_CANCELED) return redirect('netauth-login') elif resp.status == consumer.FAILURE: messages.error(request, lang.OPENID_FAILED % resp.message) return redirect('netauth-login') elif resp.status == consumer.SUCCESS: self.identity = resp.identity_url del request.session['openid_return_to'] return resp
[ "def", "validate", "(", "self", ",", "request", ",", "data", ")", ":", "client", "=", "consumer", ".", "Consumer", "(", "request", ".", "session", ",", "None", ")", "try", ":", "resp", "=", "client", ".", "complete", "(", "data", ",", "request", ".", "session", "[", "'openid_return_to'", "]", ")", "except", "KeyError", ":", "messages", ".", "error", "(", "request", ",", "lang", ".", "INVALID_RESPONSE_FROM_OPENID", ")", "return", "redirect", "(", "'netauth-login'", ")", "if", "resp", ".", "status", "==", "consumer", ".", "CANCEL", ":", "messages", ".", "warning", "(", "request", ",", "lang", ".", "OPENID_CANCELED", ")", "return", "redirect", "(", "'netauth-login'", ")", "elif", "resp", ".", "status", "==", "consumer", ".", "FAILURE", ":", "messages", ".", "error", "(", "request", ",", "lang", ".", "OPENID_FAILED", "%", "resp", ".", "message", ")", "return", "redirect", "(", "'netauth-login'", ")", "elif", "resp", ".", "status", "==", "consumer", ".", "SUCCESS", ":", "self", ".", "identity", "=", "resp", ".", "identity_url", "del", "request", ".", "session", "[", "'openid_return_to'", "]", "return", "resp" ]
41.272727
0.002153
def get(self, query, responseformat="geojson", verbosity="body", build=True): """Pass in an Overpass query in Overpass QL.""" # Construct full Overpass query if build: full_query = self._construct_ql_query( query, responseformat=responseformat, verbosity=verbosity ) else: full_query = query if self.debug: logging.getLogger().info(query) # Get the response from Overpass r = self._get_from_overpass(full_query) content_type = r.headers.get("content-type") if self.debug: print(content_type) if content_type == "text/csv": result = [] reader = csv.reader(StringIO(r.text), delimiter="\t") for row in reader: result.append(row) return result elif content_type in ("text/xml", "application/xml", "application/osm3s+xml"): return r.text elif content_type == "application/json": response = json.loads(r.text) if not build: return response # Check for valid answer from Overpass. # A valid answer contains an 'elements' key at the root level. if "elements" not in response: raise UnknownOverpassError("Received an invalid answer from Overpass.") # If there is a 'remark' key, it spells trouble. overpass_remark = response.get("remark", None) if overpass_remark and overpass_remark.startswith("runtime error"): raise ServerRuntimeError(overpass_remark) if responseformat is not "geojson": return response # construct geojson return self._as_geojson(response["elements"])
[ "def", "get", "(", "self", ",", "query", ",", "responseformat", "=", "\"geojson\"", ",", "verbosity", "=", "\"body\"", ",", "build", "=", "True", ")", ":", "# Construct full Overpass query", "if", "build", ":", "full_query", "=", "self", ".", "_construct_ql_query", "(", "query", ",", "responseformat", "=", "responseformat", ",", "verbosity", "=", "verbosity", ")", "else", ":", "full_query", "=", "query", "if", "self", ".", "debug", ":", "logging", ".", "getLogger", "(", ")", ".", "info", "(", "query", ")", "# Get the response from Overpass", "r", "=", "self", ".", "_get_from_overpass", "(", "full_query", ")", "content_type", "=", "r", ".", "headers", ".", "get", "(", "\"content-type\"", ")", "if", "self", ".", "debug", ":", "print", "(", "content_type", ")", "if", "content_type", "==", "\"text/csv\"", ":", "result", "=", "[", "]", "reader", "=", "csv", ".", "reader", "(", "StringIO", "(", "r", ".", "text", ")", ",", "delimiter", "=", "\"\\t\"", ")", "for", "row", "in", "reader", ":", "result", ".", "append", "(", "row", ")", "return", "result", "elif", "content_type", "in", "(", "\"text/xml\"", ",", "\"application/xml\"", ",", "\"application/osm3s+xml\"", ")", ":", "return", "r", ".", "text", "elif", "content_type", "==", "\"application/json\"", ":", "response", "=", "json", ".", "loads", "(", "r", ".", "text", ")", "if", "not", "build", ":", "return", "response", "# Check for valid answer from Overpass.", "# A valid answer contains an 'elements' key at the root level.", "if", "\"elements\"", "not", "in", "response", ":", "raise", "UnknownOverpassError", "(", "\"Received an invalid answer from Overpass.\"", ")", "# If there is a 'remark' key, it spells trouble.", "overpass_remark", "=", "response", ".", "get", "(", "\"remark\"", ",", "None", ")", "if", "overpass_remark", "and", "overpass_remark", ".", "startswith", "(", "\"runtime error\"", ")", ":", "raise", "ServerRuntimeError", "(", "overpass_remark", ")", "if", "responseformat", "is", "not", "\"geojson\"", ":", "return", "response", "# construct geojson", "return", "self", ".", "_as_geojson", "(", "response", "[", "\"elements\"", "]", ")" ]
35.6875
0.002273
def colorz(fd, n=DEFAULT_NUM_COLORS, min_v=DEFAULT_MINV, max_v=DEFAULT_MAXV, bold_add=DEFAULT_BOLD_ADD, order_colors=True): """ Get the n most dominant colors of an image. Clamps value to between min_v and max_v. Creates bold colors using bold_add. Total number of colors returned is 2*n, optionally ordered by hue. Returns as a list of pairs of RGB triples. For terminal colors, the hue order is: red, yellow, green, cyan, blue, magenta """ img = Image.open(fd) img.thumbnail(THUMB_SIZE) obs = get_colors(img) clamped = [clamp(color, min_v, max_v) for color in obs] clusters, _ = kmeans(array(clamped).astype(float), n) colors = order_by_hue(clusters) if order_colors else clusters return list(zip(colors, [brighten(c, bold_add) for c in colors]))
[ "def", "colorz", "(", "fd", ",", "n", "=", "DEFAULT_NUM_COLORS", ",", "min_v", "=", "DEFAULT_MINV", ",", "max_v", "=", "DEFAULT_MAXV", ",", "bold_add", "=", "DEFAULT_BOLD_ADD", ",", "order_colors", "=", "True", ")", ":", "img", "=", "Image", ".", "open", "(", "fd", ")", "img", ".", "thumbnail", "(", "THUMB_SIZE", ")", "obs", "=", "get_colors", "(", "img", ")", "clamped", "=", "[", "clamp", "(", "color", ",", "min_v", ",", "max_v", ")", "for", "color", "in", "obs", "]", "clusters", ",", "_", "=", "kmeans", "(", "array", "(", "clamped", ")", ".", "astype", "(", "float", ")", ",", "n", ")", "colors", "=", "order_by_hue", "(", "clusters", ")", "if", "order_colors", "else", "clusters", "return", "list", "(", "zip", "(", "colors", ",", "[", "brighten", "(", "c", ",", "bold_add", ")", "for", "c", "in", "colors", "]", ")", ")" ]
38.380952
0.001211
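A call sketch for colorz above; the image path is illustrative, and the function itself needs Pillow plus scipy's kmeans, as in the record:

# Four dominant colors, each paired with a brightened "bold" variant
with open('wallpaper.png', 'rb') as fd:
    pairs = colorz(fd, n=4)

for normal, bold in pairs:
    print(normal, bold)  # two RGB triples per palette entry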
def generateOutputInflowFile(self, out_nc, start_datetime_utc, number_of_timesteps, simulation_time_step_seconds, in_rapid_connect_file, in_rivid_lat_lon_z_file, land_surface_model_description, modeling_institution ): """ Generate inflow file for RAPID """ self.simulation_time_step_seconds = simulation_time_step_seconds # Create output inflow netcdf data print("Generating inflow file ...") data_out_nc = Dataset(out_nc, "w", format="NETCDF3_CLASSIC") rivid_list = np.loadtxt(in_rapid_connect_file, delimiter=",", ndmin=1, usecols=(0,), dtype=int) # create dimensions data_out_nc.createDimension('time', number_of_timesteps) data_out_nc.createDimension('rivid', len(rivid_list)) data_out_nc.createDimension('nv', 2) # create variables # m3_riv m3_riv_var = data_out_nc.createVariable('m3_riv', 'f4', ('time', 'rivid'), fill_value=0) m3_riv_var.long_name = 'accumulated external water volume ' \ 'inflow upstream of each river reach' m3_riv_var.units = 'm3' m3_riv_var.coordinates = 'lon lat' m3_riv_var.grid_mapping = 'crs' m3_riv_var.cell_methods = "time: sum" data_out_nc.close() try: data_out_nc = Dataset(out_nc, "a", format="NETCDF3_CLASSIC") # rivid rivid_var = data_out_nc.createVariable('rivid', 'i4', ('rivid',)) rivid_var.long_name = 'unique identifier for each river reach' rivid_var.units = '1' rivid_var.cf_role = 'timeseries_id' rivid_var[:] = rivid_list # time time_var = data_out_nc.createVariable('time', 'i4', ('time',)) time_var.long_name = 'time' time_var.standard_name = 'time' time_var.units = 'seconds since 1970-01-01 00:00:00+00:00' time_var.axis = 'T' time_var.calendar = 'gregorian' time_var.bounds = 'time_bnds' initial_time_seconds = \ (start_datetime_utc.replace(tzinfo=utc) - datetime(1970, 1, 1, tzinfo=utc)).total_seconds() final_time_seconds = \ initial_time_seconds + number_of_timesteps\ * simulation_time_step_seconds time_array = np.arange(initial_time_seconds, final_time_seconds, simulation_time_step_seconds) time_var[:] = time_array # time_bnds time_bnds_var = data_out_nc.createVariable('time_bnds', 'i4', ('time', 'nv',)) for time_index, time_element in enumerate(time_array): time_bnds_var[time_index, 0] = time_element time_bnds_var[time_index, 1] = \ time_element + simulation_time_step_seconds # longitude lon_var = data_out_nc.createVariable('lon', 'f8', ('rivid',), fill_value=-9999.0) lon_var.long_name = \ 'longitude of a point related to each river reach' lon_var.standard_name = 'longitude' lon_var.units = 'degrees_east' lon_var.axis = 'X' # latitude lat_var = data_out_nc.createVariable('lat', 'f8', ('rivid',), fill_value=-9999.0) lat_var.long_name = \ 'latitude of a point related to each river reach' lat_var.standard_name = 'latitude' lat_var.units = 'degrees_north' lat_var.axis = 'Y' crs_var = data_out_nc.createVariable('crs', 'i4') crs_var.grid_mapping_name = 'latitude_longitude' crs_var.epsg_code = 'EPSG:4326' # WGS 84 crs_var.semi_major_axis = 6378137.0 crs_var.inverse_flattening = 298.257223563 # add global attributes data_out_nc.Conventions = 'CF-1.6' data_out_nc.title = 'RAPID Inflow from {0}'\ .format(land_surface_model_description) data_out_nc.history = 'date_created: {0}'\ .format(datetime.utcnow().replace(tzinfo=utc)) data_out_nc.featureType = 'timeSeries' data_out_nc.institution = modeling_institution # write lat lon data self._write_lat_lon(data_out_nc, in_rivid_lat_lon_z_file) # close file data_out_nc.close() except RuntimeError: print("File size too big to add data beforehand." " Performing conversion after ...")
[ "def", "generateOutputInflowFile", "(", "self", ",", "out_nc", ",", "start_datetime_utc", ",", "number_of_timesteps", ",", "simulation_time_step_seconds", ",", "in_rapid_connect_file", ",", "in_rivid_lat_lon_z_file", ",", "land_surface_model_description", ",", "modeling_institution", ")", ":", "self", ".", "simulation_time_step_seconds", "=", "simulation_time_step_seconds", "# Create output inflow netcdf data\r", "print", "(", "\"Generating inflow file ...\"", ")", "data_out_nc", "=", "Dataset", "(", "out_nc", ",", "\"w\"", ",", "format", "=", "\"NETCDF3_CLASSIC\"", ")", "rivid_list", "=", "np", ".", "loadtxt", "(", "in_rapid_connect_file", ",", "delimiter", "=", "\",\"", ",", "ndmin", "=", "1", ",", "usecols", "=", "(", "0", ",", ")", ",", "dtype", "=", "int", ")", "# create dimensions\r", "data_out_nc", ".", "createDimension", "(", "'time'", ",", "number_of_timesteps", ")", "data_out_nc", ".", "createDimension", "(", "'rivid'", ",", "len", "(", "rivid_list", ")", ")", "data_out_nc", ".", "createDimension", "(", "'nv'", ",", "2", ")", "# create variables\r", "# m3_riv\r", "m3_riv_var", "=", "data_out_nc", ".", "createVariable", "(", "'m3_riv'", ",", "'f4'", ",", "(", "'time'", ",", "'rivid'", ")", ",", "fill_value", "=", "0", ")", "m3_riv_var", ".", "long_name", "=", "'accumulated external water volume '", "'inflow upstream of each river reach'", "m3_riv_var", ".", "units", "=", "'m3'", "m3_riv_var", ".", "coordinates", "=", "'lon lat'", "m3_riv_var", ".", "grid_mapping", "=", "'crs'", "m3_riv_var", ".", "cell_methods", "=", "\"time: sum\"", "data_out_nc", ".", "close", "(", ")", "try", ":", "data_out_nc", "=", "Dataset", "(", "out_nc", ",", "\"a\"", ",", "format", "=", "\"NETCDF3_CLASSIC\"", ")", "# rivid\r", "rivid_var", "=", "data_out_nc", ".", "createVariable", "(", "'rivid'", ",", "'i4'", ",", "(", "'rivid'", ",", ")", ")", "rivid_var", ".", "long_name", "=", "'unique identifier for each river reach'", "rivid_var", ".", "units", "=", "'1'", "rivid_var", ".", "cf_role", "=", "'timeseries_id'", "rivid_var", "[", ":", "]", "=", "rivid_list", "# time\r", "time_var", "=", "data_out_nc", ".", "createVariable", "(", "'time'", ",", "'i4'", ",", "(", "'time'", ",", ")", ")", "time_var", ".", "long_name", "=", "'time'", "time_var", ".", "standard_name", "=", "'time'", "time_var", ".", "units", "=", "'seconds since 1970-01-01 00:00:00+00:00'", "time_var", ".", "axis", "=", "'T'", "time_var", ".", "calendar", "=", "'gregorian'", "time_var", ".", "bounds", "=", "'time_bnds'", "initial_time_seconds", "=", "(", "start_datetime_utc", ".", "replace", "(", "tzinfo", "=", "utc", ")", "-", "datetime", "(", "1970", ",", "1", ",", "1", ",", "tzinfo", "=", "utc", ")", ")", ".", "total_seconds", "(", ")", "final_time_seconds", "=", "initial_time_seconds", "+", "number_of_timesteps", "*", "simulation_time_step_seconds", "time_array", "=", "np", ".", "arange", "(", "initial_time_seconds", ",", "final_time_seconds", ",", "simulation_time_step_seconds", ")", "time_var", "[", ":", "]", "=", "time_array", "# time_bnds\r", "time_bnds_var", "=", "data_out_nc", ".", "createVariable", "(", "'time_bnds'", ",", "'i4'", ",", "(", "'time'", ",", "'nv'", ",", ")", ")", "for", "time_index", ",", "time_element", "in", "enumerate", "(", "time_array", ")", ":", "time_bnds_var", "[", "time_index", ",", "0", "]", "=", "time_element", "time_bnds_var", "[", "time_index", ",", "1", "]", "=", "time_element", "+", "simulation_time_step_seconds", "# longitude\r", "lon_var", "=", "data_out_nc", ".", "createVariable", "(", "'lon'", 
",", "'f8'", ",", "(", "'rivid'", ",", ")", ",", "fill_value", "=", "-", "9999.0", ")", "lon_var", ".", "long_name", "=", "'longitude of a point related to each river reach'", "lon_var", ".", "standard_name", "=", "'longitude'", "lon_var", ".", "units", "=", "'degrees_east'", "lon_var", ".", "axis", "=", "'X'", "# latitude\r", "lat_var", "=", "data_out_nc", ".", "createVariable", "(", "'lat'", ",", "'f8'", ",", "(", "'rivid'", ",", ")", ",", "fill_value", "=", "-", "9999.0", ")", "lat_var", ".", "long_name", "=", "'latitude of a point related to each river reach'", "lat_var", ".", "standard_name", "=", "'latitude'", "lat_var", ".", "units", "=", "'degrees_north'", "lat_var", ".", "axis", "=", "'Y'", "crs_var", "=", "data_out_nc", ".", "createVariable", "(", "'crs'", ",", "'i4'", ")", "crs_var", ".", "grid_mapping_name", "=", "'latitude_longitude'", "crs_var", ".", "epsg_code", "=", "'EPSG:4326'", "# WGS 84\r", "crs_var", ".", "semi_major_axis", "=", "6378137.0", "crs_var", ".", "inverse_flattening", "=", "298.257223563", "# add global attributes\r", "data_out_nc", ".", "Conventions", "=", "'CF-1.6'", "data_out_nc", ".", "title", "=", "'RAPID Inflow from {0}'", "", ".", "format", "(", "land_surface_model_description", ")", "data_out_nc", ".", "history", "=", "'date_created: {0}'", ".", "format", "(", "datetime", ".", "utcnow", "(", ")", ".", "replace", "(", "tzinfo", "=", "utc", ")", ")", "data_out_nc", ".", "featureType", "=", "'timeSeries'", "data_out_nc", ".", "institution", "=", "modeling_institution", "# write lat lon data\r", "self", ".", "_write_lat_lon", "(", "data_out_nc", ",", "in_rivid_lat_lon_z_file", ")", "# close file\r", "data_out_nc", ".", "close", "(", ")", "except", "RuntimeError", ":", "print", "(", "\"File size too big to add data beforehand.\"", "\" Performing conversion after ...\"", ")" ]
44.882353
0.002015
def pshp_soundex_first(fname, max_length=4, german=False): """Calculate the PSHP Soundex/Viewex Coding of a first name. This is a wrapper for :py:meth:`PSHPSoundexFirst.encode`. Parameters ---------- fname : str The first name to encode max_length : int The length of the code returned (defaults to 4) german : bool Set to True if the name is German (different rules apply) Returns ------- str The PSHP Soundex/Viewex Coding Examples -------- >>> pshp_soundex_first('Smith') 'S530' >>> pshp_soundex_first('Waters') 'W352' >>> pshp_soundex_first('James') 'J700' >>> pshp_soundex_first('Schmidt') 'S500' >>> pshp_soundex_first('Ashcroft') 'A220' >>> pshp_soundex_first('John') 'J500' >>> pshp_soundex_first('Colin') 'K400' >>> pshp_soundex_first('Niall') 'N400' >>> pshp_soundex_first('Sally') 'S400' >>> pshp_soundex_first('Jane') 'J500' """ return PSHPSoundexFirst().encode(fname, max_length, german)
[ "def", "pshp_soundex_first", "(", "fname", ",", "max_length", "=", "4", ",", "german", "=", "False", ")", ":", "return", "PSHPSoundexFirst", "(", ")", ".", "encode", "(", "fname", ",", "max_length", ",", "german", ")" ]
23.409091
0.000932
def message(self): """ Return issue message. """ message = self.description.format(**self.parameters) return '{code} {message}'.format(code=self.code, message=message)
[ "def", "message", "(", "self", ")", ":", "message", "=", "self", ".", "description", ".", "format", "(", "*", "*", "self", ".", "parameters", ")", "return", "'{code} {message}'", ".", "format", "(", "code", "=", "self", ".", "code", ",", "message", "=", "message", ")" ]
33.666667
0.009662
def reportMatchCompletion(cfg, results, replayData): """send information back to the server about the match's winners/losers""" payload = json.dumps([cfg.flatten(), results, replayData]) ladder = cfg.ladder return requests.post( url = c.URL_BASE%(ladder.ipAddress, ladder.serverPort, "matchfinished"), data = payload, #headers=headers, )
[ "def", "reportMatchCompletion", "(", "cfg", ",", "results", ",", "replayData", ")", ":", "payload", "=", "json", ".", "dumps", "(", "[", "cfg", ".", "flatten", "(", ")", ",", "results", ",", "replayData", "]", ")", "ladder", "=", "cfg", ".", "ladder", "return", "requests", ".", "post", "(", "url", "=", "c", ".", "URL_BASE", "%", "(", "ladder", ".", "ipAddress", ",", "ladder", ".", "serverPort", ",", "\"matchfinished\"", ")", ",", "data", "=", "payload", ",", "#headers=headers,", ")" ]
41.555556
0.02356
def Pager(self, service): """A page generator for this service query and the provided service. This generates a page as a result from using the provided service's query() method until there are no more results to fetch. Args: service: The service object for making a query using this service query. Yields: A resulting page from querying the provided service. """ has_page = True while has_page: page = service.query(self) yield page has_page = self.HasNext(page) if has_page: self.NextPage()
[ "def", "Pager", "(", "self", ",", "service", ")", ":", "has_page", "=", "True", "while", "has_page", ":", "page", "=", "service", ".", "query", "(", "self", ")", "yield", "page", "has_page", "=", "self", ".", "HasNext", "(", "page", ")", "if", "has_page", ":", "self", ".", "NextPage", "(", ")" ]
29.105263
0.008757
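A sketch of consuming the Pager generator above; query stands for the service-query object that defines Pager(), and service for the API service it is run against (both are illustrative stand-ins):

pages = []
for page in query.Pager(service):
    # Each iteration is one service.query(query) result; paging stops
    # once HasNext() reports that no further page exists.
    pages.append(page)

print('fetched %d pages' % len(pages))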
def _read_attr(attr_name): """ Parse attribute from file 'pefile.py' and avoid importing this module directly. __version__, __author__, __contact__, """ regex = attr_name + r"\s+=\s+'(.+)'" if sys.version_info.major == 2: with open('pefile.py', 'r') as f: match = re.search(regex, f.read()) else: with open('pefile.py', 'r', encoding='utf-8') as f: match = re.search(regex, f.read()) # Second item in the group is the value of attribute. return match.group(1)
[ "def", "_read_attr", "(", "attr_name", ")", ":", "regex", "=", "attr_name", "+", "r\"\\s+=\\s+'(.+)'\"", "if", "sys", ".", "version_info", ".", "major", "==", "2", ":", "with", "open", "(", "'pefile.py'", ",", "'r'", ")", "as", "f", ":", "match", "=", "re", ".", "search", "(", "regex", ",", "f", ".", "read", "(", ")", ")", "else", ":", "with", "open", "(", "'pefile.py'", ",", "'r'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "match", "=", "re", ".", "search", "(", "regex", ",", "f", ".", "read", "(", ")", ")", "# Second item in the group is the value of attribute.", "return", "match", ".", "group", "(", "1", ")" ]
32.8125
0.001852
def _get_private_key_obj(private_key, passphrase=None): ''' Returns a private key object based on PEM text. ''' private_key = _text_or_file(private_key) private_key = get_pem_entry(private_key, pem_type='(?:RSA )?PRIVATE KEY') rsaprivkey = M2Crypto.RSA.load_key_string( private_key, callback=_passphrase_callback(passphrase)) evpprivkey = M2Crypto.EVP.PKey() evpprivkey.assign_rsa(rsaprivkey) return evpprivkey
[ "def", "_get_private_key_obj", "(", "private_key", ",", "passphrase", "=", "None", ")", ":", "private_key", "=", "_text_or_file", "(", "private_key", ")", "private_key", "=", "get_pem_entry", "(", "private_key", ",", "pem_type", "=", "'(?:RSA )?PRIVATE KEY'", ")", "rsaprivkey", "=", "M2Crypto", ".", "RSA", ".", "load_key_string", "(", "private_key", ",", "callback", "=", "_passphrase_callback", "(", "passphrase", ")", ")", "evpprivkey", "=", "M2Crypto", ".", "EVP", ".", "PKey", "(", ")", "evpprivkey", ".", "assign_rsa", "(", "rsaprivkey", ")", "return", "evpprivkey" ]
40.363636
0.002203
def is_measure(self):
        """Return True if the column is a measure"""
        from ambry.valuetype.core import ROLE
        return self.role == ROLE.MEASURE
[ "def", "is_measure", "(", "self", ")", ":", "from", "ambry", ".", "valuetype", ".", "core", "import", "ROLE", "return", "self", ".", "role", "==", "ROLE", ".", "MEASURE" ]
39.75
0.012346
def auto_repr(obj: Any, with_addr: bool = False, sort_attrs: bool = True, joiner: str = COMMA_SPACE) -> str: """ Convenience function for :func:`__repr__`. Works its way through the object's ``__dict__`` and reports accordingly. Args: obj: object to display with_addr: include the memory address of ``obj`` sort_attrs: sort the attributes into alphabetical order? joiner: string with which to join the elements Returns: string: :func:`repr`-style representation """ if sort_attrs: keys = sorted(obj.__dict__.keys()) else: keys = obj.__dict__.keys() elements = ["{}={}".format(k, repr(getattr(obj, k))) for k in keys] return repr_result(obj, elements, with_addr=with_addr, joiner=joiner)
[ "def", "auto_repr", "(", "obj", ":", "Any", ",", "with_addr", ":", "bool", "=", "False", ",", "sort_attrs", ":", "bool", "=", "True", ",", "joiner", ":", "str", "=", "COMMA_SPACE", ")", "->", "str", ":", "if", "sort_attrs", ":", "keys", "=", "sorted", "(", "obj", ".", "__dict__", ".", "keys", "(", ")", ")", "else", ":", "keys", "=", "obj", ".", "__dict__", ".", "keys", "(", ")", "elements", "=", "[", "\"{}={}\"", ".", "format", "(", "k", ",", "repr", "(", "getattr", "(", "obj", ",", "k", ")", ")", ")", "for", "k", "in", "keys", "]", "return", "repr_result", "(", "obj", ",", "elements", ",", "with_addr", "=", "with_addr", ",", "joiner", "=", "joiner", ")" ]
37.047619
0.001253
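A short, self-contained sketch of wiring auto_repr into a class; the Point class is invented for illustration, and the exact output string ultimately depends on repr_result, which is not shown in this snippet.

# Hypothetical example class; the final string format is produced by repr_result().
class Point:
    def __init__(self, x: int, y: int) -> None:
        self.x = x
        self.y = y

    def __repr__(self) -> str:
        return auto_repr(self)

# repr(Point(2, 3)) would then list the attributes in sorted order,
# e.g. something like "Point(x=2, y=3)" with the default joiner.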
def _bootstrap_debian(name, **kwargs): ''' Bootstrap a Debian Linux container ''' version = kwargs.get('version', False) if not version: if __grains__['os'].lower() == 'debian': version = __grains__['osrelease'] else: version = 'stable' release_blacklist = ['hamm', 'slink', 'potato', 'woody', 'sarge', 'etch', 'lenny', 'squeeze', 'wheezy'] if version in release_blacklist: raise CommandExecutionError( 'Unsupported Debian version "{0}". ' 'Only "stable" or "jessie" and newer are supported'.format(version) ) dst = _make_container_root(name) cmd = 'debootstrap --arch=amd64 {0} {1}'.format(version, dst) ret = __salt__['cmd.run_all'](cmd, python_shell=False) if ret['retcode'] != 0: _build_failed(dst, name) return ret
[ "def", "_bootstrap_debian", "(", "name", ",", "*", "*", "kwargs", ")", ":", "version", "=", "kwargs", ".", "get", "(", "'version'", ",", "False", ")", "if", "not", "version", ":", "if", "__grains__", "[", "'os'", "]", ".", "lower", "(", ")", "==", "'debian'", ":", "version", "=", "__grains__", "[", "'osrelease'", "]", "else", ":", "version", "=", "'stable'", "release_blacklist", "=", "[", "'hamm'", ",", "'slink'", ",", "'potato'", ",", "'woody'", ",", "'sarge'", ",", "'etch'", ",", "'lenny'", ",", "'squeeze'", ",", "'wheezy'", "]", "if", "version", "in", "release_blacklist", ":", "raise", "CommandExecutionError", "(", "'Unsupported Debian version \"{0}\". '", "'Only \"stable\" or \"jessie\" and newer are supported'", ".", "format", "(", "version", ")", ")", "dst", "=", "_make_container_root", "(", "name", ")", "cmd", "=", "'debootstrap --arch=amd64 {0} {1}'", ".", "format", "(", "version", ",", "dst", ")", "ret", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "if", "ret", "[", "'retcode'", "]", "!=", "0", ":", "_build_failed", "(", "dst", ",", "name", ")", "return", "ret" ]
34.75
0.002334
def request_client_list(self, req, msg): """Request the list of connected clients. The list of clients is sent as a sequence of #client-list informs. Informs ------- addr : str The address of the client as host:port with host in dotted quad notation. If the address of the client could not be determined (because, for example, the client disconnected suddenly) then a unique string representing the client is sent instead. Returns ------- success : {'ok', 'fail'} Whether sending the client list succeeded. informs : int Number of #client-list inform messages sent. Examples -------- :: ?client-list #client-list 127.0.0.1:53600 !client-list ok 1 """ # TODO Get list of ClientConnection* instances and implement a standard # 'address-print' method in the ClientConnection class clients = self._client_conns num_clients = len(clients) for conn in clients: addr = conn.address req.inform(addr) return req.make_reply('ok', str(num_clients))
[ "def", "request_client_list", "(", "self", ",", "req", ",", "msg", ")", ":", "# TODO Get list of ClientConnection* instances and implement a standard", "# 'address-print' method in the ClientConnection class", "clients", "=", "self", ".", "_client_conns", "num_clients", "=", "len", "(", "clients", ")", "for", "conn", "in", "clients", ":", "addr", "=", "conn", ".", "address", "req", ".", "inform", "(", "addr", ")", "return", "req", ".", "make_reply", "(", "'ok'", ",", "str", "(", "num_clients", ")", ")" ]
32.189189
0.00163
def get_mutations(study_id, gene_list, mutation_type=None, case_id=None):
    """Return mutations as a list of genes and list of amino acid changes.

    Parameters
    ----------
    study_id : str
        The ID of the cBio study.
        Example: 'cellline_ccle_broad' or 'paad_icgc'
    gene_list : list[str]
        A list of genes with their HGNC symbols.
        Example: ['BRAF', 'KRAS']
    mutation_type : Optional[str]
        The type of mutation to filter to.
        mutation_type can be one of: missense, nonsense, frame_shift_ins,
        frame_shift_del, splice_site
    case_id : Optional[str]
        The case ID within the study to filter to.

    Returns
    -------
    mutations : dict
        A dict with two keys, 'gene_symbol' and 'amino_acid_change', the
        first mapping to a list of genes and the second to a list of amino
        acid changes in those genes.
    """
    genetic_profile = get_genetic_profiles(study_id, 'mutation')[0]
    gene_list_str = ','.join(gene_list)
    data = {'cmd': 'getMutationData',
            'case_set_id': study_id,
            'genetic_profile_id': genetic_profile,
            'gene_list': gene_list_str,
            'skiprows': -1}
    df = send_request(**data)
    if case_id:
        df = df[df['case_id'] == case_id]
    res = _filter_data_frame(df, ['gene_symbol', 'amino_acid_change'],
                             'mutation_type', mutation_type)
    mutations = {'gene_symbol': list(res['gene_symbol'].values()),
                 'amino_acid_change': list(res['amino_acid_change'].values())}
    return mutations
[ "def", "get_mutations", "(", "study_id", ",", "gene_list", ",", "mutation_type", "=", "None", ",", "case_id", "=", "None", ")", ":", "genetic_profile", "=", "get_genetic_profiles", "(", "study_id", ",", "'mutation'", ")", "[", "0", "]", "gene_list_str", "=", "','", ".", "join", "(", "gene_list", ")", "data", "=", "{", "'cmd'", ":", "'getMutationData'", ",", "'case_set_id'", ":", "study_id", ",", "'genetic_profile_id'", ":", "genetic_profile", ",", "'gene_list'", ":", "gene_list_str", ",", "'skiprows'", ":", "-", "1", "}", "df", "=", "send_request", "(", "*", "*", "data", ")", "if", "case_id", ":", "df", "=", "df", "[", "df", "[", "'case_id'", "]", "==", "case_id", "]", "res", "=", "_filter_data_frame", "(", "df", ",", "[", "'gene_symbol'", ",", "'amino_acid_change'", "]", ",", "'mutation_type'", ",", "mutation_type", ")", "mutations", "=", "{", "'gene_symbol'", ":", "list", "(", "res", "[", "'gene_symbol'", "]", ".", "values", "(", ")", ")", ",", "'amino_acid_change'", ":", "list", "(", "res", "[", "'amino_acid_change'", "]", ".", "values", "(", ")", ")", "}", "return", "mutations" ]
37.439024
0.000635
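A hedged example call for get_mutations, reusing the study and gene names already given in its docstring; the request goes to the live cBioPortal service, so the actual results are not reproducible here.

# Example values taken from the docstring; output depends on the remote service.
muts = get_mutations('paad_icgc', ['BRAF', 'KRAS'], mutation_type='missense')
genes = muts['gene_symbol']            # list of gene symbols with mutations
changes = muts['amino_acid_change']    # matching list of amino acid changes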
def remove_spurious_insertions(scaffolds): """Remove all bins whose left and right neighbors belong to the same, different scaffold. Example with three such insertions in two different scaffolds: >>> scaffolds = { ... "scaffold1": [ ... ["contig1", 0, 0, 100, 1], ... ["contig1", 1, 100, 200, 1], ... ["contig23", 53, 1845, 2058, -1], # <-- insertion ... ["contig1", 4, 254, 408, 1], ... ["contig1", 7, 805, 1253, 1], ... ["contig5", 23, 1500, 1605, -1], ... ["contig65", 405, 32145, 45548, -1], # <-- insertion ... ["contig5", 22, 1385, 1499, -1], ... ], ... "scaffold2": [ ... ["contig8", 0, 0, 250, 1], ... ["contig17", 2454, 8754, -1], # <-- insertion ... ["contig8", 2, 320, 480, 1], ... ], ... } >>> new_scaffolds = remove_spurious_insertions(scaffolds) >>> for my_bin in new_scaffolds['scaffold1']: ... print(my_bin) ... ['contig1', 0, 0, 100, 1] ['contig1', 1, 100, 200, 1] ['contig1', 4, 254, 408, 1] ['contig1', 7, 805, 1253, 1] ['contig5', 23, 1500, 1605, -1] ['contig5', 22, 1385, 1499, -1] >>> for my_bin in new_scaffolds['scaffold2']: ... print(my_bin) ... ['contig8', 0, 0, 250, 1] ['contig8', 2, 320, 480, 1] """ scaffolds = format_info_frags(scaffolds) new_scaffolds = {} for name, scaffold in scaffolds.items(): new_scaffold = [] if len(scaffold) > 2: for i in range(len(scaffold)): # First take care of edge cases: *-- or --* if i == 0: if not ( scaffold[i][0] != scaffold[i + 1][0] and scaffold[i + 1][0] == scaffold[i + 2][0] ): new_scaffold.append(scaffold[i]) elif i == len(scaffold) - 1: if not ( scaffold[i][0] != scaffold[i - 1][0] and scaffold[i - 1][0] == scaffold[i - 2][0] ): new_scaffold.append(scaffold[i]) # Otherwise, looking for -*- else: if not ( scaffold[i - 1][0] == scaffold[i + 1][0] and scaffold[i - 1][0] != scaffold[i][0] ): new_scaffold.append(scaffold[i]) else: # Can't remove insertions if 2 bins or less new_scaffold = copy.deepcopy(scaffold) new_scaffolds[name] = new_scaffold return new_scaffolds
[ "def", "remove_spurious_insertions", "(", "scaffolds", ")", ":", "scaffolds", "=", "format_info_frags", "(", "scaffolds", ")", "new_scaffolds", "=", "{", "}", "for", "name", ",", "scaffold", "in", "scaffolds", ".", "items", "(", ")", ":", "new_scaffold", "=", "[", "]", "if", "len", "(", "scaffold", ")", ">", "2", ":", "for", "i", "in", "range", "(", "len", "(", "scaffold", ")", ")", ":", "# First take care of edge cases: *-- or --*", "if", "i", "==", "0", ":", "if", "not", "(", "scaffold", "[", "i", "]", "[", "0", "]", "!=", "scaffold", "[", "i", "+", "1", "]", "[", "0", "]", "and", "scaffold", "[", "i", "+", "1", "]", "[", "0", "]", "==", "scaffold", "[", "i", "+", "2", "]", "[", "0", "]", ")", ":", "new_scaffold", ".", "append", "(", "scaffold", "[", "i", "]", ")", "elif", "i", "==", "len", "(", "scaffold", ")", "-", "1", ":", "if", "not", "(", "scaffold", "[", "i", "]", "[", "0", "]", "!=", "scaffold", "[", "i", "-", "1", "]", "[", "0", "]", "and", "scaffold", "[", "i", "-", "1", "]", "[", "0", "]", "==", "scaffold", "[", "i", "-", "2", "]", "[", "0", "]", ")", ":", "new_scaffold", ".", "append", "(", "scaffold", "[", "i", "]", ")", "# Otherwise, looking for -*-", "else", ":", "if", "not", "(", "scaffold", "[", "i", "-", "1", "]", "[", "0", "]", "==", "scaffold", "[", "i", "+", "1", "]", "[", "0", "]", "and", "scaffold", "[", "i", "-", "1", "]", "[", "0", "]", "!=", "scaffold", "[", "i", "]", "[", "0", "]", ")", ":", "new_scaffold", ".", "append", "(", "scaffold", "[", "i", "]", ")", "else", ":", "# Can't remove insertions if 2 bins or less", "new_scaffold", "=", "copy", ".", "deepcopy", "(", "scaffold", ")", "new_scaffolds", "[", "name", "]", "=", "new_scaffold", "return", "new_scaffolds" ]
35.423077
0.000352
def operational(ctx, commands, format, xpath):
    """ Execute operational mode command(s).

    This function will send operational mode commands to a Junos
    device. jaide.utils.clean_lines() is used to determine how we are
    receiving commands, and to ignore comment lines or blank lines in
    a command file.

    @param ctx: The click context parameter, for receiving the object dictionary
              | being manipulated by previous functions. Needed by any
              | function with the @click.pass_context decorator.
    @type ctx: click.Context
    @param commands: The op commands to send to the device. Can be one of
                   | four things:
                   |    1. A single op command as a string.
                   |    2. A string of comma separated op commands.
                   |    3. A python list of op commands.
                   |    4. A filepath of a file with op commands on each
                   |       line.
    @type commands: str
    @param format: String specifying what format to request for the
                 | response from the device. Defaults to 'text', but
                 | also accepts 'xml'.
    @type format: str
    @param xpath: An xpath expression on which we should filter the results.
                | This enforces 'xml' for the format of the response.
    @type xpath: str

    @returns: None. Functions part of click relating to the command group
            | 'main' do not return anything. Click handles passing context
            | between the functions and maintaining command order and chaining.
    """
    mp_pool = multiprocessing.Pool(multiprocessing.cpu_count() * 2)
    for ip in ctx.obj['hosts']:
        mp_pool.apply_async(wrap.open_connection, args=(ip,
                            ctx.obj['conn']['username'],
                            ctx.obj['conn']['password'], wrap.command,
                            [commands, format, xpath], ctx.obj['out'],
                            ctx.obj['conn']['connect_timeout'],
                            ctx.obj['conn']['session_timeout'],
                            ctx.obj['conn']['port']), callback=write_out)
    mp_pool.close()
    mp_pool.join()
[ "def", "operational", "(", "ctx", ",", "commands", ",", "format", ",", "xpath", ")", ":", "mp_pool", "=", "multiprocessing", ".", "Pool", "(", "multiprocessing", ".", "cpu_count", "(", ")", "*", "2", ")", "for", "ip", "in", "ctx", ".", "obj", "[", "'hosts'", "]", ":", "mp_pool", ".", "apply_async", "(", "wrap", ".", "open_connection", ",", "args", "=", "(", "ip", ",", "ctx", ".", "obj", "[", "'conn'", "]", "[", "'username'", "]", ",", "ctx", ".", "obj", "[", "'conn'", "]", "[", "'password'", "]", ",", "wrap", ".", "command", ",", "[", "commands", ",", "format", ",", "xpath", "]", ",", "ctx", ".", "obj", "[", "'out'", "]", ",", "ctx", ".", "obj", "[", "'conn'", "]", "[", "'connect_timeout'", "]", ",", "ctx", ".", "obj", "[", "'conn'", "]", "[", "'session_timeout'", "]", ",", "ctx", ".", "obj", "[", "'conn'", "]", "[", "'port'", "]", ")", ",", "callback", "=", "write_out", ")", "mp_pool", ".", "close", "(", ")", "mp_pool", ".", "join", "(", ")" ]
49.5
0.00045
def _merge_cdicts(self, clut, exdict, separator): """Merge callable look-up tables from two objects.""" if not self._full_cname: return # Find all callables that are not in self exceptions dictionary # and create new tokens for them repl_dict = {} for key, value in _sorted_keys_items(clut): otoken = self._clut.get(key, None) if not otoken: otoken = str(len(self._clut)) self._clut[key] = otoken repl_dict[value] = otoken # Update other dictionaries to the mapping to self # exceptions dictionary for fdict in exdict.values(): for entry in fdict.values(): olist = [] for item in entry["function"]: if item is None: # Callable name is None when callable is # part of exclude list olist.append(None) else: itokens = item.split(separator) itokens = [repl_dict.get(itoken) for itoken in itokens] olist.append(separator.join(itokens)) entry["function"] = olist
[ "def", "_merge_cdicts", "(", "self", ",", "clut", ",", "exdict", ",", "separator", ")", ":", "if", "not", "self", ".", "_full_cname", ":", "return", "# Find all callables that are not in self exceptions dictionary", "# and create new tokens for them", "repl_dict", "=", "{", "}", "for", "key", ",", "value", "in", "_sorted_keys_items", "(", "clut", ")", ":", "otoken", "=", "self", ".", "_clut", ".", "get", "(", "key", ",", "None", ")", "if", "not", "otoken", ":", "otoken", "=", "str", "(", "len", "(", "self", ".", "_clut", ")", ")", "self", ".", "_clut", "[", "key", "]", "=", "otoken", "repl_dict", "[", "value", "]", "=", "otoken", "# Update other dictionaries to the mapping to self", "# exceptions dictionary", "for", "fdict", "in", "exdict", ".", "values", "(", ")", ":", "for", "entry", "in", "fdict", ".", "values", "(", ")", ":", "olist", "=", "[", "]", "for", "item", "in", "entry", "[", "\"function\"", "]", ":", "if", "item", "is", "None", ":", "# Callable name is None when callable is", "# part of exclude list", "olist", ".", "append", "(", "None", ")", "else", ":", "itokens", "=", "item", ".", "split", "(", "separator", ")", "itokens", "=", "[", "repl_dict", ".", "get", "(", "itoken", ")", "for", "itoken", "in", "itokens", "]", "olist", ".", "append", "(", "separator", ".", "join", "(", "itokens", ")", ")", "entry", "[", "\"function\"", "]", "=", "olist" ]
39.892857
0.000874
def set_autocamera(self,mode='density'):
        """
        - set_autocamera(mode='density'): By default, Scene defines its own Camera.
        However, there is no general way of doing so. Scene uses a density criterion
        for getting the point of view. If this is not a good option for your problem,
        you can choose among:
        |'minmax'|'density'|'median'|'mean'|. If none of the previous methods
        work well, you may define the camera params by yourself.
        """
        self.Camera.set_autocamera(self._Particles,mode=mode)
        self._camera_params = self.Camera.get_params()
        self._x, self._y, self._hsml, self._kview = self.__compute_scene()
        self._m = self._Particles._mass[self._kview]
[ "def", "set_autocamera", "(", "self", ",", "mode", "=", "'density'", ")", ":", "self", ".", "Camera", ".", "set_autocamera", "(", "self", ".", "_Particles", ",", "mode", "=", "mode", ")", "self", ".", "_camera_params", "=", "self", ".", "Camera", ".", "get_params", "(", ")", "self", ".", "_x", ",", "self", ".", "_y", ",", "self", ".", "_hsml", ",", "self", ".", "_kview", "=", "self", ".", "__compute_scene", "(", ")", "self", ".", "_m", "=", "self", ".", "_Particles", ".", "_mass", "[", "self", ".", "_kview", "]" ]
56.230769
0.009421
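A brief usage sketch for set_autocamera; how the Scene object is constructed is an assumption, since its constructor is not shown in this snippet.

# Hypothetical construction; only the last two lines reflect calls visible in the code above.
scene = Scene(particles)
scene.set_autocamera(mode='minmax')    # switch from the default density criterion
params = scene.Camera.get_params()     # camera parameters chosen automatically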
def create(self): """ Creates the directory and all its parent directories if it does not exist yet """ if self.dirname and not os.path.exists(self.dirname): os.makedirs(self.dirname)
[ "def", "create", "(", "self", ")", ":", "if", "self", ".", "dirname", "and", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "dirname", ")", ":", "os", ".", "makedirs", "(", "self", ".", "dirname", ")" ]
37
0.008811
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): """ Read the data encoding the Poll request payload and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the data attribute is missing from the encoded payload. """ super(PollRequestPayload, self).read( input_stream, kmip_version=kmip_version ) local_stream = utils.BytearrayStream(input_stream.read(self.length)) if self.is_tag_next( enums.Tags.ASYNCHRONOUS_CORRELATION_VALUE, local_stream ): self._asynchronous_correlation_value = primitives.ByteString( tag=enums.Tags.ASYNCHRONOUS_CORRELATION_VALUE ) self._asynchronous_correlation_value.read( local_stream, kmip_version=kmip_version ) self.is_oversized(local_stream)
[ "def", "read", "(", "self", ",", "input_stream", ",", "kmip_version", "=", "enums", ".", "KMIPVersion", ".", "KMIP_1_0", ")", ":", "super", "(", "PollRequestPayload", ",", "self", ")", ".", "read", "(", "input_stream", ",", "kmip_version", "=", "kmip_version", ")", "local_stream", "=", "utils", ".", "BytearrayStream", "(", "input_stream", ".", "read", "(", "self", ".", "length", ")", ")", "if", "self", ".", "is_tag_next", "(", "enums", ".", "Tags", ".", "ASYNCHRONOUS_CORRELATION_VALUE", ",", "local_stream", ")", ":", "self", ".", "_asynchronous_correlation_value", "=", "primitives", ".", "ByteString", "(", "tag", "=", "enums", ".", "Tags", ".", "ASYNCHRONOUS_CORRELATION_VALUE", ")", "self", ".", "_asynchronous_correlation_value", ".", "read", "(", "local_stream", ",", "kmip_version", "=", "kmip_version", ")", "self", ".", "is_oversized", "(", "local_stream", ")" ]
36.611111
0.001478
def rand_crop(*args, padding_mode='reflection', p:float=1.): "Randomized version of `crop_pad`." return crop_pad(*args, **rand_pos, padding_mode=padding_mode, p=p)
[ "def", "rand_crop", "(", "*", "args", ",", "padding_mode", "=", "'reflection'", ",", "p", ":", "float", "=", "1.", ")", ":", "return", "crop_pad", "(", "*", "args", ",", "*", "*", "rand_pos", ",", "padding_mode", "=", "padding_mode", ",", "p", "=", "p", ")" ]
56.333333
0.023392
def elcm_session_delete(irmc_info, session_id, terminate=False): """send an eLCM request to remove a session from the session list :param irmc_info: node info :param session_id: session id :param terminate: a running session must be terminated before removing :raises: ELCMSessionNotFound if the session does not exist :raises: SCCIClientError if SCCI failed """ # Terminate the session first if needs to if terminate: # Get session status to check session = elcm_session_get_status(irmc_info, session_id) status = session['Session']['Status'] # Terminate session if it is activated or running if status == 'running' or status == 'activated': elcm_session_terminate(irmc_info, session_id) # Send DELETE request to the server resp = elcm_request(irmc_info, method='DELETE', path='/sessionInformation/%s/remove' % session_id) if resp.status_code == 200: return elif resp.status_code == 404: raise ELCMSessionNotFound('Session "%s" does not exist' % session_id) else: raise scci.SCCIClientError(('Failed to remove session ' '"%(session)s" with error code %(error)s' % {'session': session_id, 'error': resp.status_code}))
[ "def", "elcm_session_delete", "(", "irmc_info", ",", "session_id", ",", "terminate", "=", "False", ")", ":", "# Terminate the session first if needs to", "if", "terminate", ":", "# Get session status to check", "session", "=", "elcm_session_get_status", "(", "irmc_info", ",", "session_id", ")", "status", "=", "session", "[", "'Session'", "]", "[", "'Status'", "]", "# Terminate session if it is activated or running", "if", "status", "==", "'running'", "or", "status", "==", "'activated'", ":", "elcm_session_terminate", "(", "irmc_info", ",", "session_id", ")", "# Send DELETE request to the server", "resp", "=", "elcm_request", "(", "irmc_info", ",", "method", "=", "'DELETE'", ",", "path", "=", "'/sessionInformation/%s/remove'", "%", "session_id", ")", "if", "resp", ".", "status_code", "==", "200", ":", "return", "elif", "resp", ".", "status_code", "==", "404", ":", "raise", "ELCMSessionNotFound", "(", "'Session \"%s\" does not exist'", "%", "session_id", ")", "else", ":", "raise", "scci", ".", "SCCIClientError", "(", "(", "'Failed to remove session '", "'\"%(session)s\" with error code %(error)s'", "%", "{", "'session'", ":", "session_id", ",", "'error'", ":", "resp", ".", "status_code", "}", ")", ")" ]
41.757576
0.000709