Source code for Topyfic.analysis
import pandas as pd
@@ -125,8 +134,10 @@ Source code for Topyfic.analysis
warnings.filterwarnings('ignore')
-[docs]class Analysis:
- """
+
+[docs]
+class Analysis:
+ """
A class used to investigate the topics and gene weight compositions
:param Top_model: top model used for analysing topics, gene weight compositions and calculating cell participation
@@ -157,8 +168,10 @@ Source code for Topyfic.analysis
self.cell_participation = cell_participation
-[docs] def calculate_cell_participation(self, data):
- """
+
+[docs]
+ def calculate_cell_participation(self, data):
+ """
Calculate cell participation for given data
:param data: processed expression data along with cells and genes/region information
@@ -170,12 +183,15 @@ Source code for Topyfic.analysis
lda_output = self.top_model.model.transform(data.X)
cell_participation = pd.DataFrame(lda_output,
- columns=[f"Topic_{i + 1}" for i in range(self.top_model.N)],
+ columns=[f"Topic_{i + 1}" for i in range(self.top_model.N)],
# list(self.top_model.topics.keys())
index=data.obs.index)
self.cell_participation = anndata.AnnData(cell_participation, obs=data.obs)
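For orientation, here is a minimal usage sketch of the flow above, assuming an already-processed AnnData object and a previously saved topModel file; the file names are hypothetical and it is assumed that Analysis and read_topModel are exposed at the package level.

import anndata
import Topyfic

adata = anndata.read_h5ad("processed_cells.h5ad")        # hypothetical input
top_model = Topyfic.read_topModel("topModel_myRun.p")     # hypothetical file
analysis = Topyfic.Analysis(Top_model=top_model)
analysis.calculate_cell_participation(data=adata)
print(analysis.cell_participation.to_df().head())         # Topic_1 ... Topic_N per cell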
-[docs] def pie_structure_Chart(self,
+
+
+[docs]
+ def pie_structure_Chart(self,
level,
category=None,
ascending=None,
@@ -185,7 +201,7 @@ Source code for Topyfic.analysis
figsize=None,
file_format="pdf",
file_name="piechart_topicAvgCell"):
- """
+ """
plot pie charts that show the contribution of each topic to each category (i.e. cell type)
:param level: name of the column from cell_participation.obs
@@ -276,7 +292,10 @@ Source code for Topyfic.analysis
else:
plt.close()
-[docs] def structure_plot(self,
+
+
+[docs]
+ def structure_plot(self,
level,
category=None,
topic_order=None,
@@ -291,7 +310,7 @@ Source code for Topyfic.analysis
figsize=None,
file_format="pdf",
file_name="structure_topicAvgCell"):
- """
+ """
plot structure plot which shows the contribution of each topic to each cell in the given categories
:param level: name of the column from cell_participation.obs
@@ -512,9 +531,12 @@ Source code for Topyfic.analysis
else:
plt.close()
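A hedged usage sketch of the two plotting helpers above; "cell_type" is a hypothetical column of cell_participation.obs.

analysis.pie_structure_Chart(level="cell_type")
analysis.structure_plot(level="cell_type")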
-[docs] @staticmethod
+
+
+[docs]
+ @staticmethod
def convertDatTraits(data):
- """
+ """
get data trait module based on sample information
:return: a dataframe containing the information in a suitable format for plotting the module-trait relationship heatmap
@@ -541,7 +563,10 @@ Source code for Topyfic.analysis
return datTraits
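The snippet below is an independent sketch of the kind of transformation convertDatTraits performs (one-hot encoding categorical sample information into a numeric trait matrix); it mirrors the intent of the docstring rather than the library's own implementation.

import pandas as pd

obs = pd.DataFrame({"cell_type": ["T", "B", "T"],
                    "time_point": ["d0", "d7", "d7"]},
                   index=["c1", "c2", "c3"])              # hypothetical sample sheet

# Numeric 0/1 trait matrix, suitable for correlating against topic participation.
datTraits = pd.get_dummies(obs).astype(int)
print(datTraits)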
-[docs] def TopicTraitRelationshipHeatmap(self,
+
+
+[docs]
+ def TopicTraitRelationshipHeatmap(self,
metaData,
alternative='two-sided',
annotation=False,
@@ -549,7 +574,7 @@ Source code for Topyfic.analysis
show=True,
file_format="pdf",
file_name='topic-traitRelationships'):
- """
+ """
plot topic-trait relationship heatmap
:param metaData: traits whose relationship with topics you would like to see (must be column names of cell_participation.obs)
@@ -637,7 +662,10 @@ Source code for Topyfic.analysis
else:
plt.close()
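A hedged call sketch for the heatmap above; whether metaData takes a single column name or a list is an assumption here, and the column names are hypothetical.

analysis.TopicTraitRelationshipHeatmap(metaData=["cell_type", "time_point"],
                                       annotation=True,
                                       show=True)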
-[docs] def extract_cells(self,
+
+
+[docs]
+ def extract_cells(self,
level,
category,
top_cells=0.05,
@@ -645,7 +673,7 @@ Source code for Topyfic.analysis
min_cells=50,
file_name=None,
save=False):
- """
+ """
extract a subset of cells and their cell participation based on specific criteria
:param level: name of the column from cell_participation.obs
@@ -707,7 +735,10 @@ Source code for Topyfic.analysis
else:
return selected_cell, selected_cell_participation
-[docs] def average_cell_participation(self,
+
+
+[docs]
+ def average_cell_participation(self,
label=None,
color="blue",
save=True,
@@ -715,7 +746,7 @@ Source code for Topyfic.analysis
figsize=None,
file_format="pdf",
file_name="average_cell_participation"):
- """
+ """
barplot showing average of cell participation in each topic
:param label: dictionary mapping each default topic name to the new name you want to show, if you want to rename the topics
@@ -757,7 +788,10 @@ Source code for Topyfic.analysis
else:
plt.close()
-[docs] def average_cell_participation_line_plot(self,
+
+
+[docs]
+ def average_cell_participation_line_plot(self,
topic,
color,
category,
@@ -767,7 +801,7 @@ Source code for Topyfic.analysis
figsize=None,
file_format="pdf",
file_name="line_average_cell_participation"):
- """
+ """
line plot showing the average cell participation in a topic, split by two cell features (e.g. cell type and time point)
:param topic: name of the topic
@@ -840,7 +874,10 @@ Source code for Topyfic.analysis
else:
plt.close()
-[docs] def cell_participation_distribution(self,
+
+
+[docs]
+ def cell_participation_distribution(self,
plot_type="violin",
threshold=0.05,
max_topic=True,
@@ -851,7 +888,7 @@ Source code for Topyfic.analysis
figsize=None,
file_format="pdf",
file_name="dist_cell_participation"):
- """
+ """
plot showing the distribution of max/all cell participation for each topic
:param plot_type: type of the plot, which can be "violin" or "box"
@@ -907,7 +944,10 @@ Source code for Topyfic.analysis
else:
plt.close()
-[docs] def plot_topic_composition(self,
+
+
+[docs]
+ def plot_topic_composition(self,
category,
level="topic",
biotype="biotype",
@@ -916,7 +956,7 @@ Source code for Topyfic.analysis
show=True,
file_format="pdf",
file_name="gene_composition"):
- """
+ """
plot gene composition divided by gene biotype or topic
:param category: topic name or gene biotype name you want to see gene composition for
@@ -969,7 +1009,7 @@ Source code for Topyfic.analysis
res = res.append(tmp, ignore_index=True)
else:
- gene_weights.columns = [f'Topic_{i + 1}' for i in range(gene_weights.shape[1])]
+ gene_weights.columns = [f'Topic_{i + 1}' for i in range(gene_weights.shape[1])]
res = pd.DataFrame(columns=gene_weights.columns,
index=range(gene_weights.shape[0]),
dtype=int)
@@ -1031,7 +1071,10 @@ Source code for Topyfic.analysis
else:
plt.close()
-[docs] def max_topic_cell_participation(self,
+
+
+[docs]
+ def max_topic_cell_participation(self,
cutoff=10,
color="blue",
title="Maximum cell topic participation for each cells",
@@ -1040,7 +1083,7 @@ Source code for Topyfic.analysis
figsize=None,
file_format="pdf",
file_name="max_topic_cell_participation"):
- """
+ """
step plot showing maximum cell participation
:param cutoff: eliminate any cells whose maximum participation is less than this value
@@ -1093,8 +1136,11 @@ Source code for Topyfic.analysis
else:
plt.close()
-[docs] def save_analysis(self, name=None, save_path=""):
- """
+
+
+[docs]
+ def save_analysis(self, name=None, save_path=""):
+ """
save Analysis class as a pickle file
:param name: name of the pickle file (default: analysis_Analysis.top_model.name)
@@ -1108,7 +1154,9 @@ Source code for Topyfic.analysis
picklefile = open(f"{save_path}{name}.p", "wb")
pickle.dump(self, picklefile)
- picklefile.close()
+ picklefile.close()
+
+
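Sketch of the save/load round trip implied above: the object is pickled to "<save_path><name>.p" and can be read back with pickle (names below are hypothetical).

import pickle

analysis.save_analysis(name="analysis_myRun", save_path="results/")
with open("results/analysis_myRun.p", "rb") as fh:
    analysis_again = pickle.load(fh)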
@@ -1123,7 +1171,7 @@ Source code for Topyfic.analysis
diff --git a/docs/html/_modules/Topyfic/topModel.html b/docs/html/_modules/Topyfic/topModel.html
index 303704a..9ccd128 100644
--- a/docs/html/_modules/Topyfic/topModel.html
+++ b/docs/html/_modules/Topyfic/topModel.html
@@ -1,18 +1,16 @@
Topyfic.topModel — Topyfic 0.0.11 documentation
@@ -101,7 +99,18 @@
Source code for Topyfic.topModel
import sys
@@ -121,8 +130,10 @@ Source code for Topyfic.topModel
from Topyfic.utilsAnalyseModel import MA_plot
-[docs]class TopModel:
- """
+
+[docs]
+class TopModel:
+ """
A class that stores a model
:param name: name of class
@@ -155,16 +166,18 @@ Source code for Topyfic.topModel
for i in range(N):
topic_gene_weights = pd.DataFrame(gene_weights.iloc[:, i].values,
index=gene_weights.index,
- columns=[f"Topic_{i + 1}"])
- self.topics[f"Topic_{i + 1}"] = Topic(topic_id=f"Topic_{i + 1}",
+ columns=[f"Topic_{i + 1}"])
+ self.topics[f"Topic_{i + 1}"] = Topic(topic_id=f"Topic_{i + 1}",
topic_gene_weights=topic_gene_weights,
gene_information=gene_information)
self.name = name
self.N = N
self.model = model
-[docs] def get_feature_name(self):
- """
+
+[docs]
+ def get_feature_name(self):
+ """
get feature (gene) names
:return: list of feature (gene) names
@@ -173,8 +186,11 @@ Source code for Topyfic.topModel
topic1 = next(iter(self.topics.items()))[1]
return topic1.gene_information.index.tolist()
-[docs] def save_rLDA_model(self, name='rLDA', save_path="", file_format='joblib'):
- """
+
+
+[docs]
+ def save_rLDA_model(self, name='rLDA', save_path="", file_format='joblib'):
+ """
save rLDA model (instance of LDA model in sklearn) as a joblib/HDF5 file.
:param name: name of the joblib file (default: rLDA)
@@ -208,32 +224,38 @@ Source code for Topyfic.topModel
f.close()
-[docs] def get_gene_weights(self):
- """
+
+
+[docs]
+ def get_gene_weights(self):
+ """
get feature(gene) weights
:return: dataframe containing feature (gene) weights; genes are the index and topics are the columns
:rtype: pandas dataframe
"""
gene_weights = pd.DataFrame(np.transpose(self.model.components_),
- columns=[f'{self.name}_Topic_{i + 1}' for i in
+ columns=[f'{self.name}_Topic_{i + 1}' for i in
range(self.model.components_.shape[0])],
index=self.get_feature_name())
return gene_weights
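A small usage sketch: ranking the strongest genes of one topic from the matrix returned by get_gene_weights(); the column name pattern is taken from the code above.

gene_weights = top_model.get_gene_weights()
topic_col = gene_weights.columns[0]                        # e.g. "<name>_Topic_1"
print(gene_weights[topic_col].sort_values(ascending=False).head(20))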
-[docs] def get_ranked_gene_weight(self):
- """
+
+
+[docs]
+ def get_ranked_gene_weight(self):
+ """
get sorted feature (gene) weights; each entry is a gene and its weight in each topic
:return: dataframe containing features (genes) and their weights; ranks are the index and topics are the columns
:rtype: pandas dataframe
"""
gene_weights = pd.DataFrame(np.transpose(self.model.components_),
- columns=[f'{self.name}_Topic_{i + 1}' for i in
+ columns=[f'{self.name}_Topic_{i + 1}' for i in
range(self.model.components_.shape[0])],
index=self.get_feature_name())
- ranked_gene_weights = pd.DataFrame(columns=[f'{self.name}_Topic_{i + 1}' for i in
+ ranked_gene_weights = pd.DataFrame(columns=[f'{self.name}_Topic_{i + 1}' for i in
range(self.model.components_.shape[0])],
index=range(len(self.get_feature_name())))
for col in gene_weights.columns:
@@ -243,8 +265,11 @@ Source code for Topyfic.topModel
ranked_gene_weights[col] = tmp[col].values
return ranked_gene_weights
-[docs] def get_top_model_attributes(self):
- """
+
+
+[docs]
+ def get_top_model_attributes(self):
+ """
get top model attributes needed to build a sklearn.decomposition.LatentDirichletAllocation
@@ -255,15 +280,15 @@ Source code for Topyfic.topModel
feature = self.get_feature_name()
components = pd.DataFrame(self.model.components_,
- index=[f"Topic_{i + 1}" for i in range(self.N)],
+ index=[f"Topic_{i + 1}" for i in range(self.N)],
columns=feature)
exp_dirichlet_component = pd.DataFrame(self.model.exp_dirichlet_component_,
- index=[f"Topic_{i + 1}" for i in range(self.N)],
+ index=[f"Topic_{i + 1}" for i in range(self.N)],
columns=feature)
others = pd.DataFrame(
- index=[f"Topic_{i + 1}" for i in range(self.N)],
+ index=[f"Topic_{i + 1}" for i in range(self.N)],
columns=["n_batch_iter",
"n_features_in",
"n_iter",
@@ -271,16 +296,19 @@ Source code for Topyfic.topModel
"doc_topic_prior",
"topic_word_prior"])
- others.loc[[f"Topic_{i + 1}" for i in range(self.N)], "n_batch_iter"] = self.model.n_batch_iter_
- others.loc[[f"Topic_{i + 1}" for i in range(self.N)], "n_features_in"] = self.model.n_features_in_
- others.loc[[f"Topic_{i + 1}" for i in range(self.N)], "n_iter"] = self.model.n_iter_
- others.loc[[f"Topic_{i + 1}" for i in range(self.N)], "bound"] = self.model.bound_
- others.loc[[f"Topic_{i + 1}" for i in range(self.N)], "doc_topic_prior"] = self.model.doc_topic_prior_
- others.loc[[f"Topic_{i + 1}" for i in range(self.N)], "topic_word_prior"] = self.model.topic_word_prior_
+ others.loc[[f"Topic_{i + 1}" for i in range(self.N)], "n_batch_iter"] = self.model.n_batch_iter_
+ others.loc[[f"Topic_{i + 1}" for i in range(self.N)], "n_features_in"] = self.model.n_features_in_
+ others.loc[[f"Topic_{i + 1}" for i in range(self.N)], "n_iter"] = self.model.n_iter_
+ others.loc[[f"Topic_{i + 1}" for i in range(self.N)], "bound"] = self.model.bound_
+ others.loc[[f"Topic_{i + 1}" for i in range(self.N)], "doc_topic_prior"] = self.model.doc_topic_prior_
+ others.loc[[f"Topic_{i + 1}" for i in range(self.N)], "topic_word_prior"] = self.model.topic_word_prior_
return components, exp_dirichlet_component, others
-[docs] def gene_weight_rank_heatmap(self,
+
+
+[docs]
+ def gene_weight_rank_heatmap(self,
genes=None,
topics=None,
show_rank=True,
@@ -290,7 +318,7 @@ Source code for Topyfic.topModel
figsize=None,
file_format="pdf",
file_name="gene_weight_rank_heatmap"):
- """
+ """
plot the weights and ranks of selected genes in selected topics
:param genes: list of genes whose weights you want to see (default: all genes)
@@ -318,9 +346,9 @@ Source code for Topyfic.topModel
sys.exit("some/all of genes are not part of topModel!")
if topics is None:
- topics = [f'{self.name}_Topic_{i + 1}' for i in range(self.model.components_.shape[0])]
+ topics = [f'{self.name}_Topic_{i + 1}' for i in range(self.model.components_.shape[0])]
elif not (set(topics) & set(
- [f'{self.name}_Topic_{i + 1}' for i in range(self.model.components_.shape[0])])) == set(topics):
+ [f'{self.name}_Topic_{i + 1}' for i in range(self.model.components_.shape[0])])) == set(topics):
sys.exit("some/all of topics are not part of topModel!")
if scale not in ["log2", "log10", None]:
@@ -382,7 +410,10 @@ Source code for Topyfic.topModel
else:
plt.close()
-[docs] def MA_plot(self,
+
+
+[docs]
+ def MA_plot(self,
topic1,
topic2,
size=None,
@@ -396,7 +427,7 @@ Source code for Topyfic.topModel
show=True,
file_format="pdf",
file_name="MA_plot"):
- """
+ """
MA plot based on the gene weights of the given topics
:param topic1: first topic to be compared
@@ -448,8 +479,11 @@ Source code for Topyfic.topModel
return gene_zscore
-[docs] def save_topModel(self, name=None, save_path="", file_format='pickle'):
- """
+
+
+[docs]
+ def save_topModel(self, name=None, save_path="", file_format='pickle'):
+ """
save TopModel class as a pickle/HDF5 file
:param name: name of the file (default: topModel_TopModel.name)
@@ -505,7 +539,9 @@ Source code for Topyfic.topModel
f['name'] = np.string_(self.name)
f['N'] = np.int_(self.N)
- f.close()
+ f.close()
+
+
@@ -521,7 +557,7 @@ Source code for Topyfic.topModel
diff --git a/docs/html/_modules/Topyfic/topic.html b/docs/html/_modules/Topyfic/topic.html
index 7cd5cec..1de6367 100644
--- a/docs/html/_modules/Topyfic/topic.html
+++ b/docs/html/_modules/Topyfic/topic.html
@@ -1,18 +1,16 @@
Topyfic.topic — Topyfic 0.0.11 documentation
@@ -101,7 +99,18 @@
Source code for Topyfic.topic
import sys
@@ -119,8 +128,10 @@ Source code for Topyfic.topic
from Topyfic.utilsAnalyseModel import GSEA, functional_enrichment_analysis
-[docs]class Topic:
- """
+
+[docs]
+class Topic:
+ """
A class that stores a topic along with other useful information
:param topic_id: ID of topic which is unique
@@ -161,8 +172,10 @@ Source code for Topyfic.topic
else:
self.topic_information = topic_information
-[docs] def update_gene_information(self, gene_information):
- """
+
+[docs]
+ def update_gene_information(self, gene_information):
+ """
update/add gene information for each topic
:param gene_information: dataframe containing the gene information we would like to add/update (the index should be the same as the index of gene_information in the class)
@@ -172,14 +185,17 @@ Source code for Topyfic.topic
self.gene_information.drop(same_columns, axis=1, inplace=True)
self.gene_information = pd.concat([self.gene_information, gene_information], axis=1)
-[docs] def functional_enrichment_analysis(self,
+
+
+[docs]
+ def functional_enrichment_analysis(self,
type,
organism,
sets=None,
p_value=0.05,
file_format="pdf",
file_name="functional_enrichment_analysis"):
- """
+ """
Perform functional enrichment analysis including GO, KEGG and REACTOME
:param type: indicates the type of database; it should be one of "GO", "REACTOME"
@@ -207,7 +223,10 @@ Source code for Topyfic.topic
file_format=file_format,
file_name=file_name)
-[docs] def GSEA(self,
+
+
+[docs]
+ def GSEA(self,
gene_sets='GO_Biological_Process_2021',
p_value=0.05,
table=True,
@@ -215,7 +234,7 @@ Source code for Topyfic.topic
file_format="pdf",
file_name="GSEA",
**kwargs):
- """
+ """
Perform Gene Set Enrichment Analysis based on the topic weights using the GSEAPY package.
:param gene_sets: Enrichr Library name or .gmt gene sets file or dict of gene sets. (you can add any Enrichr Libraries from here: https://maayanlab.cloud/Enrichr/#stats)
@@ -256,8 +275,11 @@ Source code for Topyfic.topic
return GSEA_df
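A hedged usage sketch of running GSEA for one topic; the topic key follows the "Topic_{i}" convention used elsewhere in the code, and the defaults shown above are kept.

topic = top_model.topics["Topic_1"]
gsea_df = topic.GSEA(gene_sets="GO_Biological_Process_2021", p_value=0.05)
print(gsea_df.head())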
-[docs] def gene_weight_variance(self, save=True):
- """
+
+
+[docs]
+ def gene_weight_variance(self, save=True):
+ """
calculate the gene weight variance
:param save: added as an information to the Topic (default: True)
@@ -273,8 +295,11 @@ Source code for Topyfic.topic
return print(f"Gene weight variance for given topic is {variance}")
-[docs] def write_topic_yaml(self, topic_id=None, model_yaml_path="model.yaml", topic_yaml_path="topic.yaml", save=True):
- """
+
+
+[docs]
+ def write_topic_yaml(self, topic_id=None, model_yaml_path="model.yaml", topic_yaml_path="topic.yaml", save=True):
+ """
write topic in YAML format
:param topic_id: unique topic ID (default is topic ID)
@@ -309,7 +334,9 @@ Source code for Topyfic.topic
else:
yaml_string = yaml.dump(topic_yaml)
print("The Topic YAML is:")
- print(yaml_string)
+ print(yaml_string)
+
+
@@ -324,7 +351,7 @@ Source code for Topyfic.topic
diff --git a/docs/html/_modules/Topyfic/train.html b/docs/html/_modules/Topyfic/train.html
index 51a0803..2af5595 100644
--- a/docs/html/_modules/Topyfic/train.html
+++ b/docs/html/_modules/Topyfic/train.html
@@ -1,18 +1,16 @@
Topyfic.train — Topyfic 0.0.11 documentation
@@ -101,7 +99,18 @@
Source code for Topyfic.train
import sys
@@ -123,8 +132,10 @@ Source code for Topyfic.train
warnings.filterwarnings("ignore")
-[docs]class Train:
- """
+
+[docs]
+class Train:
+ """
A class used to train a reproducible latent Dirichlet allocation (rLDA) model
:param name: name of the Train class
@@ -156,8 +167,10 @@ Source code for Topyfic.train
self.random_state_range = random_state_range
self.top_models = []
-[docs] def combine_LDA_models(self, data, single_trains=[]):
- """
+
+[docs]
+ def combine_LDA_models(self, data, single_trains=[]):
+ """
combine single top_models
:param data: data you used to learn the model
@@ -167,7 +180,7 @@ Source code for Topyfic.train
"""
for i in range(len(single_trains)):
gene_weights = pd.DataFrame(np.transpose(single_trains[i].top_models[0].model.components_),
- columns=[f'Topic{i + 1}_R{self.random_state_range[i]}' for i in range(self.k)],
+ columns=[f'Topic{j + 1}_R{self.random_state_range[i]}' for j in range(self.k)],
index=data.var.index.tolist())
TopModel_lda_model = TopModel(name=f"{self.name}_{self.random_state_range[i]}",
N=gene_weights.shape[1],
@@ -175,8 +188,11 @@ Source code for Topyfic.train
model=single_trains[i].top_models[0].model)
self.top_models.append(TopModel_lda_model)
-[docs] def make_single_LDA_model(self, data, random_state, name, learning_method, batch_size, max_iter, n_jobs, kwargs):
- """
+
+
+[docs]
+ def make_single_LDA_model(self, data, random_state, name, learning_method, batch_size, max_iter, n_jobs, kwargs):
+ """
train a single LDA model using the sklearn package and embed it in a TopModel class
@@ -209,7 +225,7 @@ Source code for Topyfic.train
lda_model.fit_transform(data.to_df().to_numpy())
gene_weights = pd.DataFrame(np.transpose(lda_model.components_),
- columns=[f'Topic{i + 1}_R{random_state}' for i in range(self.k)],
+ columns=[f'Topic{i + 1}_R{random_state}' for i in range(self.k)],
index=data.var.index.tolist())
TopModel_lda_model = TopModel(name=f"{name}_{random_state}",
@@ -219,8 +235,11 @@ Source code for Topyfic.train
return TopModel_lda_model
-[docs] def run_LDA_models(self, data, learning_method="online", batch_size=1000, max_iter=10, n_jobs=None, n_thread=1, **kwargs):
- """
+
+
+[docs]
+ def run_LDA_models(self, data, learning_method="online", batch_size=1000, max_iter=10, n_jobs=None, n_thread=1, **kwargs):
+ """
train LDA models
@@ -253,8 +272,11 @@ Source code for Topyfic.train
repeat(max_iter), repeat(n_jobs), repeat(kwargs)))
print(f"{self.n_runs} LDA models with {self.k} topics learned\n")
-[docs] def make_LDA_models_attributes(self):
- """
+
+
+[docs]
+ def make_LDA_models_attributes(self):
+ """
make LDA attributes by combining the attributes of all single LDA models, which you need to define an LDA model (sklearn.decomposition.LatentDirichletAllocation)
@@ -267,15 +289,15 @@ Source code for Topyfic.train
feature = self.top_models[0].get_feature_name()
all_components = pd.DataFrame(
- index=[f"Topic{i + 1}_R{j}" for j in self.random_state_range for i in range(self.k)],
+ index=[f"Topic{i + 1}_R{j}" for j in self.random_state_range for i in range(self.k)],
columns=feature)
all_exp_dirichlet_component = pd.DataFrame(
- index=[f"Topic{i + 1}_R{j}" for j in self.random_state_range for i in range(self.k)],
+ index=[f"Topic{i + 1}_R{j}" for j in self.random_state_range for i in range(self.k)],
columns=feature)
all_others = pd.DataFrame(
- index=[f"Topic{i + 1}_R{j}" for j in self.random_state_range for i in range(self.k)],
+ index=[f"Topic{i + 1}_R{j}" for j in self.random_state_range for i in range(self.k)],
columns=["n_batch_iter",
"n_features_in",
"n_iter",
@@ -287,23 +309,26 @@ Source code for Topyfic.train
for random_state in self.random_state_range:
components, exp_dirichlet_component, others = self.top_models[count].get_top_model_attributes()
- all_components.loc[[f"Topic{i + 1}_R{random_state}" for i in range(self.k)], :] = components.values
+ all_components.loc[[f"Topic{i + 1}_R{random_state}" for i in range(self.k)], :] = components.values
- all_exp_dirichlet_component.loc[[f"Topic{i + 1}_R{random_state}" for i in range(self.k)], :] = exp_dirichlet_component.values
+ all_exp_dirichlet_component.loc[[f"Topic{i + 1}_R{random_state}" for i in range(self.k)], :] = exp_dirichlet_component.values
- all_others.loc[[f"Topic{i + 1}_R{random_state}" for i in range(self.k)], "n_batch_iter"] = others.n_batch_iter.values
- all_others.loc[[f"Topic{i + 1}_R{random_state}" for i in range(self.k)], "n_features_in"] = others.n_features_in.values
- all_others.loc[[f"Topic{i + 1}_R{random_state}" for i in range(self.k)], "n_iter"] = others.n_iter.values
- all_others.loc[[f"Topic{i + 1}_R{random_state}" for i in range(self.k)], "bound"] = others.bound.values
- all_others.loc[[f"Topic{i + 1}_R{random_state}" for i in range(self.k)], "doc_topic_prior"] = others.doc_topic_prior.values
- all_others.loc[[f"Topic{i + 1}_R{random_state}" for i in range(self.k)], "topic_word_prior"] = others.topic_word_prior.values
+ all_others.loc[[f"Topic{i + 1}_R{random_state}" for i in range(self.k)], "n_batch_iter"] = others.n_batch_iter.values
+ all_others.loc[[f"Topic{i + 1}_R{random_state}" for i in range(self.k)], "n_features_in"] = others.n_features_in.values
+ all_others.loc[[f"Topic{i + 1}_R{random_state}" for i in range(self.k)], "n_iter"] = others.n_iter.values
+ all_others.loc[[f"Topic{i + 1}_R{random_state}" for i in range(self.k)], "bound"] = others.bound.values
+ all_others.loc[[f"Topic{i + 1}_R{random_state}" for i in range(self.k)], "doc_topic_prior"] = others.doc_topic_prior.values
+ all_others.loc[[f"Topic{i + 1}_R{random_state}" for i in range(self.k)], "topic_word_prior"] = others.topic_word_prior.values
count = count + 1
return all_components, all_exp_dirichlet_component, all_others
-[docs] def save_train(self, name=None, save_path="", file_format='pickle'):
- """
+
+
+[docs]
+ def save_train(self, name=None, save_path="", file_format='pickle'):
+ """
save Train class as a pickle file
:param name: name of the pickle file (default is train_Train.name)
@@ -348,7 +373,9 @@ Source code for Topyfic.train
f['n_runs'] = np.int_(self.n_runs)
f['random_state_range'] = np.array(list(self.random_state_range))
- f.close()
+ f.close()
+
+
@@ -363,7 +390,7 @@ Source code for Topyfic.train
diff --git a/docs/html/_modules/Topyfic/utilsAnalyseModel.html b/docs/html/_modules/Topyfic/utilsAnalyseModel.html
index 8344185..c60b760 100644
--- a/docs/html/_modules/Topyfic/utilsAnalyseModel.html
+++ b/docs/html/_modules/Topyfic/utilsAnalyseModel.html
@@ -1,18 +1,16 @@
Topyfic.utilsAnalyseModel — Topyfic 0.0.11 documentation
@@ -101,7 +99,18 @@
Source code for Topyfic.utilsAnalyseModel
import sys
@@ -137,7 +146,9 @@ Source code for Topyfic.utilsAnalyseModel
warnings.filterwarnings("ignore")
-[docs]def compare_topModels(topModels,
+
+[docs]
+def compare_topModels(topModels,
comparison_method="Jensen–Shannon divergence",
output_type='graph',
threshold=0.8,
@@ -149,7 +160,7 @@ Source code for Topyfic.utilsAnalyseModel
figsize=None,
plot_format="pdf",
file_name="compare_topics"):
- """
+ """
compare topModels using topic gene weights
:param topModels: list of topModel classes you want to compare to each other
@@ -341,7 +352,10 @@ Source code for Topyfic.utilsAnalyseModel
return axs
-[docs]def MA_plot(topic1,
+
+
+[docs]
+def MA_plot(topic1,
topic2,
size=None,
pseudocount=1,
@@ -354,7 +368,7 @@ Source code for Topyfic.utilsAnalyseModel
show=True,
file_format="pdf",
file_name="MA_plot"):
- """
+ """
MA plot based on the gene weights of the given topics
:param topic1: gene weight of first topic to be compared
@@ -477,8 +491,11 @@ Source code for Topyfic.utilsAnalyseModel
return gene_zscore
-[docs]def modified_zscore(data, consistency_correction=1.4826):
- """
+
+
+[docs]
+def modified_zscore(data, consistency_correction=1.4826):
+ """
Returns the modified z score and Median Absolute Deviation (MAD) from the scores in data.
The consistency_correction factor converts the MAD to the standard deviation for a given
distribution. The default value (1.4826) is the conversion factor if the underlying data
@@ -495,14 +512,17 @@ Source code for Topyfic.utilsAnalyseModel
return mod_zscore, mad
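For clarity, an independent sketch of the modified z-score described above (median/MAD based, with the 1.4826 consistency correction); it mirrors the docstring rather than reproducing the library's exact code.

import numpy as np

def modified_zscore_sketch(data, consistency_correction=1.4826):
    median = np.median(data)
    mad = np.median(np.abs(data - median))                # median absolute deviation
    mod_z = (data - median) / (consistency_correction * mad)
    return mod_z, mad

scores, mad = modified_zscore_sketch(np.array([1.0, 1.2, 0.9, 1.1, 8.0]))
print(scores, mad)                                        # the 8.0 stands out as an outlier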
-[docs]def functional_enrichment_analysis(gene_list,
+
+
+[docs]
+def functional_enrichment_analysis(gene_list,
type,
organism,
sets=None,
p_value=0.05,
file_format="pdf",
file_name="functional_enrichment_analysis"):
- """
+ """
Perform functional enrichment analysis including GO, KEGG and REACTOME
:param gene_list: list of gene names
@@ -557,14 +577,17 @@ Source code for Topyfic.utilsAnalyseModel
p_value=str(p_value))
print(
- f"{numGeneModule - token_result['identifiersNotFound']} out of {numGeneModule} identifiers in the sample were found in Reactome.")
+ f"{numGeneModule - token_result['identifiersNotFound']} out of {numGeneModule} identifiers in the sample were found in Reactome.")
print(
f"{token_result['resourceSummary'][0]['pathways']} pathways were hit by at least one of them, which {len(token_result['pathways'])} of them have p-value more than {p_value}.")
print(f"Report was saved {file_name}!")
print(f"For more information please visit https://reactome.org/PathwayBrowser/#/DTAB=AN&ANALYSIS={token}")
-[docs]def GSEA(gene_list,
+
+
+[docs]
+def GSEA(gene_list,
gene_sets='GO_Biological_Process_2021',
p_value=0.05,
table=True,
@@ -572,7 +595,7 @@ Source code for Topyfic.utilsAnalyseModel
file_format="pdf",
file_name="GSEA",
**kwargs):
- """
+ """
Perform Gene Set Enrichment Analysis based on the topic weights using the GSEAPY package.
:param gene_list: pandas series with gene names as the index and their ranks/weights as values
@@ -622,11 +645,14 @@ Source code for Topyfic.utilsAnalyseModel
return pre_res.res2d
-[docs]def summarize_GO_Term(GO_terms,
+
+
+[docs]
+def summarize_GO_Term(GO_terms,
p_value=0.05,
file_format="html",
file_name="GO_sum"):
- """
+ """
Summarize long, unintelligible lists of GO terms by finding a representative subset of the terms showing more unique (child) GO terms
We suggest saving it as HTML since it is plotted with plotly, so you can take advantage of plotly's interactivity
@@ -704,6 +730,7 @@ Source code for Topyfic.utilsAnalyseModel
return df
+
@@ -718,7 +745,7 @@ Source code for Topyfic.utilsAnalyseModel
diff --git a/docs/html/_modules/Topyfic/utilsMakeModel.html b/docs/html/_modules/Topyfic/utilsMakeModel.html
index 9d9d028..45b2579 100644
--- a/docs/html/_modules/Topyfic/utilsMakeModel.html
+++ b/docs/html/_modules/Topyfic/utilsMakeModel.html
@@ -1,18 +1,16 @@
Topyfic.utilsMakeModel — Topyfic 0.0.11 documentation
@@ -101,7 +99,18 @@
Source code for Topyfic.utilsMakeModel
import sys
@@ -139,14 +148,16 @@ Source code for Topyfic.utilsMakeModel
warnings.filterwarnings("ignore")
-[docs]def train_model(name,
+
+[docs]
+def train_model(name,
data,
k,
n_runs=100,
random_state_range=None,
n_thread=5,
save_path=""):
- """
+ """
Train the model and save it
:param name: name of the Train class
@@ -173,13 +184,16 @@ Source code for Topyfic.utilsMakeModel
train.save_train(save_path=save_path)
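End-to-end training sketch using the wrapper above; k, n_runs and the paths are hypothetical choices, and adata is the processed AnnData from earlier.

import Topyfic

Topyfic.train_model(name="myRun",
                    data=adata,
                    k=15,
                    n_runs=100,
                    n_thread=5,
                    save_path="results/")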
-[docs]def make_topModel(trains,
+
+
+[docs]
+def make_topModel(trains,
data,
n_top_genes=50,
resolution=1,
file_format="pdf",
save_path=""):
- """
+ """
Create a topModel based on the train data and save it along with the clustering information
:param trains: list of train class
@@ -205,11 +219,14 @@ Source code for Topyfic.utilsMakeModel
top_model.save_topModel(save_path=save_path)
-[docs]def make_analysis_class(top_model,
+
+
+[docs]
+def make_analysis_class(top_model,
data,
colors_topics=None,
save_path=""):
- """
+ """
Creating Analysis object
:param top_model: top model used for analysing topics, gene weight compositions and calculating cell participation
@@ -226,8 +243,11 @@ Source code for Topyfic.utilsMakeModel
analysis_top_model.save_analysis(save_path=save_path)
-[docs]def subset_data(data, keep, loc='var'):
- """
+
+
+[docs]
+def subset_data(data, keep, loc='var'):
+ """
Subsetting data
:param data: data we want to subset
@@ -249,14 +269,17 @@ Source code for Topyfic.utilsMakeModel
return data
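A hedged usage sketch of subset_data: keep only selected genes (loc='var') or selected cells (loc='obs'). The gene list is hypothetical, and whether keep expects a list of names or a boolean mask is an assumption here.

keep_genes = ["Sox2", "Pou5f1", "Nanog"]
adata_sub = Topyfic.subset_data(adata, keep=keep_genes, loc="var")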
-[docs]def calculate_leiden_clustering(trains,
+
+
+[docs]
+def calculate_leiden_clustering(trains,
data,
- n_top_genes=50,
+ n_top_genes=None,
resolution=1,
max_iter_harmony=10,
min_cell_participation=None,
file_format="pdf"):
- """
+ """
Do Leiden clustering with/without Harmony based on the number of assays you have, and then remove low-participation topics
:param trains: list of train class
@@ -353,7 +376,7 @@ Source code for Topyfic.utilsMakeModel
clusters=clustering)
lda_output = rlda.transform(data.X)
cell_participation = pd.DataFrame(np.round(lda_output, 2),
- columns=[f"Topic{i + 1}" for i in range(n_rtopics)],
+ columns=[f"Topic{i + 1}" for i in range(n_rtopics)],
index=data.obs.index)
keep = cell_participation.sum() > min_cell_participation
@@ -390,7 +413,10 @@ Source code for Topyfic.utilsMakeModel
return top_model, clustering, adata
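A hedged sketch of building the reproducible topModel from trained runs, using the signature shown above with default settings; train and adata are the hypothetical objects from the earlier sketches.

top_model, clustering, adata_topics = Topyfic.calculate_leiden_clustering(
    trains=[train],          # one or more Train objects
    data=adata,
    resolution=1)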
-[docs]def plot_cluster_contribution(clustering,
+
+
+[docs]
+def plot_cluster_contribution(clustering,
feature,
show_all=False,
portion=True,
@@ -398,7 +424,7 @@ Source code for Topyfic.utilsMakeModel
show=True,
file_format="pdf",
file_name='cluster_contribution'):
- """
+ """
barplot showing the number of topics contributing to each cluster
:param clustering: dataframe that map each single LDA run to each cluster
@@ -432,7 +458,7 @@ Source code for Topyfic.utilsMakeModel
res.loc[i, opt] = tmp.shape[0]
tmp = clustering[clustering['leiden'] == i]
- res.loc[i, 'Topic'] = f"Topic_{i + 1}"
+ res.loc[i, 'Topic'] = f"Topic_{i + 1}"
res.loc[i, 'keep'] = tmp.keep.unique()[0]
if not show_all:
@@ -465,8 +491,11 @@ Source code for Topyfic.utilsMakeModel
plt.close()
-[docs]def initialize_rLDA_model(all_components, all_exp_dirichlet_component, all_others, clusters):
- """
+
+
+[docs]
+def initialize_rLDA_model(all_components, all_exp_dirichlet_component, all_others, clusters):
+ """
Initialize reproducible LDA model by calculating all necessary attributes using clustering.
:param all_components: Variational parameters for topic gene distribution from all single LDA models
@@ -501,18 +530,18 @@ Source code for Topyfic.utilsMakeModel
topic = topic + 1
df = pd.DataFrame(np.transpose(components),
- columns=[f"Topic{i + 1}" for i in range(n_rtopics)],
+ columns=[f"Topic{i + 1}" for i in range(n_rtopics)],
index=all_components.columns)
for topic in range(n_rtopics):
- df_sorted = df.sort_values([f"Topic{topic + 1}"], axis=0, ascending=False)[f"Topic{topic + 1}"]
+ df_sorted = df.sort_values([f"Topic{topic + 1}"], axis=0, ascending=False)[f"Topic{topic + 1}"]
tmp = df_sorted.cumsum()
tmp = tmp[tmp > 0.9 * df_sorted.sum()]
- df.loc[tmp.index, f"Topic{topic + 1}"] = 1 / n_rtopics
+ df.loc[tmp.index, f"Topic{topic + 1}"] = 1 / n_rtopics
components = df.T
exp_dirichlet_component = pd.DataFrame(exp_dirichlet_component,
- index=[f"Topic{i + 1}" for i in range(n_rtopics)],
+ index=[f"Topic{i + 1}" for i in range(n_rtopics)],
columns=all_exp_dirichlet_component.columns)
others = all_others.mean(axis=0)
@@ -521,8 +550,11 @@ Source code for Topyfic.utilsMakeModel
return LDA
-[docs]def initialize_lda_model(components, exp_dirichlet_component, others):
- """
+
+
+[docs]
+def initialize_lda_model(components, exp_dirichlet_component, others):
+ """
Initialize LDA model by passing all necessary attributes
:param components: Variational parameters for topic gene distribution
@@ -551,8 +583,11 @@ Source code for Topyfic.utilsMakeModel
return LDA
-[docs]def filter_LDA_model(main_lda, keep):
- """
+
+
+[docs]
+def filter_LDA_model(main_lda, keep):
+ """
filter LDA based on the topics we want to keep
:param main_lda: Latent Dirichlet Allocation with online variational Bayes algorithm.
@@ -569,12 +604,12 @@ Source code for Topyfic.utilsMakeModel
components = main_lda.components_[[i for i in range(keep.shape[0]) if keep[i]], :]
df = pd.DataFrame(np.transpose(components),
- columns=[f'Topic{i + 1}' for i in range(n_topics)])
+ columns=[f'Topic{i + 1}' for i in range(n_topics)])
for topic in range(n_topics):
- df_sorted = df.sort_values([f'Topic{topic + 1}'], axis=0, ascending=False)[f'Topic{topic + 1}']
+ df_sorted = df.sort_values([f'Topic{topic + 1}'], axis=0, ascending=False)[f'Topic{topic + 1}']
tmp = df_sorted.cumsum()
tmp = tmp[tmp > 0.9 * df_sorted.sum()]
- df.loc[tmp.index, f'Topic{topic + 1}'] = 1 / n_topics
+ df.loc[tmp.index, f'Topic{topic + 1}'] = 1 / n_topics
components = df.T
@@ -590,8 +625,11 @@ Source code for Topyfic.utilsMakeModel
return lda, components
-[docs]def read_train(file):
- """
+
+
+[docs]
+def read_train(file):
+ """
reading train pickle file
:param file: path of the pickle file
@@ -651,8 +689,11 @@ Source code for Topyfic.utilsMakeModel
return train
-[docs]def read_topModel(file):
- """
+
+
+[docs]
+def read_topModel(file):
+ """
reading topModel pickle/HDF5 file
:param file: path of the pickle/HDF5 file
@@ -678,7 +719,7 @@ Source code for Topyfic.utilsMakeModel
# topics
topics = dict()
- topic_ids = [f'Topic_{i + 1}' for i in range(N)]
+ topic_ids = [f'Topic_{i + 1}' for i in range(N)]
for topic in topic_ids:
topic_id = np.string_(f['topics'][topic]['id']).decode('ascii')
topic_name = np.string_(f['topics'][topic]['name']).decode('ascii')
@@ -732,8 +773,11 @@ Source code for Topyfic.utilsMakeModel
return top_model
-[docs]def read_analysis(file):
- """
+
+
+[docs]
+def read_analysis(file):
+ """
reading analysis pickle file
:param file: path of the pickle file
@@ -752,11 +796,14 @@ Source code for Topyfic.utilsMakeModel
return analysis
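Reader helpers sketch: the read_* functions above reload previously saved objects (paths hypothetical).

train = Topyfic.read_train("results/train_myRun.p")
top_model = Topyfic.read_topModel("results/topModel_myRun.p")
analysis = Topyfic.read_analysis("results/analysis_myRun.p")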
-[docs]def read_model_yaml(model_yaml_path="model.yaml",
+
+
+[docs]
+def read_model_yaml(model_yaml_path="model.yaml",
topic_yaml_path=None,
cell_topic_participation_path=None,
save=True):
- """
+ """
read YAML files and make a topModel object
write topic in YAML format
@@ -802,7 +849,7 @@ Source code for Topyfic.utilsMakeModel
topic_information = None
if 'Topic information' in topic_yaml.keys():
topic_information = pd.DataFrame(topic_yaml['Topic information'], index=topic_gene_weights.columns)
- topics[f"Topic_{i + 1}"] = Topic(topic_id=topic_id,
+ topics[f"Topic_{i + 1}"] = Topic(topic_id=topic_id,
topic_gene_weights=topic_gene_weights,
gene_information=gene_information,
topic_information=topic_information)
@@ -833,11 +880,14 @@ Source code for Topyfic.utilsMakeModel
return topModel, analysis
-[docs]def combine_topModels(topModels,
+
+
+[docs]
+def combine_topModels(topModels,
name="Combined_TopModel",
data=None,
min_cell_participation=None):
- """
+ """
Combine topModels. No reconciliation method is applied during combination; all models are simply concatenated.
:param topModels: list of topmodels you want to combine
@@ -865,7 +915,7 @@ Source code for Topyfic.utilsMakeModel
n_topics += topmodel.N
tmp = pd.DataFrame(topmodel.model.components_,
- index=[f'{topmodel.name}_Topic_{i + 1}' for i in
+ index=[f'{topmodel.name}_Topic_{i + 1}' for i in
range(topmodel.model.components_.shape[0])],
columns=topmodel.get_feature_name())
if components is None:
@@ -874,7 +924,7 @@ Source code for Topyfic.utilsMakeModel
components = pd.concat([components, tmp], axis=0)
tmp = pd.DataFrame(topmodel.model.exp_dirichlet_component_,
- index=[f'{topmodel.name}_Topic_{i + 1}' for i in
+ index=[f'{topmodel.name}_Topic_{i + 1}' for i in
range(topmodel.model.components_.shape[0])],
columns=topmodel.get_feature_name())
if exp_dirichlet_component is None:
@@ -913,7 +963,7 @@ Source code for Topyfic.utilsMakeModel
lda_output = model.transform(data.X)
cell_participation = pd.DataFrame(np.round(lda_output, 2),
- columns=[f"Topic{i + 1}" for i in range(n_topics)],
+ columns=[f"Topic{i + 1}" for i in range(n_topics)],
index=data.obs.index)
keep = cell_participation.sum() > min_cell_participation
@@ -922,7 +972,7 @@ Source code for Topyfic.utilsMakeModel
n_topics = keep.sum()
model, tmp = filter_LDA_model(model, keep)
components = pd.DataFrame(model.components_,
- index=[f'{name}_Topic_{i + 1}' for i in
+ index=[f'{name}_Topic_{i + 1}' for i in
range(model.components_.shape[0])],
columns=components.columns.tolist())
@@ -932,6 +982,7 @@ Source code for Topyfic.utilsMakeModel
model=model)
return top_model, n_topics, components.T
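A hedged sketch of merging two independently trained topModels with the function above; the topModel objects are hypothetical.

combined, n_topics, gene_weights = Topyfic.combine_topModels(
    topModels=[top_model_a, top_model_b],
    name="Combined_TopModel")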
+
@@ -946,7 +997,7 @@ Source code for Topyfic.utilsMakeModel
diff --git a/docs/html/_modules/index.html b/docs/html/_modules/index.html
index 5320987..31c5cba 100644
--- a/docs/html/_modules/index.html
+++ b/docs/html/_modules/index.html
@@ -1,18 +1,16 @@
Overview: module code — Topyfic 0.0.11 documentation
@@ -101,7 +99,18 @@
All modules for which code is available
- Topyfic.analysis
@@ -124,7 +133,7 @@ All modules for which code is available
© Copyright 2022, Narges Rezaie.
- Created using Sphinx 5.0.2.
+ Created using Sphinx 8.0.2.
diff --git a/docs/html/_static/_sphinx_javascript_frameworks_compat.js b/docs/html/_static/_sphinx_javascript_frameworks_compat.js
deleted file mode 100644
index 8549469..0000000
--- a/docs/html/_static/_sphinx_javascript_frameworks_compat.js
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * _sphinx_javascript_frameworks_compat.js
- * ~~~~~~~~~~
- *
- * Compatability shim for jQuery and underscores.js.
- *
- * WILL BE REMOVED IN Sphinx 6.0
- * xref RemovedInSphinx60Warning
- *
- */
-
-/**
- * select a different prefix for underscore
- */
-$u = _.noConflict();
-
-
-/**
- * small helper function to urldecode strings
- *
- * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL
- */
-jQuery.urldecode = function(x) {
- if (!x) {
- return x
- }
- return decodeURIComponent(x.replace(/\+/g, ' '));
-};
-
-/**
- * small helper function to urlencode strings
- */
-jQuery.urlencode = encodeURIComponent;
-
-/**
- * This function returns the parsed url parameters of the
- * current request. Multiple values per key are supported,
- * it will always return arrays of strings for the value parts.
- */
-jQuery.getQueryParameters = function(s) {
- if (typeof s === 'undefined')
- s = document.location.search;
- var parts = s.substr(s.indexOf('?') + 1).split('&');
- var result = {};
- for (var i = 0; i < parts.length; i++) {
- var tmp = parts[i].split('=', 2);
- var key = jQuery.urldecode(tmp[0]);
- var value = jQuery.urldecode(tmp[1]);
- if (key in result)
- result[key].push(value);
- else
- result[key] = [value];
- }
- return result;
-};
-
-/**
- * highlight a given string on a jquery object by wrapping it in
- * span elements with the given class name.
- */
-jQuery.fn.highlightText = function(text, className) {
- function highlight(node, addItems) {
- if (node.nodeType === 3) {
- var val = node.nodeValue;
- var pos = val.toLowerCase().indexOf(text);
- if (pos >= 0 &&
- !jQuery(node.parentNode).hasClass(className) &&
- !jQuery(node.parentNode).hasClass("nohighlight")) {
- var span;
- var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
- if (isInSVG) {
- span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
- } else {
- span = document.createElement("span");
- span.className = className;
- }
- span.appendChild(document.createTextNode(val.substr(pos, text.length)));
- node.parentNode.insertBefore(span, node.parentNode.insertBefore(
- document.createTextNode(val.substr(pos + text.length)),
- node.nextSibling));
- node.nodeValue = val.substr(0, pos);
- if (isInSVG) {
- var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
- var bbox = node.parentElement.getBBox();
- rect.x.baseVal.value = bbox.x;
- rect.y.baseVal.value = bbox.y;
- rect.width.baseVal.value = bbox.width;
- rect.height.baseVal.value = bbox.height;
- rect.setAttribute('class', className);
- addItems.push({
- "parent": node.parentNode,
- "target": rect});
- }
- }
- }
- else if (!jQuery(node).is("button, select, textarea")) {
- jQuery.each(node.childNodes, function() {
- highlight(this, addItems);
- });
- }
- }
- var addItems = [];
- var result = this.each(function() {
- highlight(this, addItems);
- });
- for (var i = 0; i < addItems.length; ++i) {
- jQuery(addItems[i].parent).before(addItems[i].target);
- }
- return result;
-};
-
-/*
- * backward compatibility for jQuery.browser
- * This will be supported until firefox bug is fixed.
- */
-if (!jQuery.browser) {
- jQuery.uaMatch = function(ua) {
- ua = ua.toLowerCase();
-
- var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
- /(webkit)[ \/]([\w.]+)/.exec(ua) ||
- /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
- /(msie) ([\w.]+)/.exec(ua) ||
- ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
- [];
-
- return {
- browser: match[ 1 ] || "",
- version: match[ 2 ] || "0"
- };
- };
- jQuery.browser = {};
- jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
-}
diff --git a/docs/html/_static/basic.css b/docs/html/_static/basic.css
index 7d5974c..f316efc 100644
--- a/docs/html/_static/basic.css
+++ b/docs/html/_static/basic.css
@@ -4,7 +4,7 @@
*
* Sphinx stylesheet -- basic theme.
*
- * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
@@ -236,17 +236,11 @@ div.body p, div.body dd, div.body li, div.body blockquote {
a.headerlink {
visibility: hidden;
}
-a.brackets:before,
-span.brackets > a:before{
- content: "[";
-}
-a.brackets:after,
-span.brackets > a:after {
- content: "]";
+a:visited {
+ color: #551A8B;
}
-
h1:hover > a.headerlink,
h2:hover > a.headerlink,
h3:hover > a.headerlink,
@@ -334,11 +328,17 @@ aside.sidebar {
p.sidebar-title {
font-weight: bold;
}
+
+nav.contents,
+aside.topic,
div.admonition, div.topic, blockquote {
clear: left;
}
/* -- topics ---------------------------------------------------------------- */
+
+nav.contents,
+aside.topic,
div.topic {
border: 1px solid #ccc;
padding: 7px;
@@ -377,6 +377,8 @@ div.body p.centered {
div.sidebar > :last-child,
aside.sidebar > :last-child,
+nav.contents > :last-child,
+aside.topic > :last-child,
div.topic > :last-child,
div.admonition > :last-child {
margin-bottom: 0;
@@ -384,6 +386,8 @@ div.admonition > :last-child {
div.sidebar::after,
aside.sidebar::after,
+nav.contents::after,
+aside.topic::after,
div.topic::after,
div.admonition::after,
blockquote::after {
@@ -609,25 +613,6 @@ ul.simple p {
margin-bottom: 0;
}
-/* Docutils 0.17 and older (footnotes & citations) */
-dl.footnote > dt,
-dl.citation > dt {
- float: left;
- margin-right: 0.5em;
-}
-
-dl.footnote > dd,
-dl.citation > dd {
- margin-bottom: 0em;
-}
-
-dl.footnote > dd:after,
-dl.citation > dd:after {
- content: "";
- clear: both;
-}
-
-/* Docutils 0.18+ (footnotes & citations) */
aside.footnote > span,
div.citation > span {
float: left;
@@ -652,8 +637,6 @@ div.citation > p:last-of-type:after {
clear: both;
}
-/* Footnotes & citations ends */
-
dl.field-list {
display: grid;
grid-template-columns: fit-content(30%) auto;
@@ -666,10 +649,6 @@ dl.field-list > dt {
padding-right: 5px;
}
-dl.field-list > dt:after {
- content: ":";
-}
-
dl.field-list > dd {
padding-left: 0.5em;
margin-top: 0em;
@@ -695,6 +674,16 @@ dd {
margin-left: 30px;
}
+.sig dd {
+ margin-top: 0px;
+ margin-bottom: 0px;
+}
+
+.sig dl {
+ margin-top: 0px;
+ margin-bottom: 0px;
+}
+
dl > dd:last-child,
dl > dd:last-child > :last-child {
margin-bottom: 0;
@@ -763,6 +752,14 @@ abbr, acronym {
cursor: help;
}
+.translated {
+ background-color: rgba(207, 255, 207, 0.2)
+}
+
+.untranslated {
+ background-color: rgba(255, 207, 207, 0.2)
+}
+
/* -- code displays --------------------------------------------------------- */
pre {
diff --git a/docs/html/_static/doctools.js b/docs/html/_static/doctools.js
index c3db08d..4d67807 100644
--- a/docs/html/_static/doctools.js
+++ b/docs/html/_static/doctools.js
@@ -4,12 +4,19 @@
*
* Base JavaScript utilities for all Sphinx HTML documentation.
*
- * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
"use strict";
+const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([
+ "TEXTAREA",
+ "INPUT",
+ "SELECT",
+ "BUTTON",
+]);
+
const _ready = (callback) => {
if (document.readyState !== "loading") {
callback();
@@ -18,73 +25,11 @@ const _ready = (callback) => {
}
};
-/**
- * highlight a given string on a node by wrapping it in
- * span elements with the given class name.
- */
-const _highlight = (node, addItems, text, className) => {
- if (node.nodeType === Node.TEXT_NODE) {
- const val = node.nodeValue;
- const parent = node.parentNode;
- const pos = val.toLowerCase().indexOf(text);
- if (
- pos >= 0 &&
- !parent.classList.contains(className) &&
- !parent.classList.contains("nohighlight")
- ) {
- let span;
-
- const closestNode = parent.closest("body, svg, foreignObject");
- const isInSVG = closestNode && closestNode.matches("svg");
- if (isInSVG) {
- span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
- } else {
- span = document.createElement("span");
- span.classList.add(className);
- }
-
- span.appendChild(document.createTextNode(val.substr(pos, text.length)));
- parent.insertBefore(
- span,
- parent.insertBefore(
- document.createTextNode(val.substr(pos + text.length)),
- node.nextSibling
- )
- );
- node.nodeValue = val.substr(0, pos);
-
- if (isInSVG) {
- const rect = document.createElementNS(
- "http://www.w3.org/2000/svg",
- "rect"
- );
- const bbox = parent.getBBox();
- rect.x.baseVal.value = bbox.x;
- rect.y.baseVal.value = bbox.y;
- rect.width.baseVal.value = bbox.width;
- rect.height.baseVal.value = bbox.height;
- rect.setAttribute("class", className);
- addItems.push({ parent: parent, target: rect });
- }
- }
- } else if (node.matches && !node.matches("button, select, textarea")) {
- node.childNodes.forEach((el) => _highlight(el, addItems, text, className));
- }
-};
-const _highlightText = (thisNode, text, className) => {
- let addItems = [];
- _highlight(thisNode, addItems, text, className);
- addItems.forEach((obj) =>
- obj.parent.insertAdjacentElement("beforebegin", obj.target)
- );
-};
-
/**
* Small JavaScript module for the documentation.
*/
const Documentation = {
init: () => {
- Documentation.highlightSearchWords();
Documentation.initDomainIndexTable();
Documentation.initOnKeyListeners();
},
@@ -126,51 +71,6 @@ const Documentation = {
Documentation.LOCALE = catalog.locale;
},
- /**
- * highlight the search words provided in the url in the text
- */
- highlightSearchWords: () => {
- const highlight =
- new URLSearchParams(window.location.search).get("highlight") || "";
- const terms = highlight.toLowerCase().split(/\s+/).filter(x => x);
- if (terms.length === 0) return; // nothing to do
-
- // There should never be more than one element matching "div.body"
- const divBody = document.querySelectorAll("div.body");
- const body = divBody.length ? divBody[0] : document.querySelector("body");
- window.setTimeout(() => {
- terms.forEach((term) => _highlightText(body, term, "highlighted"));
- }, 10);
-
- const searchBox = document.getElementById("searchbox");
- if (searchBox === null) return;
- searchBox.appendChild(
- document
- .createRange()
- .createContextualFragment(
- '' +
- '' +
- Documentation.gettext("Hide Search Matches") +
- "
"
- )
- );
- },
-
- /**
- * helper function to hide the search marks again
- */
- hideSearchWords: () => {
- document
- .querySelectorAll("#searchbox .highlight-link")
- .forEach((el) => el.remove());
- document
- .querySelectorAll("span.highlighted")
- .forEach((el) => el.classList.remove("highlighted"));
- const url = new URL(window.location);
- url.searchParams.delete("highlight");
- window.history.replaceState({}, "", url);
- },
-
/**
* helper function to focus on search bar
*/
@@ -210,15 +110,11 @@ const Documentation = {
)
return;
- const blacklistedElements = new Set([
- "TEXTAREA",
- "INPUT",
- "SELECT",
- "BUTTON",
- ]);
document.addEventListener("keydown", (event) => {
- if (blacklistedElements.has(document.activeElement.tagName)) return; // bail for input elements
- if (event.altKey || event.ctrlKey || event.metaKey) return; // bail with special keys
+ // bail for input elements
+ if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
+ // bail with special keys
+ if (event.altKey || event.ctrlKey || event.metaKey) return;
if (!event.shiftKey) {
switch (event.key) {
@@ -240,10 +136,6 @@ const Documentation = {
event.preventDefault();
}
break;
- case "Escape":
- if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break;
- Documentation.hideSearchWords();
- event.preventDefault();
}
}
diff --git a/docs/html/_static/documentation_options.js b/docs/html/_static/documentation_options.js
index e82e41f..3ba1e30 100644
--- a/docs/html/_static/documentation_options.js
+++ b/docs/html/_static/documentation_options.js
@@ -1,5 +1,4 @@
-var DOCUMENTATION_OPTIONS = {
- URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
+const DOCUMENTATION_OPTIONS = {
VERSION: '0.0.11',
LANGUAGE: 'en',
COLLAPSE_INDEX: false,
@@ -10,5 +9,5 @@ var DOCUMENTATION_OPTIONS = {
SOURCELINK_SUFFIX: '.txt',
NAVIGATION_WITH_KEYS: false,
SHOW_SEARCH_SUMMARY: true,
- ENABLE_SEARCH_SHORTCUTS: false,
+ ENABLE_SEARCH_SHORTCUTS: true,
};
\ No newline at end of file
diff --git a/docs/html/_static/graphviz.css b/docs/html/_static/graphviz.css
index 19e7afd..027576e 100644
--- a/docs/html/_static/graphviz.css
+++ b/docs/html/_static/graphviz.css
@@ -4,7 +4,7 @@
*
* Sphinx stylesheet -- graphviz extension.
*
- * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
diff --git a/docs/html/_static/jquery-3.6.0.js b/docs/html/_static/jquery-3.6.0.js
deleted file mode 100644
index fc6c299..0000000
--- a/docs/html/_static/jquery-3.6.0.js
+++ /dev/null
@@ -1,10881 +0,0 @@
-/*!
- * jQuery JavaScript Library v3.6.0
- * https://jquery.com/
- *
- * Includes Sizzle.js
- * https://sizzlejs.com/
- *
- * Copyright OpenJS Foundation and other contributors
- * Released under the MIT license
- * https://jquery.org/license
- *
- * Date: 2021-03-02T17:08Z
- */
-( function( global, factory ) {
-
- "use strict";
-
- if ( typeof module === "object" && typeof module.exports === "object" ) {
-
- // For CommonJS and CommonJS-like environments where a proper `window`
- // is present, execute the factory and get jQuery.
- // For environments that do not have a `window` with a `document`
- // (such as Node.js), expose a factory as module.exports.
- // This accentuates the need for the creation of a real `window`.
- // e.g. var jQuery = require("jquery")(window);
- // See ticket #14549 for more info.
- module.exports = global.document ?
- factory( global, true ) :
- function( w ) {
- if ( !w.document ) {
- throw new Error( "jQuery requires a window with a document" );
- }
- return factory( w );
- };
- } else {
- factory( global );
- }
-
-// Pass this if window is not defined yet
-} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) {
-
-// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1
-// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode
-// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common
-// enough that all such attempts are guarded in a try block.
-"use strict";
-
-var arr = [];
-
-var getProto = Object.getPrototypeOf;
-
-var slice = arr.slice;
-
-var flat = arr.flat ? function( array ) {
- return arr.flat.call( array );
-} : function( array ) {
- return arr.concat.apply( [], array );
-};
-
-
-var push = arr.push;
-
-var indexOf = arr.indexOf;
-
-var class2type = {};
-
-var toString = class2type.toString;
-
-var hasOwn = class2type.hasOwnProperty;
-
-var fnToString = hasOwn.toString;
-
-var ObjectFunctionString = fnToString.call( Object );
-
-var support = {};
-
-var isFunction = function isFunction( obj ) {
-
- // Support: Chrome <=57, Firefox <=52
- // In some browsers, typeof returns "function" for HTML