Today we continue from last week's post and reproduce Figure 2 of "Spatial transcriptomics reveals human cortical layer and area specification". Figure 2 illustrates layer and area specification of the human cortex: by combining spatial transcriptomic data with cortical-depth analysis, it reveals the distribution patterns of excitatory neurons (EN) and inhibitory neurons (IN).
Original article:
https://www.nature.com/articles/s41586-025-09010-1#Sec2
Data download:
https://zenodo.org/records/14422018
Code download:
https://github.com/carsen-stringer/vizgen-postprocessing
The authors identify a set of EN subtypes with narrow laminar distributions whose spatial positions at gestational week 34 (GW34) match the six-layer cortical structure closely (Fig. 2a). At GW22, even though no clear morphological lamination is visible yet, MERFISH profiling can already delineate all six layers from the distribution of these H3-level EN subtypes (Fig. 2c), indicating that cortical layer identity is molecularly pre-specified early in development, before morphological differences appear. The cortex is assembled layer by layer from GW15 onward: layers 4-6 are visible first, and layer 3 forms by GW20, consistent with the classical inside-out developmental sequence (Fig. 2c). Unlike the EN-ET subtypes, which project to the thalamus, the intracortically projecting EN-IT subtypes span multiple layers with bell-shaped depth distributions, so cell types from different layers remain intermingled even at GW34, when morphological lamination is clear (Fig. 2e, f), a feature that persists into the adult brain. By contrast, IN subtypes show no layer specificity during GW15-GW22, indicating that their laminar organization is not yet established at mid-gestation (Fig. 2g).
The authors subset the data by sample/region and then, according to developmental stage (GW15/GW20/GW22/GW34), define the EN subtypes to highlight and their colors (e.g., EN-IT-L4-1), producing several spatial distribution maps for each sample/region.
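Before running the script below, it is worth confirming that the downloaded merscope_integrated_855.h5ad carries the .obs columns the three scripts rely on (sample, region, area, H1_annotation, H3_annotation, cortical_depth). A minimal check, reading the file in backed mode so the expression matrix is not pulled into memory (a sketch, not part of the original code):

```python
import scanpy as sc

# open the AnnData in backed mode: only metadata is loaded into memory
adata = sc.read_h5ad('merscope_integrated_855.h5ad', backed='r')
print(adata)  # dimensions and available .obs / .obsm slots

# columns used throughout the three scripts in this post
cols = ['sample', 'region', 'area', 'H1_annotation', 'H3_annotation', 'cortical_depth']
print(adata.obs[cols].head())
print(adata.obs['sample'].value_counts())  # which donors/sections are present
```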
import numpy as np
import scanpy as sc
import pandas as pd
import matplotlib.pyplot as plt
from multiprocessing import Pool
from matplotlib_scalebar.scalebar import ScaleBar
from itertools import repeat
import os
adata = sc.read('merscope_integrated_855.h5ad')
gw15 = ['UMB1367', 'UMB1117']
gw20 = ['FB080', 'FB121']
gw22 = ['FB123']
gw34 = ['UMB5900']
def make_plot(sample, region, fig):
    # subset the integrated object to a single sample/region section
    adata1 = adata[(adata.obs['sample']==sample) & (adata.obs.region==region)].copy()
    if fig=='layer_def':
        # one layer-defining EN subtype per cortical layer; the markers differ by gestational age
        ncol=1
        if sample in gw15:
            types = ['EN-IT-L4-1', 'EN-ET-L5-1', 'EN-IT-L6-1']
            colors = ['#0FF008', '#0832F0', '#F008EC']
            color_dict = dict(zip(types, colors))
            color_dict.update(dict(zip(np.setdiff1d(adata.obs.H3_annotation.unique(), types), ['grey']*len(np.setdiff1d(adata.obs.H3_annotation.unique(), types)))))
        elif sample in gw20:
            types = ['EN-IT-L2/3-A1', 'EN-IT-L4-1', 'EN-ET-L5-1', 'EN-IT-L6-1']
            colors = ["#F0A608", "#0FF008", '#0832F0', '#F008EC']
            color_dict = dict(zip(types, colors))
            color_dict.update(dict(zip(np.setdiff1d(adata.obs.H3_annotation.unique(), types), ['grey']*len(np.setdiff1d(adata.obs.H3_annotation.unique(), types)))))
        elif sample in gw22:
            types = ['EN-L2-1', 'EN-IT-L3-A', 'EN-IT-L4-1', 'EN-ET-L5-1', 'EN-IT-L6-1']
            colors = ["#EC2814", "#F0A608", "#0FF008", '#0832F0', '#F008EC']
            color_dict = dict(zip(types, colors))
            color_dict.update(dict(zip(np.setdiff1d(adata.obs.H3_annotation.unique(), types), ['grey']*len(np.setdiff1d(adata.obs.H3_annotation.unique(), types)))))
        elif sample in gw34:
            types = ['EN-L2-4', 'EN-IT-L3-late', 'EN-IT-L4-late', 'EN-ET-L5-1', 'EN-IT-L6-late']
            colors = ["#EC2814", "#F0A608", "#0FF008", '#0832F0', '#F008EC']
            color_dict = dict(zip(types, colors))
            color_dict.update(dict(zip(np.setdiff1d(adata.obs.H3_annotation.unique(), types), ['grey']*len(np.setdiff1d(adata.obs.H3_annotation.unique(), types)))))
    elif fig=='en_et':
        # all EN-ET subtypes, restricted below to those actually present in this section
        ncol=2
        types1 = ['EN-ET-L5-1', 'EN-ET-L6-V1', 'EN-ET-L5/6', 'EN-ET-L6-A', 'EN-ET-SP-early3', 'EN-ET-L6-early4', 'EN-ET-L6-P', 'EN-ET-SP-early4', 'EN-ET-SP-early2', 'EN-ET-SP-3', 'EN-ET-SP-A', 'EN-ET-SP-early1', 'EN-ET-L6-early1',
                  'EN-ET-L6-early3', 'EN-ET-SP-early5', 'EN-ET-SP-2', 'EN-ET-L6-early2', 'EN-ET-SP-1', 'EN-ET-SP-5', 'EN-ET-SP-4', 'EN-ET-SP-P2', 'EN-ET-SP-P1', 'EN-ET-SP-V1T2', 'EN-ET-L6-early5', 'EN-ET-SP-V1T1']
        colors = ['#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f', '#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf',
                  '#9edae5', '#393b79', '#5254a3', '#6b6ecf', '#9c9ede', '#637939']
        color_dict = dict(zip(types1, colors))
        color_dict.update(dict(zip(np.setdiff1d(adata.obs.H3_annotation.unique(), types1), ['grey']*len(np.setdiff1d(adata.obs.H3_annotation.unique(), types1)))))
        types = list(adata1.obs[adata1.obs.H1_annotation=='EN-ET'].H3_annotation.unique())
        types = [ele for ele in types1 if ele in types]
    elif fig=='en_it_deep':
        # deep-layer EN-IT subtypes
        ncol=2
        types = ['EN-IT-L4-1', 'EN-IT-Hip', 'EN-IT-L6-2', 'EN-IT-L5-1', 'EN-IT-L4/5-early', 'EN-IT-L4/5-late', 'EN-IT-L4/5-1', 'EN-IT-L6-1', 'EN-IT-L6-late', 'EN-IT-L5/6-P']
        colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
        color_dict = dict(zip(types, colors))
        color_dict.update(dict(zip(np.setdiff1d(adata.obs.H3_annotation.unique(), types), ['grey']*len(np.setdiff1d(adata.obs.H3_annotation.unique(), types)))))
    elif fig=='en_it_upper':
        # upper-layer EN-IT subtypes
        ncol=2
        types = ['EN-IT-L3-V1', 'EN-IT-L2/3-A2', 'EN-IT-L3-A', 'EN-IT-L3/4-1', 'EN-IT-L2/3-A1', 'EN-IT-L3/4-early', 'EN-IT-L4-A', 'EN-IT-L4-V1', 'EN-IT-L3-late', 'EN-IT-L3/4-P', 'EN-IT-L3/4-P2', 'EN-IT-L3-P', 'EN-IT-L4-late',
                 'EN-IT-L3/4-T']
        colors = ['#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2']
        color_dict = dict(zip(types, colors))
        color_dict.update(dict(zip(np.setdiff1d(adata.obs.H3_annotation.unique(), types), ['grey']*len(np.setdiff1d(adata.obs.H3_annotation.unique(), types)))))
    # spatial scatter coloured by H3 annotation; all non-highlighted subtypes stay grey
    plot = sc.pl.embedding(adata1, basis="spatial", color='H3_annotation', groups=types, show=False, s=2, palette=color_dict, alpha=1)
    plt.axis('off')
    plot.set_aspect('equal')
    # reorder the legend entries to follow the layer order defined above
    handles, labels = plt.gca().get_legend_handles_labels()
    order = [labels.index(i) for i in types]
    plt.legend([handles[idx] for idx in order], [labels[idx] for idx in order], loc='center', fontsize=2, ncol=ncol, bbox_to_anchor=(1.0, 1.0), markerscale=0.25)
    plot.get_figure().gca().set_title('')
    scalebar = ScaleBar(1, "um", fixed_value=500, location='lower right')  # 500-um scale bar
    plot.add_artist(scalebar)
    plt.tight_layout()
    output_dir = f"{sample}_{region}"
    os.makedirs(output_dir, exist_ok=True)
    plt.savefig(f"{output_dir}/{sample}_{region}_{fig}.png", dpi=500)
    plt.clf()
figs = ['layer_def', 'en_et', 'en_it_deep', 'en_it_upper']
sections = ['UMB1117-F1a', 'UMB1367-O1', 'FB080-O1c', 'FB121-F1', 'FB123-F1', 'FB123-F2', 'FB123-F3', 'FB123-P1', 'FB123-O2', 'UMB5900-BA9', 'UMB5900-BA18']
def main():
    # each section is encoded as 'sample-region'; draw its four panels in parallel
    for i in sections:
        with Pool(4) as pool:
            pool.starmap(make_plot, zip(repeat(i.split('-')[0]), repeat(i.split('-')[1]), figs))

if __name__=="__main__":
    main()
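The script opens one Pool(4) per section, and with the spawn start method (macOS/Windows) each worker re-imports the module and therefore re-reads the full AnnData. If that is too heavy for your machine, a serial run is a simple fallback; a sketch reusing the make_plot, sections, and figs defined above (not part of the original script):

```python
# serial fallback: loop over sections and figure panels without multiprocessing
for sec in sections:
    sample, region = sec.split('-')
    for fig in figs:
        make_plot(sample, region, fig)
```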
Figure 2a:
Figure 2b:
Figure 2c:
Next, for the F1 (prefrontal cortex) and O2 (visual cortex) regions of sample FB123, the authors select specific groups of EN subtypes (such as EN-ET-L5-1 or EN-IT-L4-1) and plot their density distributions along cortical depth.
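The dashed reference lines in the ridge plots below use hard-coded cortical-depth values (0.709, 0.540, 0.434, 0.304 for F1). These are consistent with the layer-boundary rule used in the violin-plot script later in this post: the midpoint between the 25th percentile of one layer's marker subtype and the 75th percentile of the next deeper one. How the exact numbers were produced is not shown here, so the following Python sketch is only a plausible way to recompute them for the GW22 FB123-F1 section:

```python
import numpy as np
import scanpy as sc

# sketch: recompute layer boundaries for FB123-F1 with the (q25 + q75)/2 rule;
# the hard-coded xintercept values in the R code may have been derived elsewhere
adata = sc.read_h5ad('merscope_integrated_855.h5ad')
obs = adata.obs
obs = obs[(obs['sample'] == 'FB123') & (obs.region == 'F1') & obs.cortical_depth.notna()]

markers = ['EN-L2-1', 'EN-IT-L3-A', 'EN-IT-L4-1', 'EN-ET-L5-1', 'EN-IT-L6-1']  # one marker per layer 2-6
depth = lambda t: obs.loc[obs.H3_annotation == t, 'cortical_depth']
boundaries = [(np.quantile(depth(a), 0.25) + np.quantile(depth(b), 0.75)) / 2
              for a, b in zip(markers[:-1], markers[1:])]
print(boundaries)  # compare with the geom_vline xintercept values used below
```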
library(ggplot2)
library(reticulate)
anndata = import("anndata")
adata = anndata$read_h5ad("merscope_integrated_855.h5ad")
obs_all = adata$obs
obs = obs_all[obs_all$sample == "FB123" & obs_all$region == "F1", ]
obs = obs[!is.na(obs$cortical_depth), ]
types = c('EN-L2-1', 'EN-IT-L3-A', 'EN-IT-L4-1', 'EN-ET-L5-1', 'EN-IT-L6-1')
obs1 = obs[obs$H3_annotation %in% types,]
obs1$H3_annotation = factor(obs1$H3_annotation, levels = types)
colors = c("#EC2814", "#F0A608", "#0FF008", '#0832F0', '#F008EC')
names(colors) = types
pdf(file = 'ridge_c_gw22_pfc.pdf', width=5.58, height=3.5)
print(ggplot(obs1, aes(x = cortical_depth, y = after_stat(count), color = H3_annotation, fill = H3_annotation)) +
geom_density(alpha=0.3)+ xlab('Cortical Depth') + ylab('Count') +
coord_flip() + scale_y_reverse() + scale_fill_manual(values=colors) + scale_color_manual(values=colors)+
geom_vline(xintercept=c(0.7090927649311176, 0.5404574294859591, 0.4336352966282663, 0.3039781775549624), linetype="dashed", color = "#1f77b4")+
theme(legend.title = element_blank(),legend.text = element_text(size = 5), panel.background = element_blank()))
dev.off()
types = c('EN-ET-L5-1', 'EN-ET-L6-V1', 'EN-ET-L5/6', 'EN-ET-L6-A', 'EN-ET-SP-early3', 'EN-ET-L6-early4', 'EN-ET-L6-P', 'EN-ET-SP-early4', 'EN-ET-SP-early2', 'EN-ET-SP-3', 'EN-ET-SP-A', 'EN-ET-SP-early1', 'EN-ET-L6-early1', 'EN-ET-L6-early3', 'EN-ET-SP-early5', 'EN-ET-SP-2', 'EN-ET-L6-early2', 'EN-ET-SP-1', 'EN-ET-SP-5', 'EN-ET-SP-4', 'EN-ET-SP-P2', 'EN-ET-SP-P1', 'EN-ET-SP-V1T2', 'EN-ET-L6-early5', 'EN-ET-SP-V1T1')
obs1 = obs[obs$H3_annotation %in% types,]
obs1$H3_annotation = factor(obs1$H3_annotation, levels = types)
colors = c('#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f', '#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5', '#393b79', '#5254a3', '#6b6ecf', '#9c9ede', '#637939')
names(colors) = types
pdf(file = 'ridge_d_gw22_pfc.pdf', width=5.58, height=3.5)
print(ggplot(obs1, aes(x = cortical_depth, y = after_stat(count), color = H3_annotation, fill = H3_annotation)) +
geom_density(alpha=0.3)+ xlab('Cortical Depth') + ylab('Count') +
xlim(0,1) +
coord_flip() + scale_y_reverse() +
scale_fill_manual(values=colors) +
scale_color_manual(values=colors) +
geom_vline(xintercept=c(0.7090927649311176, 0.5404574294859591, 0.4336352966282663, 0.3039781775549624), linetype="dashed", color = "#1f77b4")+
theme(legend.title = element_blank(), legend.text = element_text(size = 5), panel.background = element_blank())+
guides(color = guide_legend(override.aes = list(size = 0.3))))
dev.off()
types = c('EN-IT-L4-1', 'EN-IT-Hip', 'EN-IT-L6-2', 'EN-IT-L5-1', 'EN-IT-L4/5-early', 'EN-IT-L4/5-late', 'EN-IT-L4/5-1', 'EN-IT-L6-1', 'EN-IT-L6-late', 'EN-IT-L5/6-P')
obs1 = obs[obs$H3_annotation %in% types,]
obs1$H3_annotation = factor(obs1$H3_annotation, levels = types)
colors = c('#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf')
names(colors) = types
pdf(file = 'ridge_e_gw22_pfc.pdf', width=5.58, height=3.5)
print(ggplot(obs1, aes(x = cortical_depth, y = after_stat(count), color = H3_annotation, fill = H3_annotation)) +
geom_density(alpha=0.3)+ xlab('Cortical Depth') + ylab('Count') +
coord_flip() + scale_y_reverse() +
scale_fill_manual(values=colors) +
scale_color_manual(values=colors) +
geom_vline(xintercept=c(0.7090927649311176, 0.5404574294859591, 0.4336352966282663, 0.3039781775549624), linetype="dashed", color = "#1f77b4")+
theme(legend.title = element_blank(), legend.text = element_text(size = 5), panel.background = element_blank())+
guides(color = guide_legend(override.aes = list(size = 0.3))))
dev.off()
types = c('EN-IT-L3-V1', 'EN-IT-L2/3-A2', 'EN-IT-L3-A', 'EN-IT-L3/4-1', 'EN-IT-L2/3-A1', 'EN-IT-L3/4-early', 'EN-IT-L4-A', 'EN-IT-L4-V1', 'EN-IT-L3-late', 'EN-IT-L3/4-P', 'EN-IT-L3/4-P2', 'EN-IT-L3-P', 'EN-IT-L4-late', 'EN-IT-L3/4-T')
obs1 = obs[obs$H3_annotation %in% types,]
obs1$H3_annotation = factor(obs1$H3_annotation, levels = types)
colors = c('#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2')
names(colors) = types
pdf(file = 'ridge_f_gw22_pfc.pdf', width=5.58, height=3.5)
print(ggplot(obs1, aes(x = cortical_depth, y = after_stat(count), color = H3_annotation, fill = H3_annotation)) +
geom_density(alpha=0.3)+ xlab('Cortical Depth') + ylab('Count') +
coord_flip() + scale_y_reverse() +
scale_fill_manual(values=colors) +
scale_color_manual(values=colors) +
geom_vline(xintercept=c(0.7090927649311176, 0.5404574294859591, 0.4336352966282663, 0.3039781775549624), linetype="dashed", color = "#1f77b4")+
theme(legend.title = element_blank(), legend.text = element_text(size = 5), panel.background = element_blank())+
guides(color = guide_legend(override.aes = list(size = 0.3))))
dev.off()
obs = obs_all[obs_all$sample == "FB123" & obs_all$region == "O2" & obs_all$area=='A-Occi', ]
obs = obs[!is.na(obs$cortical_depth), ]
types = c('EN-ET-L5-1', 'EN-ET-L6-V1', 'EN-ET-L5/6', 'EN-ET-L6-A', 'EN-ET-SP-early3', 'EN-ET-L6-early4', 'EN-ET-L6-P', 'EN-ET-SP-early4', 'EN-ET-SP-early2', 'EN-ET-SP-3', 'EN-ET-SP-A', 'EN-ET-SP-early1', 'EN-ET-L6-early1', 'EN-ET-L6-early3', 'EN-ET-SP-early5', 'EN-ET-SP-2', 'EN-ET-L6-early2', 'EN-ET-SP-1', 'EN-ET-SP-5', 'EN-ET-SP-4', 'EN-ET-SP-P2', 'EN-ET-SP-P1', 'EN-ET-SP-V1T2', 'EN-ET-L6-early5', 'EN-ET-SP-V1T1')
obs1 = obs[obs$H3_annotation %in% types,]
obs1$H3_annotation = factor(obs1$H3_annotation, levels = types)
colors = c('#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2', '#7f7f7f', '#c7c7c7', '#bcbd22', '#dbdb8d', '#17becf', '#9edae5', '#393b79', '#5254a3', '#6b6ecf', '#9c9ede', '#637939')
names(colors) = types
pdf(file = 'ridge_d_gw22_v2.pdf', width=5.58, height=3.5)
print(ggplot(obs1, aes(x = cortical_depth, y = after_stat(count), color = H3_annotation, fill = H3_annotation)) +
geom_density(alpha=0.3)+ xlab('Cortical Depth') + ylab('Count') +
xlim(0,1) +
coord_flip() + scale_y_reverse() +
scale_fill_manual(values=colors) +
scale_color_manual(values=colors) +
geom_vline(xintercept=c(0.7868768296116182, 0.6575388963412806, 0.5134491993982202, 0.35806069002273433), linetype="dashed", color = "#1f77b4")+
theme(legend.title = element_blank(), legend.text = element_text(size = 5),panel.background = element_blank())+
guides(color = guide_legend(override.aes = list(size = 0.3))))
dev.off()
types = c('EN-IT-L4-1', 'EN-IT-Hip', 'EN-IT-L6-2', 'EN-IT-L5-1', 'EN-IT-L4/5-early', 'EN-IT-L4/5-late', 'EN-IT-L4/5-1', 'EN-IT-L6-1', 'EN-IT-L6-late', 'EN-IT-L5/6-P')
obs1 = obs[obs$H3_annotation %in% types,]
obs1$H3_annotation = factor(obs1$H3_annotation, levels = types)
colors = c('#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf')
names(colors) = types
pdf(file = 'ridge_e_gw22_v2.pdf', width=5.58, height=3.5)
print(ggplot(obs1, aes(x = cortical_depth, y = after_stat(count), color = H3_annotation, fill = H3_annotation)) +
geom_density(alpha=0.3)+ xlab('Cortical Depth') + ylab('Count') +
coord_flip() + scale_y_reverse() +
scale_fill_manual(values=colors) +
scale_color_manual(values=colors) +
geom_vline(xintercept=c(0.7868768296116182, 0.6575388963412806, 0.5134491993982202, 0.35806069002273433), linetype="dashed", color = "#1f77b4")+
theme(legend.title = element_blank(), legend.text = element_text(size = 5),panel.background = element_blank())+
guides(color = guide_legend(override.aes = list(size = 0.3))))
dev.off()
types = c('EN-IT-L3-V1', 'EN-IT-L2/3-A2', 'EN-IT-L3-A', 'EN-IT-L3/4-1', 'EN-IT-L2/3-A1', 'EN-IT-L3/4-early', 'EN-IT-L4-A', 'EN-IT-L4-V1', 'EN-IT-L3-late', 'EN-IT-L3/4-P', 'EN-IT-L3/4-P2', 'EN-IT-L3-P', 'EN-IT-L4-late', 'EN-IT-L3/4-T')
obs1 = obs[obs$H3_annotation %in% types,]
obs1$H3_annotation = factor(obs1$H3_annotation, levels = types)
colors = c('#1f77b4', '#aec7e8', '#ff7f0e', '#ffbb78', '#2ca02c', '#98df8a', '#d62728', '#ff9896', '#9467bd', '#c5b0d5', '#8c564b', '#c49c94', '#e377c2', '#f7b6d2')
names(colors) = types
pdf(file = 'ridge_f_gw22_v2.pdf', width=5.58, height=3.5)
print(ggplot(obs1, aes(x = cortical_depth, y = after_stat(count), color = H3_annotation, fill = H3_annotation)) +
geom_density(alpha=0.3)+ xlab('Cortical Depth') + ylab('Count') +
coord_flip() + scale_y_reverse() +
scale_fill_manual(values=colors) +
scale_color_manual(values=colors) +
geom_vline(xintercept=c(0.7868768296116182, 0.6575388963412806, 0.5134491993982202, 0.35806069002273433), linetype="dashed", color = "#1f77b4")+
theme(legend.title = element_blank(), legend.text = element_text(size = 5),panel.background = element_blank())+
guides(color = guide_legend(override.aes = list(size = 0.3))))
dev.off()
The authors then select the IN subtypes (H1_annotation == 'IN') and draw violin plots of their cortical depth. Layer boundaries are computed dynamically from the distributions of the layer-defining EN subtypes (e.g., l4_5 = (q25 + q75)/2, the midpoint between one marker subtype's 25th percentile and the next deeper one's 75th percentile). Sparse subtypes (fewer than 10 cells) are drawn as strip plots instead of violins.
import scanpy as sc  # needed for sc.read below
import seaborn as sns
import matplotlib.pyplot as plt
from multiprocessing import Pool
import numpy as np
adata = sc.read('merscope_integrated_855.h5ad')
obs_all = adata.obs
gw15 = ['UMB1367', 'UMB1117']
gw20 = ['FB080', 'FB121']
gw22 = ['FB123']
gw34 = ['UMB5900']
order = np.array(['IN-CGE4', 'IN-CGE1', 'INP-LGE1', 'IN-CGE5', 'IN-CGE2', 'IN-VZ/LGE2', 'IN-CGE3', 'IN-MGE4', 'IN-MGE2', 'INP-VZ/GE1', 'IN-SST-NPY', 'IN-VIP-late', 'INP-VZ', 'IN-GE1', 'IN-MGE5', 'IN-GE2', 'INP-LGE2', 'IN-SST3', 'IN-VZ/LGE1', 'IN-MGE1', 'IN-SST1', 'INP-VZ/GE2', 'IN-SST4', 'IN-MGE3'])
def make_plot(sample, region, area):
    # cells from one sample/region/area with a valid cortical-depth value
    obs = obs_all[(obs_all['sample']==sample) & (obs_all.region==region) & (obs_all.area==area)]
    obs = obs[obs['cortical_depth'].notna()]
    obs_en = obs[obs.H1_annotation.isin(['EN-IT', 'EN-Mig', 'EN-ET'])]
    obs_in = obs[obs.H1_annotation=='IN'].copy()
    # H3 subtypes with fewer than 10 cells are shown as strip plots instead of violins
    type_rm = list(obs.value_counts('H3_annotation').index[(obs.value_counts('H3_annotation')<10)])
    # layer boundaries: midpoint between the 25th percentile of one marker subtype
    # and the 75th percentile of the next deeper one
    if sample in gw15:
        layer_types = ['EN-IT-L4-1', 'EN-ET-L5-1', 'EN-IT-L6-1']
        l4_5 = (np.quantile(obs_en[obs_en.H3_annotation==layer_types[0]].cortical_depth, 0.25)+np.quantile(obs_en[obs_en.H3_annotation==layer_types[1]].cortical_depth, 0.75))/2
        l5_6 = (np.quantile(obs_en[obs_en.H3_annotation==layer_types[1]].cortical_depth, 0.25)+np.quantile(obs_en[obs_en.H3_annotation==layer_types[2]].cortical_depth, 0.75))/2
        l = [l4_5, l5_6]
    elif sample in gw20:
        layer_types = ['EN-IT-L2/3-A1', 'EN-IT-L4-1', 'EN-ET-L5-1', 'EN-IT-L6-1']
        l3_4 = (np.quantile(obs_en[obs_en.H3_annotation==layer_types[0]].cortical_depth, 0.25)+np.quantile(obs_en[obs_en.H3_annotation==layer_types[1]].cortical_depth, 0.75))/2
        l4_5 = (np.quantile(obs_en[obs_en.H3_annotation==layer_types[1]].cortical_depth, 0.25)+np.quantile(obs_en[obs_en.H3_annotation==layer_types[2]].cortical_depth, 0.75))/2
        l5_6 = (np.quantile(obs_en[obs_en.H3_annotation==layer_types[2]].cortical_depth, 0.25)+np.quantile(obs_en[obs_en.H3_annotation==layer_types[3]].cortical_depth, 0.75))/2
        l = [l3_4, l4_5, l5_6]
    elif sample in gw22:
        layer_types = ['EN-L2-1', 'EN-IT-L3-A', 'EN-IT-L4-1', 'EN-ET-L5-1', 'EN-IT-L6-1']
        l2_3 = (np.quantile(obs_en[obs_en.H3_annotation==layer_types[0]].cortical_depth, 0.25)+np.quantile(obs_en[obs_en.H3_annotation==layer_types[1]].cortical_depth, 0.75))/2
        l3_4 = (np.quantile(obs_en[obs_en.H3_annotation==layer_types[1]].cortical_depth, 0.25)+np.quantile(obs_en[obs_en.H3_annotation==layer_types[2]].cortical_depth, 0.75))/2
        l4_5 = (np.quantile(obs_en[obs_en.H3_annotation==layer_types[2]].cortical_depth, 0.25)+np.quantile(obs_en[obs_en.H3_annotation==layer_types[3]].cortical_depth, 0.75))/2
        l5_6 = (np.quantile(obs_en[obs_en.H3_annotation==layer_types[3]].cortical_depth, 0.25)+np.quantile(obs_en[obs_en.H3_annotation==layer_types[4]].cortical_depth, 0.75))/2
        l = [l2_3, l3_4, l4_5, l5_6]
    elif sample in gw34:
        layer_types = ['EN-L2-4', 'EN-IT-L3-late', 'EN-IT-L4-late', 'EN-ET-L5-1', 'EN-IT-L6-late']
        l2_3 = (np.quantile(obs_en[obs_en.H3_annotation==layer_types[0]].cortical_depth, 0.25)+np.quantile(obs_en[obs_en.H3_annotation==layer_types[1]].cortical_depth, 0.75))/2
        l3_4 = (np.quantile(obs_en[obs_en.H3_annotation==layer_types[1]].cortical_depth, 0.25)+np.quantile(obs_en[obs_en.H3_annotation==layer_types[2]].cortical_depth, 0.75))/2
        l4_5 = (np.quantile(obs_en[obs_en.H3_annotation==layer_types[2]].cortical_depth, 0.25)+np.quantile(obs_en[obs_en.H3_annotation==layer_types[3]].cortical_depth, 0.75))/2
        l5_6 = (np.quantile(obs_en[obs_en.H3_annotation==layer_types[3]].cortical_depth, 0.25)+np.quantile(obs_en[obs_en.H3_annotation==layer_types[4]].cortical_depth, 0.75))/2
        l = [l2_3, l3_4, l4_5, l5_6]
    # fix the IN subtype order on the x axis
    obs_in.H3_annotation = obs_in.H3_annotation.astype('category').cat.set_categories(order)
    if ((len(type_rm)>0) & (len(type_rm)<len(obs_in.H3_annotation.unique()))):
        # violins for well-populated subtypes, strip-plot overlay for the sparse ones
        obs_in_notrm = obs_in[~obs_in.H3_annotation.isin(type_rm)]
        obs_in_rm = obs_in[obs_in.H3_annotation.isin(type_rm)]
        plt.figure(figsize=(35,7))
        plot = sns.violinplot(x='H3_annotation', y='cortical_depth', hue='H3_annotation', data=obs_in_notrm, order=order, palette='rainbow', density_norm='width', inner=None, dodge=False, cut=0)
        sns.stripplot(x='H3_annotation', y='cortical_depth', hue='H3_annotation', data=obs_in_rm, order=order, palette='rainbow')
        [plot.axhline(i, linestyle='--') for i in l]  # dashed layer boundaries
        plot.legend().remove(); plt.xticks(rotation=90, fontsize=9); plt.yticks(fontsize=9); plot.set(xlabel=None); plot.set_ylabel('Cortical Depth', fontsize=20); plt.ylim(0,1)
        plt.tight_layout()
        plt.savefig(sample + '-' + region + '-' + area + 'in_violin.png', dpi=200, bbox_inches='tight', pad_inches=0)
    elif ((len(type_rm)>0) & (len(type_rm)==len(obs_in.H3_annotation.unique()))):
        # every subtype is sparse: strip plot only
        plt.figure(figsize=(35,7))
        plot = sns.stripplot(x='H3_annotation', y='cortical_depth', hue='H3_annotation', data=obs_in, order=order, palette='rainbow')
        [plot.axhline(i, linestyle='--') for i in l]
        plot.legend().remove(); plt.xticks(rotation=90, fontsize=9); plt.yticks(fontsize=9); plot.set(xlabel=None); plot.set_ylabel('Cortical Depth', fontsize=20); plt.ylim(0,1)
        plt.tight_layout()
        plt.savefig(sample + '-' + region + '-' + area + 'in_violin.png', dpi=200, bbox_inches='tight', pad_inches=0)
    else:
        # no sparse subtypes: violins only
        plt.figure(figsize=(35,7))
        plot = sns.violinplot(x='H3_annotation', y='cortical_depth', hue='H3_annotation', data=obs_in, order=order, palette='rainbow', density_norm='width', inner=None, dodge=False, cut=0)
        [plot.axhline(i, linestyle='--') for i in l]
        plot.legend().remove(); plt.xticks(rotation=90, fontsize=9); plt.yticks(fontsize=9); plot.set(xlabel=None); plot.set_ylabel('Cortical Depth', fontsize=20); plt.ylim(0,1)
        plt.tight_layout()
        plt.savefig(sample + '-' + region + '-' + area + 'in_violin.png', dpi=200, bbox_inches='tight', pad_inches=0)
samples_regions_areas = (adata.obs['sample'].astype('str') + '_' + adata.obs.region.astype('str')+ '_' + adata.obs.area.astype('str')).unique()
samples = [i.split('_')[0] for i in samples_regions_areas]
regions = [i.split('_')[1] for i in samples_regions_areas]
areas = [i.split('_')[2] for i in samples_regions_areas]
def main():
    # one task per sample/region/area combination
    with Pool(12) as pool:
        pool.starmap(make_plot, zip(samples, regions, areas))

if __name__=="__main__":
    main()
Working together, the three scripts characterize human cortical layering and areal specificity from three angles: spatial distribution, density along cortical depth, and the laminar placement of inhibitory neurons. When reproducing the figures, pay attention to the details of data filtering, color mapping, and the visualization parameters. The code logic is clear and readily extends to other spatial transcriptomics datasets!
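As a starting point for adapting the first script to a different dataset, the grey-out-everything-else colouring pattern can be wrapped in a small helper whose column and file names are parameters. Everything below is a generic sketch; the function name, defaults, and output path are placeholders, not part of the original code:

```python
import scanpy as sc
import matplotlib.pyplot as plt

def plot_highlighted_types(adata, highlight, annot_col='H3_annotation',
                           basis='spatial', out='highlighted.png'):
    """Grey out every cell type except those in `highlight` on a spatial embedding.

    A generic sketch of the colouring pattern used above; `annot_col`, `basis`
    and `out` are placeholder defaults to adjust for another dataset.
    """
    others = [t for t in adata.obs[annot_col].unique() if t not in highlight]
    # up to 10 highlighted types get colours from the tab10 cycle; the rest turn light grey
    palette = {t: c for t, c in zip(highlight, plt.cm.tab10.colors)}
    palette.update({t: 'lightgrey' for t in others})
    ax = sc.pl.embedding(adata, basis=basis, color=annot_col, groups=highlight,
                         palette=palette, s=2, show=False)
    ax.set_aspect('equal')
    ax.set_title('')
    plt.savefig(out, dpi=300, bbox_inches='tight')
    plt.close()
```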