repository: davidvanzessen / argalaxy_tools
file: report_clonality/RScript.r @ 0:f90fbc15b35a (draft)
commit message: Uploaded
author: davidvanzessen
date: Thu, 16 Jul 2015 08:30:43 -0400
children: edbf4fba5fc7
# ---------------------- load/install packages ----------------------

if (!("gridExtra" %in% rownames(installed.packages()))) {
  install.packages("gridExtra", repos="http://cran.xl-mirror.nl/")
}
library(gridExtra)
if (!("ggplot2" %in% rownames(installed.packages()))) {
  install.packages("ggplot2", repos="http://cran.xl-mirror.nl/")
}
library(ggplot2)
if (!("plyr" %in% rownames(installed.packages()))) {
  install.packages("plyr", repos="http://cran.xl-mirror.nl/")
}
library(plyr)

if (!("data.table" %in% rownames(installed.packages()))) {
  install.packages("data.table", repos="http://cran.xl-mirror.nl/")
}
library(data.table)

if (!("reshape2" %in% rownames(installed.packages()))) {
  install.packages("reshape2", repos="http://cran.xl-mirror.nl/")
}
library(reshape2)

if (!("lymphclon" %in% rownames(installed.packages()))) {
  install.packages("lymphclon", repos="http://cran.xl-mirror.nl/")
}
library(lymphclon)

# ---------------------- parameters ----------------------

args <- commandArgs(trailingOnly = TRUE)

infile = args[1] #path to input file
outfile = args[2] #path to output file
outdir = args[3] #path to output folder (html/images/data)
clonaltype = args[4] #clonaltype definition, or 'none' for no unique filtering
ct = unlist(strsplit(clonaltype, ","))
species = args[5] #human or mouse
locus = args[6] # IGH, IGK, IGL, TRB, TRA, TRG or TRD
filterproductive = ifelse(args[7] == "yes", T, F) #should unproductive sequences be filtered out? (yes/no)
clonality_method = args[8]
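# A hedged example of how this script might be invoked from the command line
# (the concrete paths, clonal type columns and method name below are
# illustrative assumptions, not values taken from the Galaxy wrapper):
#
#   Rscript RScript.r input.txt report.html outdir \
#       "Top.V.Gene,CDR3.Seq" human IGH yes old
#
# i.e. args[1..8] = infile, outfile, outdir, clonaltype, species, locus,
# filterproductive ("yes"/"no") and clonality_method.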

# ---------------------- Data preparation ----------------------

inputdata = read.table(infile, sep="\t", header=TRUE, fill=T, comment.char="")

setwd(outdir)

# remove rows without a Sample value (artifact rows in the input)
inputdata = inputdata[inputdata$Sample != "",]

# remove the allele from the V, D and J genes
inputdata$Top.V.Gene = gsub("[*]([0-9]+)", "", inputdata$Top.V.Gene)
inputdata$Top.D.Gene = gsub("[*]([0-9]+)", "", inputdata$Top.D.Gene)
inputdata$Top.J.Gene = gsub("[*]([0-9]+)", "", inputdata$Top.J.Gene)

inputdata$clonaltype = 1:nrow(inputdata)

PRODF = inputdata
UNPROD = inputdata
if(filterproductive){
  if("Functionality" %in% colnames(inputdata)) { # "Functionality" is an IMGT column
    PRODF = inputdata[inputdata$Functionality == "productive" | inputdata$Functionality == "productive (see comment)", ]
    UNPROD = inputdata[!(inputdata$Functionality == "productive" | inputdata$Functionality == "productive (see comment)"), ]
  } else {
    PRODF = inputdata[inputdata$VDJ.Frame != "In-frame with stop codon" & inputdata$VDJ.Frame != "Out-of-frame" & inputdata$CDR3.Found.How != "NOT_FOUND" , ]
    UNPROD = inputdata[!(inputdata$VDJ.Frame != "In-frame with stop codon" & inputdata$VDJ.Frame != "Out-of-frame" & inputdata$CDR3.Found.How != "NOT_FOUND" ), ]
  }
}

clonalityFrame = PRODF

# remove duplicates based on the clonaltype
if(clonaltype != "none"){
  clonaltype = paste(clonaltype, ",Sample", sep="") # add the Sample column to the clonaltype so deduplication is unique within samples
  PRODF$clonaltype = do.call(paste, c(PRODF[unlist(strsplit(clonaltype, ","))], sep = ":"))
  PRODF = PRODF[!duplicated(PRODF$clonaltype), ]

  UNPROD$clonaltype = do.call(paste, c(UNPROD[unlist(strsplit(clonaltype, ","))], sep = ":"))
  UNPROD = UNPROD[!duplicated(UNPROD$clonaltype), ]

  # again for clonalityFrame, but with sample + replicate
  clonalityFrame$clonaltype = do.call(paste, c(clonalityFrame[unlist(strsplit(clonaltype, ","))], sep = ":"))
  clonalityFrame$clonality_clonaltype = do.call(paste, c(clonalityFrame[unlist(strsplit(paste(clonaltype, ",Replicate", sep=""), ","))], sep = ":"))
  clonalityFrame = clonalityFrame[!duplicated(clonalityFrame$clonality_clonaltype), ]
}
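# Illustrative sketch of what the deduplication key looks like (the column
# choice and the gene/sequence values are assumptions, not real data): with
# clonaltype "Top.V.Gene,CDR3.Seq", the key built above becomes e.g.
#   "IGHV3-23:ARDLSGYFDY:sample1"
# and only the first row with that key is kept per sample; the clonality
# frame additionally appends the Replicate, so an identical clone is kept
# once per replicate instead of once per sample.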

PRODF$freq = 1

if(any(grepl(pattern="_", x=PRODF$ID))){ #the frequency can be stored in the ID with the pattern ".*_freq_.*"
  PRODF$freq = gsub("^[0-9]+_", "", PRODF$ID)
  PRODF$freq = gsub("_.*", "", PRODF$freq)
  PRODF$freq = as.numeric(PRODF$freq)
  if(any(is.na(PRODF$freq))){ #if there was an "_" in the ID, but not the frequency, go back to frequency of 1 for every sequence
    PRODF$freq = 1
  }
}
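# A small worked example of the ID pattern handled above (the ID itself is
# made up): an ID such as "7_250_M01234:55" first loses the leading "7_",
# then everything from the next "_" onwards, leaving freq = 250. If that
# middle field is not numeric, as.numeric() yields NA and every freq is
# reset to 1.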



# write the complete dataset that is left over; this equals the full input when clonaltype is 'none' and filterproductive is 'no'
write.table(PRODF, "allUnique.csv", sep=",",quote=F,row.names=F,col.names=T)
write.table(UNPROD, "allUnproductive.csv", sep=",",quote=F,row.names=F,col.names=T)

# write the samples to a file
sampleFile <- file("samples.txt")
un = unique(inputdata$Sample)
un = paste(un, sep="\n")
writeLines(un, sampleFile)
close(sampleFile)

# ---------------------- Counting the productive/unproductive and unique sequences ----------------------

inputdata.dt = data.table(inputdata) #for speed

if(clonaltype == "none"){
  ct = c("clonaltype")
}

inputdata.dt$samples_replicates = paste(inputdata.dt$Sample, inputdata.dt$Replicate, sep="_")
samples_replicates = c(unique(inputdata.dt$samples_replicates), unique(as.character(inputdata.dt$Sample)))
frequency_table = data.frame(ID = samples_replicates[order(samples_replicates)])


sample_productive_count = inputdata.dt[, list(All=.N,
    Productive = nrow(.SD[.SD$Functionality == "productive" | .SD$Functionality == "productive (see comment)",]),
    perc_prod = 1,
    Productive_unique = nrow(.SD[.SD$Functionality == "productive" | .SD$Functionality == "productive (see comment)",list(count=.N),by=ct]),
    perc_prod_un = 1,
    Unproductive = nrow(.SD[.SD$Functionality != "productive" & .SD$Functionality != "productive (see comment)",]),
    perc_unprod = 1,
    Unproductive_unique = nrow(.SD[.SD$Functionality != "productive" & .SD$Functionality != "productive (see comment)",list(count=.N),by=ct]),
    perc_unprod_un = 1),
  by=c("Sample")]
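# Note (descriptive only): the perc_* columns are initialised to 1 inside the
# data.table call purely to reserve the columns; the real percentages are
# filled in just below, once the per-sample totals are known.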

sample_productive_count$perc_prod = round(sample_productive_count$Productive / sample_productive_count$All * 100)
sample_productive_count$perc_prod_un = round(sample_productive_count$Productive_unique / sample_productive_count$All * 100)

sample_productive_count$perc_unprod = round(sample_productive_count$Unproductive / sample_productive_count$All * 100)
sample_productive_count$perc_unprod_un = round(sample_productive_count$Unproductive_unique / sample_productive_count$All * 100)


sample_replicate_productive_count = inputdata.dt[, list(All=.N,
    Productive = nrow(.SD[.SD$Functionality == "productive" | .SD$Functionality == "productive (see comment)",]),
    perc_prod = 1,
    Productive_unique = nrow(.SD[.SD$Functionality == "productive" | .SD$Functionality == "productive (see comment)",list(count=.N),by=ct]),
    perc_prod_un = 1,
    Unproductive = nrow(.SD[.SD$Functionality != "productive" & .SD$Functionality != "productive (see comment)",]),
    perc_unprod = 1,
    Unproductive_unique = nrow(.SD[.SD$Functionality != "productive" & .SD$Functionality != "productive (see comment)",list(count=.N),by=ct]),
    perc_unprod_un = 1),
  by=c("samples_replicates")]

sample_replicate_productive_count$perc_prod = round(sample_replicate_productive_count$Productive / sample_replicate_productive_count$All * 100)
sample_replicate_productive_count$perc_prod_un = round(sample_replicate_productive_count$Productive_unique / sample_replicate_productive_count$All * 100)

sample_replicate_productive_count$perc_unprod = round(sample_replicate_productive_count$Unproductive / sample_replicate_productive_count$All * 100)
sample_replicate_productive_count$perc_unprod_un = round(sample_replicate_productive_count$Unproductive_unique / sample_replicate_productive_count$All * 100)

setnames(sample_replicate_productive_count, colnames(sample_productive_count))

counts = rbind(sample_replicate_productive_count, sample_productive_count)
counts = counts[order(counts$Sample),]

write.table(x=counts, file="productive_counting.txt", sep=",",quote=F,row.names=F,col.names=F)
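# Note (derived from the rbind/order above): productive_counting.txt is
# written without a header; its first column holds either a Sample or a
# Sample_Replicate name, and sorting on that column groups each sample's
# total row together with its per-replicate rows.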

# ---------------------- Frequency calculation for V, D and J ----------------------

PRODFV = data.frame(data.table(PRODF)[, list(Length=sum(freq)), by=c("Sample", "Top.V.Gene")])
Total = ddply(PRODFV, .(Sample), function(x) data.frame(Total = sum(x$Length)))
PRODFV = merge(PRODFV, Total, by.x='Sample', by.y='Sample', all.x=TRUE)
PRODFV = ddply(PRODFV, c("Sample", "Top.V.Gene"), summarise, relFreq= (Length*100 / Total))

PRODFD = data.frame(data.table(PRODF)[, list(Length=sum(freq)), by=c("Sample", "Top.D.Gene")])
Total = ddply(PRODFD, .(Sample), function(x) data.frame(Total = sum(x$Length)))
PRODFD = merge(PRODFD, Total, by.x='Sample', by.y='Sample', all.x=TRUE)
PRODFD = ddply(PRODFD, c("Sample", "Top.D.Gene"), summarise, relFreq= (Length*100 / Total))

PRODFJ = data.frame(data.table(PRODF)[, list(Length=sum(freq)), by=c("Sample", "Top.J.Gene")])
Total = ddply(PRODFJ, .(Sample), function(x) data.frame(Total = sum(x$Length)))
PRODFJ = merge(PRODFJ, Total, by.x='Sample', by.y='Sample', all.x=TRUE)
PRODFJ = ddply(PRODFJ, c("Sample", "Top.J.Gene"), summarise, relFreq= (Length*100 / Total))
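# Worked example of relFreq (numbers are invented): if a gene has a summed
# freq of 50 in a sample whose per-sample Total is 200, relFreq = 50*100/200
# = 25, i.e. 25% of that sample's (frequency-weighted) sequences use the gene.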

# ---------------------- Setting up the gene names for the different species/loci ----------------------

Vchain = ""
Dchain = ""
Jchain = ""

if(species == "custom"){
  print("Custom genes: ")
  splt = unlist(strsplit(locus, ";"))
  print(paste("V:", splt[1]))
  print(paste("D:", splt[2]))
  print(paste("J:", splt[3]))

  Vchain = unlist(strsplit(splt[1], ","))
  Vchain = data.frame(v.name = Vchain, chr.orderV = 1:length(Vchain))

  Dchain = unlist(strsplit(splt[2], ","))
  if(length(Dchain) > 0){
    Dchain = data.frame(v.name = Dchain, chr.orderD = 1:length(Dchain))
  } else {
    Dchain = data.frame(v.name = character(0), chr.orderD = numeric(0))
  }

  Jchain = unlist(strsplit(splt[3], ","))
  Jchain = data.frame(v.name = Jchain, chr.orderJ = 1:length(Jchain))

} else {
  genes = read.table("genes.txt", sep="\t", header=TRUE, fill=T, comment.char="")

  Vchain = genes[grepl(species, genes$Species) & genes$locus == locus & genes$region == "V",c("IMGT.GENE.DB", "chr.order")]
  colnames(Vchain) = c("v.name", "chr.orderV")
  Dchain = genes[grepl(species, genes$Species) & genes$locus == locus & genes$region == "D",c("IMGT.GENE.DB", "chr.order")]
  colnames(Dchain) = c("v.name", "chr.orderD")
  Jchain = genes[grepl(species, genes$Species) & genes$locus == locus & genes$region == "J",c("IMGT.GENE.DB", "chr.order")]
  colnames(Jchain) = c("v.name", "chr.orderJ")
}
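# Reminder (derived from the subsetting above, not from the genes.txt file
# itself): genes.txt is read from the working directory (outdir, because of
# the earlier setwd) and is expected to provide at least the columns Species,
# locus, region, IMGT.GENE.DB and chr.order; chr.order drives the chromosomal
# ordering of the gene axes in the plots below.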
useD = TRUE
if(nrow(Dchain) == 0){
  useD = FALSE
  cat("No D Genes in this species/locus")
}
print(paste("useD:", useD))

# ---------------------- merge with the frequency count ----------------------

PRODFV = merge(PRODFV, Vchain, by.x='Top.V.Gene', by.y='v.name', all.x=TRUE)

PRODFD = merge(PRODFD, Dchain, by.x='Top.D.Gene', by.y='v.name', all.x=TRUE)

PRODFJ = merge(PRODFJ, Jchain, by.x='Top.J.Gene', by.y='v.name', all.x=TRUE)

# ---------------------- Create the V, D and J frequency plots and write the data.frame for every plot to a file ----------------------

pV = ggplot(PRODFV)
pV = pV + geom_bar( aes( x=factor(reorder(Top.V.Gene, chr.orderV)), y=relFreq, fill=Sample), stat='identity', position="dodge") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
pV = pV + xlab("Summary of V gene") + ylab("Frequency") + ggtitle("Relative frequency of V gene usage")
write.table(x=PRODFV, file="VFrequency.csv", sep=",",quote=F,row.names=F,col.names=T)

png("VPlot.png",width = 1280, height = 720)
pV
dev.off();

if(useD){
  pD = ggplot(PRODFD)
  pD = pD + geom_bar( aes( x=factor(reorder(Top.D.Gene, chr.orderD)), y=relFreq, fill=Sample), stat='identity', position="dodge") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
  pD = pD + xlab("Summary of D gene") + ylab("Frequency") + ggtitle("Relative frequency of D gene usage")
  write.table(x=PRODFD, file="DFrequency.csv", sep=",",quote=F,row.names=F,col.names=T)

  png("DPlot.png",width = 800, height = 600)
  print(pD)
  dev.off();
}

pJ = ggplot(PRODFJ)
pJ = pJ + geom_bar( aes( x=factor(reorder(Top.J.Gene, chr.orderJ)), y=relFreq, fill=Sample), stat='identity', position="dodge") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
pJ = pJ + xlab("Summary of J gene") + ylab("Frequency") + ggtitle("Relative frequency of J gene usage")
write.table(x=PRODFJ, file="JFrequency.csv", sep=",",quote=F,row.names=F,col.names=T)

png("JPlot.png",width = 800, height = 600)
pJ
dev.off();

# ---------------------- Now the frequency plots of the V, D and J families ----------------------

VGenes = PRODF[,c("Sample", "Top.V.Gene")]
VGenes$Top.V.Gene = gsub("-.*", "", VGenes$Top.V.Gene)
VGenes = data.frame(data.table(VGenes)[, list(Count=.N), by=c("Sample", "Top.V.Gene")])
TotalPerSample = data.frame(data.table(VGenes)[, list(total=sum(.SD$Count)), by=Sample])
VGenes = merge(VGenes, TotalPerSample, by="Sample")
VGenes$Frequency = VGenes$Count * 100 / VGenes$total
VPlot = ggplot(VGenes)
VPlot = VPlot + geom_bar(aes( x = Top.V.Gene, y = Frequency, fill = Sample), stat='identity', position='dodge' ) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  ggtitle("Distribution of V gene families") +
  ylab("Percentage of sequences")
png("VFPlot.png")
VPlot
dev.off();
write.table(x=VGenes, file="VFFrequency.csv", sep=",",quote=F,row.names=F,col.names=T)
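# Example of the family collapsing done above (the gene name is illustrative):
# the gsub strips everything from the first "-", so a gene call such as
# "IGHV3-23" is counted under the "IGHV3" family before the per-sample
# percentages are computed.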

if(useD){
  DGenes = PRODF[,c("Sample", "Top.D.Gene")]
  DGenes$Top.D.Gene = gsub("-.*", "", DGenes$Top.D.Gene)
  DGenes = data.frame(data.table(DGenes)[, list(Count=.N), by=c("Sample", "Top.D.Gene")])
  TotalPerSample = data.frame(data.table(DGenes)[, list(total=sum(.SD$Count)), by=Sample])
  DGenes = merge(DGenes, TotalPerSample, by="Sample")
  DGenes$Frequency = DGenes$Count * 100 / DGenes$total
  DPlot = ggplot(DGenes)
  DPlot = DPlot + geom_bar(aes( x = Top.D.Gene, y = Frequency, fill = Sample), stat='identity', position='dodge' ) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    ggtitle("Distribution of D gene families") +
    ylab("Percentage of sequences")
  png("DFPlot.png")
  print(DPlot)
  dev.off();
  write.table(x=DGenes, file="DFFrequency.csv", sep=",",quote=F,row.names=F,col.names=T)
}

JGenes = PRODF[,c("Sample", "Top.J.Gene")]
JGenes$Top.J.Gene = gsub("-.*", "", JGenes$Top.J.Gene)
JGenes = data.frame(data.table(JGenes)[, list(Count=.N), by=c("Sample", "Top.J.Gene")])
TotalPerSample = data.frame(data.table(JGenes)[, list(total=sum(.SD$Count)), by=Sample])
JGenes = merge(JGenes, TotalPerSample, by="Sample")
JGenes$Frequency = JGenes$Count * 100 / JGenes$total
JPlot = ggplot(JGenes)
JPlot = JPlot + geom_bar(aes( x = Top.J.Gene, y = Frequency, fill = Sample), stat='identity', position='dodge' ) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  ggtitle("Distribution of J gene families") +
  ylab("Percentage of sequences")
png("JFPlot.png")
JPlot
dev.off();
write.table(x=JGenes, file="JFFrequency.csv", sep=",",quote=F,row.names=F,col.names=T)

# ---------------------- Plotting the cdr3 length ----------------------

CDR3Length = data.frame(data.table(PRODF)[, list(Count=.N), by=c("Sample", "CDR3.Length.DNA")])
TotalPerSample = data.frame(data.table(CDR3Length)[, list(total=sum(.SD$Count)), by=Sample])
CDR3Length = merge(CDR3Length, TotalPerSample, by="Sample")
CDR3Length$Frequency = CDR3Length$Count * 100 / CDR3Length$total
CDR3LengthPlot = ggplot(CDR3Length)
CDR3LengthPlot = CDR3LengthPlot + geom_bar(aes( x = CDR3.Length.DNA, y = Frequency, fill = Sample), stat='identity', position='dodge' ) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  ggtitle("Length distribution of CDR3") +
  xlab("CDR3 Length") +
  ylab("Percentage of sequences")
png("CDR3LengthPlot.png",width = 1280, height = 720)
CDR3LengthPlot
dev.off()
write.table(x=CDR3Length, file="CDR3LengthPlot.csv", sep=",",quote=F,row.names=F,col.names=T)

# ---------------------- Plot the heatmaps ----------------------


# get the reverse order for the V and D genes
revVchain = Vchain
revDchain = Dchain
revVchain$chr.orderV = rev(revVchain$chr.orderV)
revDchain$chr.orderD = rev(revDchain$chr.orderD)

if(useD){
  plotVD <- function(dat){
    if(length(dat[,1]) == 0){
      return()
    }
    img = ggplot() +
      geom_tile(data=dat, aes(x=factor(reorder(Top.D.Gene, chr.orderD)), y=factor(reorder(Top.V.Gene, chr.orderV)), fill=relLength)) +
      theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
      scale_fill_gradient(low="gold", high="blue", na.value="white") +
      ggtitle(paste(unique(dat$Sample), " (N=" , sum(dat$Length, na.rm=T) ,")", sep="")) +
      xlab("D genes") +
      ylab("V Genes")

    png(paste("HeatmapVD_", unique(dat[3])[1,1] , ".png", sep=""), width=150+(15*length(Dchain$v.name)), height=100+(15*length(Vchain$v.name)))
    print(img)
    dev.off()
    write.table(x=acast(dat, Top.V.Gene~Top.D.Gene, value.var="Length"), file=paste("HeatmapVD_", unique(dat[3])[1,1], ".csv", sep=""), sep=",",quote=F,row.names=T,col.names=NA)
  }

  VandDCount = data.frame(data.table(PRODF)[, list(Length=.N), by=c("Top.V.Gene", "Top.D.Gene", "Sample")])

  VandDCount$l = log(VandDCount$Length)
  maxVD = data.frame(data.table(VandDCount)[, list(max=max(l)), by=c("Sample")])
  VandDCount = merge(VandDCount, maxVD, by.x="Sample", by.y="Sample", all.x=T)
  VandDCount$relLength = VandDCount$l / VandDCount$max
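  # Worked example for the heatmap fill computed above (counts invented): a
  # V/D combination seen 20 times in a sample whose most frequent combination
  # is seen 400 times gets relLength = log(20)/log(400) ~= 0.5, so the fill is
  # the log-count scaled to that sample's maximum rather than the raw count.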

  cartesianProductVD = expand.grid(Top.V.Gene = Vchain$v.name, Top.D.Gene = Dchain$v.name, Sample = unique(inputdata$Sample))

  completeVD = merge(VandDCount, cartesianProductVD, all.y=TRUE)
  completeVD = merge(completeVD, revVchain, by.x="Top.V.Gene", by.y="v.name", all.x=TRUE)
  completeVD = merge(completeVD, Dchain, by.x="Top.D.Gene", by.y="v.name", all.x=TRUE)
  VDList = split(completeVD, f=completeVD[,"Sample"])

  lapply(VDList, FUN=plotVD)
}

plotVJ <- function(dat){
  if(length(dat[,1]) == 0){
    return()
  }
  cat(paste(unique(dat[3])[1,1]))
  img = ggplot() +
    geom_tile(data=dat, aes(x=factor(reorder(Top.J.Gene, chr.orderJ)), y=factor(reorder(Top.V.Gene, chr.orderV)), fill=relLength)) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    scale_fill_gradient(low="gold", high="blue", na.value="white") +
    ggtitle(paste(unique(dat$Sample), " (N=" , sum(dat$Length, na.rm=T) ,")", sep="")) +
    xlab("J genes") +
    ylab("V Genes")

  png(paste("HeatmapVJ_", unique(dat[3])[1,1] , ".png", sep=""), width=150+(15*length(Jchain$v.name)), height=100+(15*length(Vchain$v.name)))
  print(img)
  dev.off()
  write.table(x=acast(dat, Top.V.Gene~Top.J.Gene, value.var="Length"), file=paste("HeatmapVJ_", unique(dat[3])[1,1], ".csv", sep=""), sep=",",quote=F,row.names=T,col.names=NA)
}

VandJCount = data.frame(data.table(PRODF)[, list(Length=.N), by=c("Top.V.Gene", "Top.J.Gene", "Sample")])

VandJCount$l = log(VandJCount$Length)
maxVJ = data.frame(data.table(VandJCount)[, list(max=max(l)), by=c("Sample")])
VandJCount = merge(VandJCount, maxVJ, by.x="Sample", by.y="Sample", all.x=T)
VandJCount$relLength = VandJCount$l / VandJCount$max

cartesianProductVJ = expand.grid(Top.V.Gene = Vchain$v.name, Top.J.Gene = Jchain$v.name, Sample = unique(inputdata$Sample))

completeVJ = merge(VandJCount, cartesianProductVJ, all.y=TRUE)
completeVJ = merge(completeVJ, revVchain, by.x="Top.V.Gene", by.y="v.name", all.x=TRUE)
completeVJ = merge(completeVJ, Jchain, by.x="Top.J.Gene", by.y="v.name", all.x=TRUE)
VJList = split(completeVJ, f=completeVJ[,"Sample"])
lapply(VJList, FUN=plotVJ)

if(useD){
  plotDJ <- function(dat){
    if(length(dat[,1]) == 0){
      return()
    }
    img = ggplot() +
      geom_tile(data=dat, aes(x=factor(reorder(Top.J.Gene, chr.orderJ)), y=factor(reorder(Top.D.Gene, chr.orderD)), fill=relLength)) +
      theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
      scale_fill_gradient(low="gold", high="blue", na.value="white") +
      ggtitle(paste(unique(dat$Sample), " (N=" , sum(dat$Length, na.rm=T) ,")", sep="")) +
      xlab("J genes") +
      ylab("D Genes")

    png(paste("HeatmapDJ_", unique(dat[3])[1,1] , ".png", sep=""), width=150+(15*length(Jchain$v.name)), height=100+(15*length(Dchain$v.name)))
    print(img)
    dev.off()
    write.table(x=acast(dat, Top.D.Gene~Top.J.Gene, value.var="Length"), file=paste("HeatmapDJ_", unique(dat[3])[1,1], ".csv", sep=""), sep=",",quote=F,row.names=T,col.names=NA)
  }


  DandJCount = data.frame(data.table(PRODF)[, list(Length=.N), by=c("Top.D.Gene", "Top.J.Gene", "Sample")])

  DandJCount$l = log(DandJCount$Length)
  maxDJ = data.frame(data.table(DandJCount)[, list(max=max(l)), by=c("Sample")])
  DandJCount = merge(DandJCount, maxDJ, by.x="Sample", by.y="Sample", all.x=T)
  DandJCount$relLength = DandJCount$l / DandJCount$max

  cartesianProductDJ = expand.grid(Top.D.Gene = Dchain$v.name, Top.J.Gene = Jchain$v.name, Sample = unique(inputdata$Sample))

  completeDJ = merge(DandJCount, cartesianProductDJ, all.y=TRUE)
  completeDJ = merge(completeDJ, revDchain, by.x="Top.D.Gene", by.y="v.name", all.x=TRUE)
  completeDJ = merge(completeDJ, Jchain, by.x="Top.J.Gene", by.y="v.name", all.x=TRUE)
  DJList = split(completeDJ, f=completeDJ[,"Sample"])
  lapply(DJList, FUN=plotDJ)
}


# ---------------------- calculating the clonality score ----------------------

if("Replicate" %in% colnames(inputdata)) #can only calculate clonality score when replicate information is available
{
  if(clonality_method == "boyd"){
    samples = split(clonalityFrame, clonalityFrame$Sample, drop=T)

    for (sample in samples){
      res = data.frame(paste=character(0))
      sample_id = unique(sample$Sample)[[1]]
      for(replicate in unique(sample$Replicate)){
        tmp = sample[sample$Replicate == replicate,]
        clone_table = data.frame(table(tmp$clonaltype))
        clone_col_name = paste("V", replicate, sep="")
        colnames(clone_table) = c("paste", clone_col_name)
        res = merge(res, clone_table, by="paste", all=T)
      }

      res[is.na(res)] = 0
      infer.result = infer.clonality(as.matrix(res[,2:ncol(res)]))

      write.table(data.table(infer.result[[12]]), file=paste("lymphclon_clonality_", sample_id, ".csv", sep=""), sep=",",quote=F,row.names=F,col.names=F)

      res$type = rowSums(res[,2:ncol(res)])

      coincidence.table = data.frame(table(res$type))
      colnames(coincidence.table) = c("Coincidence Type", "Raw Coincidence Freq")
      write.table(coincidence.table, file=paste("lymphclon_coincidences_", sample_id, ".csv", sep=""), sep=",",quote=F,row.names=F,col.names=T)
    }
  } else {
    write.table(clonalityFrame, "clonalityComplete.csv", sep=",",quote=F,row.names=F,col.names=T)

    clonalFreq = data.frame(data.table(clonalityFrame)[, list(Type=.N), by=c("Sample", "clonaltype")])
    clonalFreqCount = data.frame(data.table(clonalFreq)[, list(Count=.N), by=c("Sample", "Type")])
    clonalFreqCount$realCount = clonalFreqCount$Type * clonalFreqCount$Count
    clonalSum = data.frame(data.table(clonalFreqCount)[, list(Reads=sum(realCount)), by=c("Sample")])
    clonalFreqCount = merge(clonalFreqCount, clonalSum, by.x="Sample", by.y="Sample")

    ct = c('Type\tWeight\n2\t1\n3\t3\n4\t6\n5\t10\n6\t15')
    tcct = textConnection(ct)
    CT = read.table(tcct, sep="\t", header=TRUE)
    close(tcct)
    clonalFreqCount = merge(clonalFreqCount, CT, by.x="Type", by.y="Type", all.x=T)
    clonalFreqCount$WeightedCount = clonalFreqCount$Count * clonalFreqCount$Weight

    ReplicateReads = data.frame(data.table(clonalityFrame)[, list(Type=.N), by=c("Sample", "Replicate", "clonaltype")])
    ReplicateReads = data.frame(data.table(ReplicateReads)[, list(Reads=.N), by=c("Sample", "Replicate")])
    clonalFreqCount$Reads = as.numeric(clonalFreqCount$Reads)
    ReplicateReads$squared = ReplicateReads$Reads * ReplicateReads$Reads

    ReplicatePrint <- function(dat){
      write.table(dat[-1], paste("ReplicateReads_", unique(dat[1])[1,1] , ".csv", sep=""), sep=",",quote=F,na="-",row.names=F,col.names=F)
    }

    ReplicateSplit = split(ReplicateReads, f=ReplicateReads[,"Sample"])
    lapply(ReplicateSplit, FUN=ReplicatePrint)

    ReplicateReads = data.frame(data.table(ReplicateReads)[, list(ReadsSum=sum(as.numeric(Reads)), ReadsSquaredSum=sum(as.numeric(squared))), by=c("Sample")])
    clonalFreqCount = merge(clonalFreqCount, ReplicateReads, by.x="Sample", by.y="Sample", all.x=T)

    ReplicateSumPrint <- function(dat){
      write.table(dat[-1], paste("ReplicateSumReads_", unique(dat[1])[1,1] , ".csv", sep=""), sep=",",quote=F,na="-",row.names=F,col.names=F)
    }

    ReplicateSumSplit = split(ReplicateReads, f=ReplicateReads[,"Sample"])
    lapply(ReplicateSumSplit, FUN=ReplicateSumPrint)

    clonalFreqCountSum = data.frame(data.table(clonalFreqCount)[, list(Numerator=sum(WeightedCount, na.rm=T)), by=c("Sample")])
    clonalFreqCount = merge(clonalFreqCount, clonalFreqCountSum, by.x="Sample", by.y="Sample", all.x=T)
    clonalFreqCount$ReadsSum = as.numeric(clonalFreqCount$ReadsSum) #prevent integer overflow
    clonalFreqCount$Denominator = (((clonalFreqCount$ReadsSum * clonalFreqCount$ReadsSum) - clonalFreqCount$ReadsSquaredSum) / 2)
    clonalFreqCount$Result = (clonalFreqCount$Numerator + 1) / (clonalFreqCount$Denominator + 1)
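    # Worked example of this clonality score (all numbers invented): suppose a
    # sample has two replicates with 100 and 150 unique clonotypes, so
    # ReadsSum = 250 and ReadsSquaredSum = 100^2 + 150^2 = 32500, giving
    # Denominator = (250^2 - 32500) / 2 = 15000 possible between-replicate
    # coincidences. If 5 clonotypes occur in exactly 2 replicates (Type 2,
    # Weight 1), Numerator = 5 and Result = (5 + 1) / (15000 + 1) ~= 0.0004.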

    ClonalityScorePrint <- function(dat){
      write.table(dat$Result, paste("ClonalityScore_", unique(dat[1])[1,1] , ".csv", sep=""), sep=",",quote=F,na="-",row.names=F,col.names=F)
    }

    clonalityScore = clonalFreqCount[c("Sample", "Result")]
    clonalityScore = unique(clonalityScore)

    clonalityScoreSplit = split(clonalityScore, f=clonalityScore[,"Sample"])
    lapply(clonalityScoreSplit, FUN=ClonalityScorePrint)

    clonalityOverview = clonalFreqCount[c("Sample", "Type", "Count", "Weight", "WeightedCount")]


    ClonalityOverviewPrint <- function(dat){
      write.table(dat[-1], paste("ClonalityOverView_", unique(dat[1])[1,1] , ".csv", sep=""), sep=",",quote=F,na="-",row.names=F,col.names=F)
    }

    clonalityOverviewSplit = split(clonalityOverview, f=clonalityOverview$Sample)
    lapply(clonalityOverviewSplit, FUN=ClonalityOverviewPrint)
  }
}

imgtcolumns = c("X3V.REGION.trimmed.nt.nb", "P3V.nt.nb", "N1.REGION.nt.nb", "P5D.nt.nb", "X5D.REGION.trimmed.nt.nb", "X3D.REGION.trimmed.nt.nb", "P3D.nt.nb", "N2.REGION.nt.nb", "P5J.nt.nb", "X5J.REGION.trimmed.nt.nb")
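# Note (my reading, not stated in the file): these names correspond to the
# IMGT junction-analysis columns (trimmed V/D/J nucleotide counts and N/P
# region sizes) after R has converted the original headers into syntactic
# names, so the junction analysis below only runs for IMGT-style input that
# still carries all of these columns.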
if(all(imgtcolumns %in% colnames(inputdata)))
{
  newData = data.frame(data.table(PRODF)[,list(unique=.N,
      VH.DEL=mean(X3V.REGION.trimmed.nt.nb, na.rm=T),
      P1=mean(P3V.nt.nb, na.rm=T),
      N1=mean(N1.REGION.nt.nb, na.rm=T),
      P2=mean(P5D.nt.nb, na.rm=T),
      DEL.DH=mean(X5D.REGION.trimmed.nt.nb, na.rm=T),
      DH.DEL=mean(X3D.REGION.trimmed.nt.nb, na.rm=T),
      P3=mean(P3D.nt.nb, na.rm=T),
      N2=mean(N2.REGION.nt.nb, na.rm=T),
      P4=mean(P5J.nt.nb, na.rm=T),
      DEL.JH=mean(X5J.REGION.trimmed.nt.nb, na.rm=T),
      Total.Del=(mean(X3V.REGION.trimmed.nt.nb, na.rm=T) +
                 mean(X5D.REGION.trimmed.nt.nb, na.rm=T) +
                 mean(X3D.REGION.trimmed.nt.nb, na.rm=T) +
                 mean(X5J.REGION.trimmed.nt.nb, na.rm=T)),
      Total.N=(mean(N1.REGION.nt.nb, na.rm=T) +
               mean(N2.REGION.nt.nb, na.rm=T)),
      Total.P=(mean(P3V.nt.nb, na.rm=T) +
               mean(P5D.nt.nb, na.rm=T) +
               mean(P3D.nt.nb, na.rm=T) +
               mean(P5J.nt.nb, na.rm=T))),
    by=c("Sample")])
  write.table(newData, "junctionAnalysisProd.csv" , sep=",",quote=F,na="-",row.names=F,col.names=F)

  newData = data.frame(data.table(UNPROD)[,list(unique=.N,
      VH.DEL=mean(X3V.REGION.trimmed.nt.nb, na.rm=T),
      P1=mean(P3V.nt.nb, na.rm=T),
      N1=mean(N1.REGION.nt.nb, na.rm=T),
      P2=mean(P5D.nt.nb, na.rm=T),
      DEL.DH=mean(X5D.REGION.trimmed.nt.nb, na.rm=T),
      DH.DEL=mean(X3D.REGION.trimmed.nt.nb, na.rm=T),
      P3=mean(P3D.nt.nb, na.rm=T),
      N2=mean(N2.REGION.nt.nb, na.rm=T),
      P4=mean(P5J.nt.nb, na.rm=T),
      DEL.JH=mean(X5J.REGION.trimmed.nt.nb, na.rm=T),
      Total.Del=(mean(X3V.REGION.trimmed.nt.nb, na.rm=T) +
                 mean(X5D.REGION.trimmed.nt.nb, na.rm=T) +
                 mean(X3D.REGION.trimmed.nt.nb, na.rm=T) +
                 mean(X5J.REGION.trimmed.nt.nb, na.rm=T)),
      Total.N=(mean(N1.REGION.nt.nb, na.rm=T) +
               mean(N2.REGION.nt.nb, na.rm=T)),
      Total.P=(mean(P3V.nt.nb, na.rm=T) +
               mean(P5D.nt.nb, na.rm=T) +
               mean(P3D.nt.nb, na.rm=T) +
               mean(P5J.nt.nb, na.rm=T))),
    by=c("Sample")])
  write.table(newData, "junctionAnalysisUnProd.csv" , sep=",",quote=F,na="-",row.names=F,col.names=F)
}

# ---------------------- AA composition in CDR3 ----------------------

AACDR3 = PRODF[,c("Sample", "CDR3.Seq")]

TotalPerSample = data.frame(data.table(AACDR3)[, list(total=sum(nchar(as.character(.SD$CDR3.Seq)))), by=Sample])

AAfreq = list()

for(i in 1:nrow(TotalPerSample)){
  sample = TotalPerSample$Sample[i]
  AAfreq[[i]] = data.frame(table(unlist(strsplit(as.character(AACDR3[AACDR3$Sample == sample,c("CDR3.Seq")]), ""))))
  AAfreq[[i]]$Sample = sample
}

AAfreq = ldply(AAfreq, data.frame)
AAfreq = merge(AAfreq, TotalPerSample, by="Sample", all.x = T)
AAfreq$freq_perc = as.numeric(AAfreq$Freq / AAfreq$total * 100)


AAorder = read.table(sep="\t", header=TRUE, text="order.aa\tAA\n1\tR\n2\tK\n3\tN\n4\tD\n5\tQ\n6\tE\n7\tH\n8\tP\n9\tY\n10\tW\n11\tS\n12\tT\n13\tG\n14\tA\n15\tM\n16\tC\n17\tF\n18\tL\n19\tV\n20\tI")
AAfreq = merge(AAfreq, AAorder, by.x='Var1', by.y='AA', all.x=TRUE)

AAfreq = AAfreq[!is.na(AAfreq$order.aa),]

AAfreqplot = ggplot(AAfreq)
AAfreqplot = AAfreqplot + geom_bar(aes( x=factor(reorder(Var1, order.aa)), y = freq_perc, fill = Sample), stat='identity', position='dodge' )
AAfreqplot = AAfreqplot + annotate("rect", xmin = 0.5, xmax = 2.5, ymin = 0, ymax = Inf, fill = "red", alpha = 0.2)
AAfreqplot = AAfreqplot + annotate("rect", xmin = 3.5, xmax = 4.5, ymin = 0, ymax = Inf, fill = "blue", alpha = 0.2)
AAfreqplot = AAfreqplot + annotate("rect", xmin = 5.5, xmax = 6.5, ymin = 0, ymax = Inf, fill = "blue", alpha = 0.2)
AAfreqplot = AAfreqplot + annotate("rect", xmin = 6.5, xmax = 7.5, ymin = 0, ymax = Inf, fill = "red", alpha = 0.2)
AAfreqplot = AAfreqplot + ggtitle("Amino Acid Composition in the CDR3") + xlab("Amino Acid, from Hydrophilic (left) to Hydrophobic (right)") + ylab("Percentage")

png("AAComposition.png",width = 1280, height = 720)
AAfreqplot
dev.off()
write.table(AAfreq, "AAComposition.csv" , sep=",",quote=F,na="-",row.names=F,col.names=T)