# ---------------------- load/install packages ----------------------

if (!("gridExtra" %in% rownames(installed.packages()))) {
  install.packages("gridExtra", repos="http://cran.xl-mirror.nl/")
}
library(gridExtra)

if (!("ggplot2" %in% rownames(installed.packages()))) {
  install.packages("ggplot2", repos="http://cran.xl-mirror.nl/")
}
library(ggplot2)

if (!("plyr" %in% rownames(installed.packages()))) {
  install.packages("plyr", repos="http://cran.xl-mirror.nl/")
}
library(plyr)

if (!("data.table" %in% rownames(installed.packages()))) {
  install.packages("data.table", repos="http://cran.xl-mirror.nl/")
}
library(data.table)

if (!("reshape2" %in% rownames(installed.packages()))) {
  install.packages("reshape2", repos="http://cran.xl-mirror.nl/")
}
library(reshape2)

if (!("lymphclon" %in% rownames(installed.packages()))) {
  install.packages("lymphclon", repos="http://cran.xl-mirror.nl/")
}
library(lymphclon)

# ---------------------- parameters ----------------------

args <- commandArgs(trailingOnly = TRUE)

infile = args[1]            #path to input file
outfile = args[2]           #path to output file
outdir = args[3]            #path to output folder (html/images/data)
clonaltype = args[4]        #clonaltype definition, or 'none' for no unique filtering
ct = unlist(strsplit(clonaltype, ","))
species = args[5]           #human or mouse
locus = args[6]             #IGH, IGK, IGL, TRB, TRA, TRG or TRD
filterproductive = ifelse(args[7] == "yes", T, F) #should unproductive sequences be filtered out? (yes/no)
clonality_method = args[8]
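# Note: judging from the branch further down, clonality_method == "boyd" selects the
# lymphclon (infer.clonality) estimate; any other value falls through to the
# coincidence-weighted clonality score computed in the non-"boyd" branch.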

# ---------------------- Data preparation ----------------------

inputdata = read.table(infile, sep="\t", header=TRUE, fill=T, comment.char="")

setwd(outdir)

# remove rows without a sample identifier
inputdata = inputdata[inputdata$Sample != "",]

# remove the allele annotation from the V, D and J genes (e.g. "*01")
inputdata$Top.V.Gene = gsub("[*]([0-9]+)", "", inputdata$Top.V.Gene)
inputdata$Top.D.Gene = gsub("[*]([0-9]+)", "", inputdata$Top.D.Gene)
inputdata$Top.J.Gene = gsub("[*]([0-9]+)", "", inputdata$Top.J.Gene)

inputdata$clonaltype = 1:nrow(inputdata)

PRODF = inputdata
UNPROD = inputdata
if(filterproductive){
  if("Functionality" %in% colnames(inputdata)) { # "Functionality" is an IMGT column
    PRODF = inputdata[inputdata$Functionality == "productive" | inputdata$Functionality == "productive (see comment)", ]
    UNPROD = inputdata[!(inputdata$Functionality == "productive" | inputdata$Functionality == "productive (see comment)"), ]
  } else {
    PRODF = inputdata[inputdata$VDJ.Frame != "In-frame with stop codon" & inputdata$VDJ.Frame != "Out-of-frame" & inputdata$CDR3.Found.How != "NOT_FOUND", ]
    UNPROD = inputdata[!(inputdata$VDJ.Frame != "In-frame with stop codon" & inputdata$VDJ.Frame != "Out-of-frame" & inputdata$CDR3.Found.How != "NOT_FOUND"), ]
  }
}

clonalityFrame = PRODF

# remove duplicates based on the clonaltype
if(clonaltype != "none"){
  clonaltype = paste(clonaltype, ",Sample", sep="") #add the Sample column to the clonaltype, so sequences are unique within samples
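  # Hypothetical example: with clonaltype "Top.V.Gene,Top.J.Gene,CDR3.Seq" the paste below
  # yields keys such as "IGHV3-23:IGHJ4:CARDYW:Sample1"; rows sharing a key are collapsed
  # to a single representative by the duplicated() filters that follow.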
  PRODF$clonaltype = do.call(paste, c(PRODF[unlist(strsplit(clonaltype, ","))], sep = ":"))
  PRODF = PRODF[!duplicated(PRODF$clonaltype), ]

  UNPROD$clonaltype = do.call(paste, c(UNPROD[unlist(strsplit(clonaltype, ","))], sep = ":"))
  UNPROD = UNPROD[!duplicated(UNPROD$clonaltype), ]

  # again for clonalityFrame, but with Sample + Replicate in the key
  clonalityFrame$clonaltype = do.call(paste, c(clonalityFrame[unlist(strsplit(clonaltype, ","))], sep = ":"))
  clonalityFrame$clonality_clonaltype = do.call(paste, c(clonalityFrame[unlist(strsplit(paste(clonaltype, ",Replicate", sep=""), ","))], sep = ":"))
  clonalityFrame = clonalityFrame[!duplicated(clonalityFrame$clonality_clonaltype), ]
}

PRODF$freq = 1

if(any(grepl(pattern="_", x=PRODF$ID))){ #the frequency can be stored in the ID with the pattern ".*_freq_.*"
  PRODF$freq = gsub("^[0-9]+_", "", PRODF$ID)
  PRODF$freq = gsub("_.*", "", PRODF$freq)
  PRODF$freq = as.numeric(PRODF$freq)
  if(any(is.na(PRODF$freq))){ #if there was an "_" in the ID but no frequency, fall back to a frequency of 1 for every sequence
    PRODF$freq = 1
  }
}
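# Hypothetical worked example of the ID parsing above: an ID of "42_117_M01234" first loses
# its leading "42_" and then everything after the next "_", leaving "117", which becomes the
# read frequency; a non-numeric middle field produces NA and resets freq to 1 for all rows.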

# write the complete dataset that is left over; this equals the input when clonaltype is 'none' and filterproductive is 'no'
write.table(PRODF, "allUnique.csv", sep=",", quote=F, row.names=F, col.names=T)

# write the samples to a file
sampleFile <- file("samples.txt")
un = unique(inputdata$Sample)
un = paste(un, sep="\n")
writeLines(un, sampleFile)
close(sampleFile)

# ---------------------- Counting the productive/unproductive and unique sequences ----------------------

inputdata.dt = data.table(inputdata) #for speed

if(clonaltype == "none"){
  ct = c("clonaltype")
}

inputdata.dt$samples_replicates = paste(inputdata.dt$Sample, inputdata.dt$Replicate, sep="_")
samples_replicates = c(unique(inputdata.dt$samples_replicates), unique(as.character(inputdata.dt$Sample)))
frequency_table = data.frame(ID = samples_replicates[order(samples_replicates)])

sample_productive_count = inputdata.dt[, list(All=.N,
                                              Productive = nrow(.SD[.SD$Functionality == "productive" | .SD$Functionality == "productive (see comment)",]),
                                              perc_prod = 1,
                                              Productive_unique = nrow(.SD[.SD$Functionality == "productive" | .SD$Functionality == "productive (see comment)",list(count=.N),by=ct]),
                                              perc_prod_un = 1,
                                              Unproductive = nrow(.SD[.SD$Functionality != "productive" & .SD$Functionality != "productive (see comment)",]),
                                              perc_unprod = 1,
                                              Unproductive_unique = nrow(.SD[.SD$Functionality != "productive" & .SD$Functionality != "productive (see comment)",list(count=.N),by=ct]),
                                              perc_unprod_un = 1),
                                        by=c("Sample")]

sample_productive_count$perc_prod = round(sample_productive_count$Productive / sample_productive_count$All * 100)
sample_productive_count$perc_prod_un = round(sample_productive_count$Productive_unique / sample_productive_count$All * 100)

sample_productive_count$perc_unprod = round(sample_productive_count$Unproductive / sample_productive_count$All * 100)
sample_productive_count$perc_unprod_un = round(sample_productive_count$Unproductive_unique / sample_productive_count$All * 100)

sample_replicate_productive_count = inputdata.dt[, list(All=.N,
                                                        Productive = nrow(.SD[.SD$Functionality == "productive" | .SD$Functionality == "productive (see comment)",]),
                                                        perc_prod = 1,
                                                        Productive_unique = nrow(.SD[.SD$Functionality == "productive" | .SD$Functionality == "productive (see comment)",list(count=.N),by=ct]),
                                                        perc_prod_un = 1,
                                                        Unproductive = nrow(.SD[.SD$Functionality != "productive" & .SD$Functionality != "productive (see comment)",]),
                                                        perc_unprod = 1,
                                                        Unproductive_unique = nrow(.SD[.SD$Functionality != "productive" & .SD$Functionality != "productive (see comment)",list(count=.N),by=ct]),
                                                        perc_unprod_un = 1),
                                                  by=c("samples_replicates")]

sample_replicate_productive_count$perc_prod = round(sample_replicate_productive_count$Productive / sample_replicate_productive_count$All * 100)
sample_replicate_productive_count$perc_prod_un = round(sample_replicate_productive_count$Productive_unique / sample_replicate_productive_count$All * 100)

sample_replicate_productive_count$perc_unprod = round(sample_replicate_productive_count$Unproductive / sample_replicate_productive_count$All * 100)
sample_replicate_productive_count$perc_unprod_un = round(sample_replicate_productive_count$Unproductive_unique / sample_replicate_productive_count$All * 100)

# give the replicate-level table the same column names as the sample-level table so the two can be stacked
setnames(sample_replicate_productive_count, colnames(sample_productive_count))

counts = rbind(sample_replicate_productive_count, sample_productive_count)
counts = counts[order(counts$Sample),]

write.table(x=counts, file="productive_counting.txt", sep=",", quote=F, row.names=F, col.names=F)

# ---------------------- Frequency calculation for V, D and J ----------------------

PRODFV = data.frame(data.table(PRODF)[, list(Length=sum(freq)), by=c("Sample", "Top.V.Gene")])
Total = ddply(PRODFV, .(Sample), function(x) data.frame(Total = sum(x$Length)))
PRODFV = merge(PRODFV, Total, by.x='Sample', by.y='Sample', all.x=TRUE)
PRODFV = ddply(PRODFV, c("Sample", "Top.V.Gene"), summarise, relFreq = (Length*100 / Total))

PRODFD = data.frame(data.table(PRODF)[, list(Length=sum(freq)), by=c("Sample", "Top.D.Gene")])
Total = ddply(PRODFD, .(Sample), function(x) data.frame(Total = sum(x$Length)))
PRODFD = merge(PRODFD, Total, by.x='Sample', by.y='Sample', all.x=TRUE)
PRODFD = ddply(PRODFD, c("Sample", "Top.D.Gene"), summarise, relFreq = (Length*100 / Total))

PRODFJ = data.frame(data.table(PRODF)[, list(Length=sum(freq)), by=c("Sample", "Top.J.Gene")])
Total = ddply(PRODFJ, .(Sample), function(x) data.frame(Total = sum(x$Length)))
PRODFJ = merge(PRODFJ, Total, by.x='Sample', by.y='Sample', all.x=TRUE)
PRODFJ = ddply(PRODFJ, c("Sample", "Top.J.Gene"), summarise, relFreq = (Length*100 / Total))

# ---------------------- Setting up the gene names for the different species/loci ----------------------

Vchain = ""
Dchain = ""
Jchain = ""

if(species == "custom"){
  print("Custom genes: ")
  splt = unlist(strsplit(locus, ";"))
  print(paste("V:", splt[1]))
  print(paste("D:", splt[2]))
  print(paste("J:", splt[3]))

  Vchain = unlist(strsplit(splt[1], ","))
  Vchain = data.frame(v.name = Vchain, chr.orderV = 1:length(Vchain))

  Dchain = unlist(strsplit(splt[2], ","))
  if(length(Dchain) > 0){
    Dchain = data.frame(v.name = Dchain, chr.orderD = 1:length(Dchain))
  } else {
    Dchain = data.frame(v.name = character(0), chr.orderD = numeric(0))
  }

  Jchain = unlist(strsplit(splt[3], ","))
  Jchain = data.frame(v.name = Jchain, chr.orderJ = 1:length(Jchain))

} else {
  genes = read.table("genes.txt", sep="\t", header=TRUE, fill=T, comment.char="")

  Vchain = genes[grepl(species, genes$Species) & genes$locus == locus & genes$region == "V", c("IMGT.GENE.DB", "chr.order")]
  colnames(Vchain) = c("v.name", "chr.orderV")
  Dchain = genes[grepl(species, genes$Species) & genes$locus == locus & genes$region == "D", c("IMGT.GENE.DB", "chr.order")]
  colnames(Dchain) = c("v.name", "chr.orderD")
  Jchain = genes[grepl(species, genes$Species) & genes$locus == locus & genes$region == "J", c("IMGT.GENE.DB", "chr.order")]
  colnames(Jchain) = c("v.name", "chr.orderJ")
}
useD = TRUE
if(nrow(Dchain) == 0){
  useD = FALSE
  cat("No D Genes in this species/locus")
}
print(paste("useD:", useD))


# ---------------------- merge with the frequency count ----------------------

PRODFV = merge(PRODFV, Vchain, by.x='Top.V.Gene', by.y='v.name', all.x=TRUE)

PRODFD = merge(PRODFD, Dchain, by.x='Top.D.Gene', by.y='v.name', all.x=TRUE)

PRODFJ = merge(PRODFJ, Jchain, by.x='Top.J.Gene', by.y='v.name', all.x=TRUE)

# ---------------------- Create the V, D and J frequency plots and write the data.frame for every plot to a file ----------------------

pV = ggplot(PRODFV)
pV = pV + geom_bar(aes(x=factor(reorder(Top.V.Gene, chr.orderV)), y=relFreq, fill=Sample), stat='identity', position="dodge") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
pV = pV + xlab("Summary of V gene") + ylab("Frequency") + ggtitle("Relative frequency of V gene usage")
write.table(x=PRODFV, file="VFrequency.csv", sep=",", quote=F, row.names=F, col.names=T)

png("VPlot.png", width = 1280, height = 720)
print(pV)
dev.off()

if(useD){
  pD = ggplot(PRODFD)
  pD = pD + geom_bar(aes(x=factor(reorder(Top.D.Gene, chr.orderD)), y=relFreq, fill=Sample), stat='identity', position="dodge") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
  pD = pD + xlab("Summary of D gene") + ylab("Frequency") + ggtitle("Relative frequency of D gene usage")
  write.table(x=PRODFD, file="DFrequency.csv", sep=",", quote=F, row.names=F, col.names=T)

  png("DPlot.png", width = 800, height = 600)
  print(pD)
  dev.off()
}

pJ = ggplot(PRODFJ)
pJ = pJ + geom_bar(aes(x=factor(reorder(Top.J.Gene, chr.orderJ)), y=relFreq, fill=Sample), stat='identity', position="dodge") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
pJ = pJ + xlab("Summary of J gene") + ylab("Frequency") + ggtitle("Relative frequency of J gene usage")
write.table(x=PRODFJ, file="JFrequency.csv", sep=",", quote=F, row.names=F, col.names=T)

png("JPlot.png", width = 800, height = 600)
print(pJ)
dev.off()

# ---------------------- Now the frequency plots of the V, D and J families ----------------------

VGenes = PRODF[,c("Sample", "Top.V.Gene")]
VGenes$Top.V.Gene = gsub("-.*", "", VGenes$Top.V.Gene)
VGenes = data.frame(data.table(VGenes)[, list(Count=.N), by=c("Sample", "Top.V.Gene")])
TotalPerSample = data.frame(data.table(VGenes)[, list(total=sum(.SD$Count)), by=Sample])
VGenes = merge(VGenes, TotalPerSample, by="Sample")
VGenes$Frequency = VGenes$Count * 100 / VGenes$total
VPlot = ggplot(VGenes)
VPlot = VPlot + geom_bar(aes(x = Top.V.Gene, y = Frequency, fill = Sample), stat='identity', position='dodge') + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  ggtitle("Distribution of V gene families") +
  ylab("Percentage of sequences")
png("VFPlot.png")
print(VPlot)
dev.off()
write.table(x=VGenes, file="VFFrequency.csv", sep=",", quote=F, row.names=F, col.names=T)

if(useD){
  DGenes = PRODF[,c("Sample", "Top.D.Gene")]
  DGenes$Top.D.Gene = gsub("-.*", "", DGenes$Top.D.Gene)
  DGenes = data.frame(data.table(DGenes)[, list(Count=.N), by=c("Sample", "Top.D.Gene")])
  TotalPerSample = data.frame(data.table(DGenes)[, list(total=sum(.SD$Count)), by=Sample])
  DGenes = merge(DGenes, TotalPerSample, by="Sample")
  DGenes$Frequency = DGenes$Count * 100 / DGenes$total
  DPlot = ggplot(DGenes)
  DPlot = DPlot + geom_bar(aes(x = Top.D.Gene, y = Frequency, fill = Sample), stat='identity', position='dodge') + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    ggtitle("Distribution of D gene families") +
    ylab("Percentage of sequences")
  png("DFPlot.png")
  print(DPlot)
  dev.off()
  write.table(x=DGenes, file="DFFrequency.csv", sep=",", quote=F, row.names=F, col.names=T)
}

JGenes = PRODF[,c("Sample", "Top.J.Gene")]
JGenes$Top.J.Gene = gsub("-.*", "", JGenes$Top.J.Gene)
JGenes = data.frame(data.table(JGenes)[, list(Count=.N), by=c("Sample", "Top.J.Gene")])
TotalPerSample = data.frame(data.table(JGenes)[, list(total=sum(.SD$Count)), by=Sample])
JGenes = merge(JGenes, TotalPerSample, by="Sample")
JGenes$Frequency = JGenes$Count * 100 / JGenes$total
JPlot = ggplot(JGenes)
JPlot = JPlot + geom_bar(aes(x = Top.J.Gene, y = Frequency, fill = Sample), stat='identity', position='dodge') + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  ggtitle("Distribution of J gene families") +
  ylab("Percentage of sequences")
png("JFPlot.png")
print(JPlot)
dev.off()
write.table(x=JGenes, file="JFFrequency.csv", sep=",", quote=F, row.names=F, col.names=T)

# ---------------------- Plotting the cdr3 length ----------------------

CDR3Length = data.frame(data.table(PRODF)[, list(Count=.N), by=c("Sample", "CDR3.Length.DNA")])
TotalPerSample = data.frame(data.table(CDR3Length)[, list(total=sum(.SD$Count)), by=Sample])
CDR3Length = merge(CDR3Length, TotalPerSample, by="Sample")
CDR3Length$Frequency = CDR3Length$Count * 100 / CDR3Length$total
CDR3LengthPlot = ggplot(CDR3Length)
CDR3LengthPlot = CDR3LengthPlot + geom_bar(aes(x = CDR3.Length.DNA, y = Frequency, fill = Sample), stat='identity', position='dodge') + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  ggtitle("Length distribution of CDR3") +
  xlab("CDR3 Length") +
  ylab("Percentage of sequences")
png("CDR3LengthPlot.png", width = 1280, height = 720)
print(CDR3LengthPlot)
dev.off()
write.table(x=CDR3Length, file="CDR3LengthPlot.csv", sep=",", quote=F, row.names=F, col.names=T)

# ---------------------- Plot the heatmaps ----------------------

# get the reverse order for the V and D genes
revVchain = Vchain
revDchain = Dchain
revVchain$chr.orderV = rev(revVchain$chr.orderV)
revDchain$chr.orderD = rev(revDchain$chr.orderD)

if(useD){
  plotVD <- function(dat){
    if(length(dat[,1]) == 0){
      return()
    }
    img = ggplot() +
      geom_tile(data=dat, aes(x=factor(reorder(Top.D.Gene, chr.orderD)), y=factor(reorder(Top.V.Gene, chr.orderV)), fill=relLength)) +
      theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
      scale_fill_gradient(low="gold", high="blue", na.value="white") +
      ggtitle(paste(unique(dat$Sample), " (N=", sum(dat$Length, na.rm=T), ")", sep="")) +
      xlab("D genes") +
      ylab("V Genes")

    png(paste("HeatmapVD_", unique(dat[3])[1,1], ".png", sep=""), width=150+(15*length(Dchain$v.name)), height=100+(15*length(Vchain$v.name)))
    print(img)
    dev.off()
    write.table(x=acast(dat, Top.V.Gene~Top.D.Gene, value.var="Length"), file=paste("HeatmapVD_", unique(dat[3])[1,1], ".csv", sep=""), sep=",", quote=F, row.names=T, col.names=NA)
  }

  VandDCount = data.frame(data.table(PRODF)[, list(Length=.N), by=c("Top.V.Gene", "Top.D.Gene", "Sample")])

  VandDCount$l = log(VandDCount$Length)
  maxVD = data.frame(data.table(VandDCount)[, list(max=max(l)), by=c("Sample")])
  VandDCount = merge(VandDCount, maxVD, by.x="Sample", by.y="Sample", all.x=T)
  VandDCount$relLength = VandDCount$l / VandDCount$max
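  # relLength rescales the log of each V/D pair count by the per-sample maximum,
  # so the heatmap fill runs from 0 to 1 within every sample; the same scaling is
  # reused for the VJ and DJ heatmaps below.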

  cartegianProductVD = expand.grid(Top.V.Gene = Vchain$v.name, Top.D.Gene = Dchain$v.name, Sample = unique(inputdata$Sample))

  completeVD = merge(VandDCount, cartegianProductVD, all.y=TRUE)
  completeVD = merge(completeVD, revVchain, by.x="Top.V.Gene", by.y="v.name", all.x=TRUE)
  completeVD = merge(completeVD, Dchain, by.x="Top.D.Gene", by.y="v.name", all.x=TRUE)
  VDList = split(completeVD, f=completeVD[,"Sample"])

  lapply(VDList, FUN=plotVD)
}

plotVJ <- function(dat){
  if(length(dat[,1]) == 0){
    return()
  }
  cat(paste(unique(dat[3])[1,1]))
  img = ggplot() +
    geom_tile(data=dat, aes(x=factor(reorder(Top.J.Gene, chr.orderJ)), y=factor(reorder(Top.V.Gene, chr.orderV)), fill=relLength)) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    scale_fill_gradient(low="gold", high="blue", na.value="white") +
    ggtitle(paste(unique(dat$Sample), " (N=", sum(dat$Length, na.rm=T), ")", sep="")) +
    xlab("J genes") +
    ylab("V Genes")

  png(paste("HeatmapVJ_", unique(dat[3])[1,1], ".png", sep=""), width=150+(15*length(Jchain$v.name)), height=100+(15*length(Vchain$v.name)))
  print(img)
  dev.off()
  write.table(x=acast(dat, Top.V.Gene~Top.J.Gene, value.var="Length"), file=paste("HeatmapVJ_", unique(dat[3])[1,1], ".csv", sep=""), sep=",", quote=F, row.names=T, col.names=NA)
}

VandJCount = data.frame(data.table(PRODF)[, list(Length=.N), by=c("Top.V.Gene", "Top.J.Gene", "Sample")])

VandJCount$l = log(VandJCount$Length)
maxVJ = data.frame(data.table(VandJCount)[, list(max=max(l)), by=c("Sample")])
VandJCount = merge(VandJCount, maxVJ, by.x="Sample", by.y="Sample", all.x=T)
VandJCount$relLength = VandJCount$l / VandJCount$max

cartegianProductVJ = expand.grid(Top.V.Gene = Vchain$v.name, Top.J.Gene = Jchain$v.name, Sample = unique(inputdata$Sample))

completeVJ = merge(VandJCount, cartegianProductVJ, all.y=TRUE)
completeVJ = merge(completeVJ, revVchain, by.x="Top.V.Gene", by.y="v.name", all.x=TRUE)
completeVJ = merge(completeVJ, Jchain, by.x="Top.J.Gene", by.y="v.name", all.x=TRUE)
VJList = split(completeVJ, f=completeVJ[,"Sample"])
lapply(VJList, FUN=plotVJ)

if(useD){
  plotDJ <- function(dat){
    if(length(dat[,1]) == 0){
      return()
    }
    img = ggplot() +
      geom_tile(data=dat, aes(x=factor(reorder(Top.J.Gene, chr.orderJ)), y=factor(reorder(Top.D.Gene, chr.orderD)), fill=relLength)) +
      theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
      scale_fill_gradient(low="gold", high="blue", na.value="white") +
      ggtitle(paste(unique(dat$Sample), " (N=", sum(dat$Length, na.rm=T), ")", sep="")) +
      xlab("J genes") +
      ylab("D Genes")

    png(paste("HeatmapDJ_", unique(dat[3])[1,1], ".png", sep=""), width=150+(15*length(Jchain$v.name)), height=100+(15*length(Dchain$v.name)))
    print(img)
    dev.off()
    write.table(x=acast(dat, Top.D.Gene~Top.J.Gene, value.var="Length"), file=paste("HeatmapDJ_", unique(dat[3])[1,1], ".csv", sep=""), sep=",", quote=F, row.names=T, col.names=NA)
  }

  DandJCount = data.frame(data.table(PRODF)[, list(Length=.N), by=c("Top.D.Gene", "Top.J.Gene", "Sample")])

  DandJCount$l = log(DandJCount$Length)
  maxDJ = data.frame(data.table(DandJCount)[, list(max=max(l)), by=c("Sample")])
  DandJCount = merge(DandJCount, maxDJ, by.x="Sample", by.y="Sample", all.x=T)
  DandJCount$relLength = DandJCount$l / DandJCount$max

  cartegianProductDJ = expand.grid(Top.D.Gene = Dchain$v.name, Top.J.Gene = Jchain$v.name, Sample = unique(inputdata$Sample))

  completeDJ = merge(DandJCount, cartegianProductDJ, all.y=TRUE)
  completeDJ = merge(completeDJ, revDchain, by.x="Top.D.Gene", by.y="v.name", all.x=TRUE)
  completeDJ = merge(completeDJ, Jchain, by.x="Top.J.Gene", by.y="v.name", all.x=TRUE)
  DJList = split(completeDJ, f=completeDJ[,"Sample"])
  lapply(DJList, FUN=plotDJ)
}

# ---------------------- calculating the clonality score ----------------------

if("Replicate" %in% colnames(inputdata)) #can only calculate a clonality score when replicate information is available
{
  if(clonality_method == "boyd"){
    samples = split(clonalityFrame, clonalityFrame$Sample, drop=T)

    for (sample in samples){
      res = data.frame(paste=character(0))
      sample_id = unique(sample$Sample)[[1]]
      for(replicate in unique(sample$Replicate)){
        tmp = sample[sample$Replicate == replicate,]
        clone_table = data.frame(table(tmp$clonaltype))
        clone_col_name = paste("V", replicate, sep="")
        colnames(clone_table) = c("paste", clone_col_name)
        res = merge(res, clone_table, by="paste", all=T)
      }

      res[is.na(res)] = 0
      infer.result = infer.clonality(as.matrix(res[,2:ncol(res)]))
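      # res now holds one row per clone and one count column (V1, V2, ...) per replicate;
      # that clone-by-replicate count matrix is what is handed to lymphclon's
      # infer.clonality(). Element 12 of the returned list is what gets written out below.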

      write.table(data.table(infer.result[[12]]), file=paste("lymphclon_clonality_", sample_id, ".csv", sep=""), sep=",", quote=F, row.names=F, col.names=F)

      res$type = rowSums(res[,2:ncol(res)])

      coincidence.table = data.frame(table(res$type))
      colnames(coincidence.table) = c("Coincidence Type", "Raw Coincidence Freq")
      write.table(coincidence.table, file=paste("lymphclon_coincidences_", sample_id, ".csv", sep=""), sep=",", quote=F, row.names=F, col.names=T)
    }
  } else {
    write.table(clonalityFrame, "clonalityComplete.csv", sep=",", quote=F, row.names=F, col.names=T)

    clonalFreq = data.frame(data.table(clonalityFrame)[, list(Type=.N), by=c("Sample", "clonaltype")])
    clonalFreqCount = data.frame(data.table(clonalFreq)[, list(Count=.N), by=c("Sample", "Type")])
    clonalFreqCount$realCount = clonalFreqCount$Type * clonalFreqCount$Count
    clonalSum = data.frame(data.table(clonalFreqCount)[, list(Reads=sum(realCount)), by=c("Sample")])
    clonalFreqCount = merge(clonalFreqCount, clonalSum, by.x="Sample", by.y="Sample")

    ct = c('Type\tWeight\n2\t1\n3\t3\n4\t6\n5\t10\n6\t15')
    tcct = textConnection(ct)
    CT = read.table(tcct, sep="\t", header=TRUE)
    close(tcct)
    clonalFreqCount = merge(clonalFreqCount, CT, by.x="Type", by.y="Type", all.x=T)
    clonalFreqCount$WeightedCount = clonalFreqCount$Count * clonalFreqCount$Weight
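    # The Type/Weight lookup table encodes choose(Type, 2): after the per-replicate
    # deduplication above, Type counts how many replicates a clonaltype occurs in, and a
    # clone seen in n replicates contributes n*(n-1)/2 coincident pairs (2 -> 1, 3 -> 3,
    # 4 -> 6, 5 -> 10, 6 -> 15), so WeightedCount is the number of pairwise coincidences.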

    ReplicateReads = data.frame(data.table(clonalityFrame)[, list(Type=.N), by=c("Sample", "Replicate", "clonaltype")])
    ReplicateReads = data.frame(data.table(ReplicateReads)[, list(Reads=.N), by=c("Sample", "Replicate")])
    clonalFreqCount$Reads = as.numeric(clonalFreqCount$Reads)
    ReplicateReads$squared = ReplicateReads$Reads * ReplicateReads$Reads

    ReplicatePrint <- function(dat){
      write.table(dat[-1], paste("ReplicateReads_", unique(dat[1])[1,1], ".csv", sep=""), sep=",", quote=F, na="-", row.names=F, col.names=F)
    }

    ReplicateSplit = split(ReplicateReads, f=ReplicateReads[,"Sample"])
    lapply(ReplicateSplit, FUN=ReplicatePrint)

    ReplicateReads = data.frame(data.table(ReplicateReads)[, list(ReadsSum=sum(as.numeric(Reads)), ReadsSquaredSum=sum(as.numeric(squared))), by=c("Sample")])
    clonalFreqCount = merge(clonalFreqCount, ReplicateReads, by.x="Sample", by.y="Sample", all.x=T)

    ReplicateSumPrint <- function(dat){
      write.table(dat[-1], paste("ReplicateSumReads_", unique(dat[1])[1,1], ".csv", sep=""), sep=",", quote=F, na="-", row.names=F, col.names=F)
    }

    ReplicateSumSplit = split(ReplicateReads, f=ReplicateReads[,"Sample"])
    lapply(ReplicateSumSplit, FUN=ReplicateSumPrint)

    clonalFreqCountSum = data.frame(data.table(clonalFreqCount)[, list(Numerator=sum(WeightedCount, na.rm=T)), by=c("Sample")])
    clonalFreqCount = merge(clonalFreqCount, clonalFreqCountSum, by.x="Sample", by.y="Sample", all.x=T)
    clonalFreqCount$ReadsSum = as.numeric(clonalFreqCount$ReadsSum) #prevent integer overflow
    clonalFreqCount$Denominator = (((clonalFreqCount$ReadsSum * clonalFreqCount$ReadsSum) - clonalFreqCount$ReadsSquaredSum) / 2)
    clonalFreqCount$Result = (clonalFreqCount$Numerator + 1) / (clonalFreqCount$Denominator + 1)
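    # Denominator = (S^2 - sum(r_i^2)) / 2, with S the total reads in a sample and r_i the
    # reads per replicate, i.e. the number of read pairs drawn from different replicates;
    # the score is then (weighted coincidence count + 1) / (possible cross-replicate pairs + 1).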

    ClonalityScorePrint <- function(dat){
      write.table(dat$Result, paste("ClonalityScore_", unique(dat[1])[1,1], ".csv", sep=""), sep=",", quote=F, na="-", row.names=F, col.names=F)
    }

    clonalityScore = clonalFreqCount[c("Sample", "Result")]
    clonalityScore = unique(clonalityScore)

    clonalityScoreSplit = split(clonalityScore, f=clonalityScore[,"Sample"])
    lapply(clonalityScoreSplit, FUN=ClonalityScorePrint)

    clonalityOverview = clonalFreqCount[c("Sample", "Type", "Count", "Weight", "WeightedCount")]

    ClonalityOverviewPrint <- function(dat){
      write.table(dat[-1], paste("ClonalityOverView_", unique(dat[1])[1,1], ".csv", sep=""), sep=",", quote=F, na="-", row.names=F, col.names=F)
    }

    clonalityOverviewSplit = split(clonalityOverview, f=clonalityOverview$Sample)
    lapply(clonalityOverviewSplit, FUN=ClonalityOverviewPrint)
  }
}

# ---------------------- Junction analysis ----------------------

imgtcolumns = c("X3V.REGION.trimmed.nt.nb", "P3V.nt.nb", "N1.REGION.nt.nb", "P5D.nt.nb", "X5D.REGION.trimmed.nt.nb", "X3D.REGION.trimmed.nt.nb", "P3D.nt.nb", "N2.REGION.nt.nb", "P5J.nt.nb", "X5J.REGION.trimmed.nt.nb", "X3V.REGION.trimmed.nt.nb", "X5D.REGION.trimmed.nt.nb", "X3D.REGION.trimmed.nt.nb", "X5J.REGION.trimmed.nt.nb", "N1.REGION.nt.nb", "N2.REGION.nt.nb", "P3V.nt.nb", "P5D.nt.nb", "P3D.nt.nb", "P5J.nt.nb")
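# The junction analysis below only runs when all of these IMGT junction columns are present;
# it reports, per sample, the mean number of trimmed (deleted), N- and P-nucleotides on each
# side of the V-D and D-J junctions, for productive and unproductive sequences separately.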
if(all(imgtcolumns %in% colnames(inputdata)))
{
  newData = data.frame(data.table(PRODF)[, list(unique=.N,
                                                VH.DEL = mean(X3V.REGION.trimmed.nt.nb, na.rm=T),
                                                P1 = mean(P3V.nt.nb, na.rm=T),
                                                N1 = mean(N1.REGION.nt.nb, na.rm=T),
                                                P2 = mean(P5D.nt.nb, na.rm=T),
                                                DEL.DH = mean(X5D.REGION.trimmed.nt.nb, na.rm=T),
                                                DH.DEL = mean(X3D.REGION.trimmed.nt.nb, na.rm=T),
                                                P3 = mean(P3D.nt.nb, na.rm=T),
                                                N2 = mean(N2.REGION.nt.nb, na.rm=T),
                                                P4 = mean(P5J.nt.nb, na.rm=T),
                                                DEL.JH = mean(X5J.REGION.trimmed.nt.nb, na.rm=T),
                                                Total.Del = (mean(X3V.REGION.trimmed.nt.nb, na.rm=T) +
                                                             mean(X5D.REGION.trimmed.nt.nb, na.rm=T) +
                                                             mean(X3D.REGION.trimmed.nt.nb, na.rm=T) +
                                                             mean(X5J.REGION.trimmed.nt.nb, na.rm=T)),
                                                Total.N = (mean(N1.REGION.nt.nb, na.rm=T) +
                                                           mean(N2.REGION.nt.nb, na.rm=T)),
                                                Total.P = (mean(P3V.nt.nb, na.rm=T) +
                                                           mean(P5D.nt.nb, na.rm=T) +
                                                           mean(P3D.nt.nb, na.rm=T) +
                                                           mean(P5J.nt.nb, na.rm=T))),
                                          by=c("Sample")])
  write.table(newData, "junctionAnalysisProd.csv", sep=",", quote=F, na="-", row.names=F, col.names=F)

  newData = data.frame(data.table(UNPROD)[, list(unique=.N,
                                                 VH.DEL = mean(X3V.REGION.trimmed.nt.nb, na.rm=T),
                                                 P1 = mean(P3V.nt.nb, na.rm=T),
                                                 N1 = mean(N1.REGION.nt.nb, na.rm=T),
                                                 P2 = mean(P5D.nt.nb, na.rm=T),
                                                 DEL.DH = mean(X5D.REGION.trimmed.nt.nb, na.rm=T),
                                                 DH.DEL = mean(X3D.REGION.trimmed.nt.nb, na.rm=T),
                                                 P3 = mean(P3D.nt.nb, na.rm=T),
                                                 N2 = mean(N2.REGION.nt.nb, na.rm=T),
                                                 P4 = mean(P5J.nt.nb, na.rm=T),
                                                 DEL.JH = mean(X5J.REGION.trimmed.nt.nb, na.rm=T),
                                                 Total.Del = (mean(X3V.REGION.trimmed.nt.nb, na.rm=T) +
                                                              mean(X5D.REGION.trimmed.nt.nb, na.rm=T) +
                                                              mean(X3D.REGION.trimmed.nt.nb, na.rm=T) +
                                                              mean(X5J.REGION.trimmed.nt.nb, na.rm=T)),
                                                 Total.N = (mean(N1.REGION.nt.nb, na.rm=T) +
                                                            mean(N2.REGION.nt.nb, na.rm=T)),
                                                 Total.P = (mean(P3V.nt.nb, na.rm=T) +
                                                            mean(P5D.nt.nb, na.rm=T) +
                                                            mean(P3D.nt.nb, na.rm=T) +
                                                            mean(P5J.nt.nb, na.rm=T))),
                                           by=c("Sample")])
  write.table(newData, "junctionAnalysisUnProd.csv", sep=",", quote=F, na="-", row.names=F, col.names=F)
}

# ---------------------- AA composition in CDR3 ----------------------

AACDR3 = PRODF[,c("Sample", "CDR3.Seq")]

TotalPerSample = data.frame(data.table(AACDR3)[, list(total=sum(nchar(as.character(.SD$CDR3.Seq)))), by=Sample])

AAfreq = list()

for(i in 1:nrow(TotalPerSample)){
  sample = TotalPerSample$Sample[i]
  AAfreq[[i]] = data.frame(table(unlist(strsplit(as.character(AACDR3[AACDR3$Sample == sample, c("CDR3.Seq")]), ""))))
  AAfreq[[i]]$Sample = sample
}

AAfreq = ldply(AAfreq, data.frame)
AAfreq = merge(AAfreq, TotalPerSample, by="Sample", all.x = T)
AAfreq$freq_perc = as.numeric(AAfreq$Freq / AAfreq$total * 100)

AAorder = read.table(sep="\t", header=TRUE, text="order.aa\tAA\n1\tR\n2\tK\n3\tN\n4\tD\n5\tQ\n6\tE\n7\tH\n8\tP\n9\tY\n10\tW\n11\tS\n12\tT\n13\tG\n14\tA\n15\tM\n16\tC\n17\tF\n18\tL\n19\tV\n20\tI")
AAfreq = merge(AAfreq, AAorder, by.x='Var1', by.y='AA', all.x=TRUE)

AAfreq = AAfreq[!is.na(AAfreq$order.aa),]

AAfreqplot = ggplot(AAfreq)
AAfreqplot = AAfreqplot + geom_bar(aes(x=factor(reorder(Var1, order.aa)), y = freq_perc, fill = Sample), stat='identity', position='dodge')
AAfreqplot = AAfreqplot + annotate("rect", xmin = 0.5, xmax = 2.5, ymin = 0, ymax = Inf, fill = "red", alpha = 0.2)
AAfreqplot = AAfreqplot + annotate("rect", xmin = 3.5, xmax = 4.5, ymin = 0, ymax = Inf, fill = "blue", alpha = 0.2)
AAfreqplot = AAfreqplot + annotate("rect", xmin = 5.5, xmax = 6.5, ymin = 0, ymax = Inf, fill = "blue", alpha = 0.2)
AAfreqplot = AAfreqplot + annotate("rect", xmin = 6.5, xmax = 7.5, ymin = 0, ymax = Inf, fill = "red", alpha = 0.2)
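# The shaded bands mark the charged residues in the hydrophilic-to-hydrophobic ordering above:
# red for the basic amino acids (R, K at positions 1-2 and H at position 7), blue for the
# acidic ones (D at position 4, E at position 6).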
AAfreqplot = AAfreqplot + ggtitle("Amino Acid Composition in the CDR3") + xlab("Amino Acid, from Hydrophilic (left) to Hydrophobic (right)") + ylab("Percentage")

png("AAComposition.png", width = 1280, height = 720)
print(AAfreqplot)
dev.off()
write.table(AAfreq, "AAComposition.csv", sep=",", quote=F, na="-", row.names=F, col.names=T)