Skip to content

Instantly share code, notes, and snippets.

@0x61nas
Created November 16, 2023 00:42
Show Gist options
  • Save 0x61nas/d069a128946f9cc54b500e54b61b1851 to your computer and use it in GitHub Desktop.
Save 0x61nas/d069a128946f9cc54b500e54b61b1851 to your computer and use it in GitHub Desktop.
Review papers in brain tumor detection
#!/usr/bin/env just --justfile

# Directory that receives all LaTeX build artifacts.
OUTDIR := "."
# Default LaTeX entry point.
_main := "main.tex"
# Default PDF viewer.
_pdf_prog := "zathura"
# Default TeX compiler.
_compiler := "pdflatex"
_cflags := "-halt-on-error -output-directory=" + OUTDIR

alias w := whatch
alias c := compile
alias p := preview

# Full build cycle: latex -> bibtex -> latex x2 (resolves citations and refs).
compile compiler=_compiler cflags=_cflags main=_main:
    {{compiler}} {{cflags}} {{main}}
    # bibtex takes the .aux basename; derive it from `main` instead of
    # hard-coding "main", so `just compile pdflatex "" thesis.tex` works too.
    bibtex {{without_extension(main)}}
    {{compiler}} {{cflags}} {{main}}
    {{compiler}} {{cflags}} {{main}}

# Compile, then open the generated PDF.
preview prog=_pdf_prog compiler=_compiler cflags=_cflags main=_main: (compile compiler cflags main)
    # PDF name derived from `main` instead of hard-coded main.pdf.
    {{prog}} {{OUTDIR}}/{{without_extension(main)}}.pdf

# Open the viewer, then recompile whenever the source changes.
# (Recipe name "whatch" kept for backward compatibility with existing users.)
whatch prog=_pdf_prog compiler=_compiler cflags=_cflags main=_main:
    just preview {{prog}} &
    echo "{{main}}" | entr just compile "{{compiler}}" "{{cflags}}" "{{main}}"

# Remove LaTeX build artifacts.
clean:
    rm -f *.toc *.snm *.out *.nav *.blg *.log *.bbl *.aux main.pdf
% Hamada, Br35H brain-tumor detection dataset (Kaggle, 2020).
@misc{br35h,
  title        = {Br35H :: Brain Tumor Detection 2020},
  author       = {Ahmed Hamada},
  year         = {2020},
  howpublished = {Kaggle},
  url          = {https://www.kaggle.com/datasets/ahmedhamada0/brain-tumor-detection},
}
% Wong, YOLOv7 training config file. The url field must hold a bare URL:
% wrapping it in \url{} breaks hyperref/bst processing, and the same link
% was redundantly duplicated in note. The scraped "github.com" host
% is a link-mangling artifact of the original "github.com".
@misc{wong2020yolo7,
  author       = {Kin-Yiu Wong},
  title        = {Yolov7.yaml},
  howpublished = {GitHub},
  year         = {2020},
  url          = {https://github.com/WongKinYiu/yolov7/blob/main/cfg/training/yolov7.yaml},
}
% Jocher, YOLOv5 (6.0/6.1) summary. This is a GitHub issue, not a journal
% article: @article requires a journal field, so @misc with howpublished is
% the correct entry type. The url field holds a bare URL (no \url{} nesting);
% braces protect the acronym's capitalization against case-changing styles.
@misc{jocher2022yolo5,
  author       = {Glenn Jocher},
  title        = {{YOLOv5} (6.0/6.1) brief summary},
  howpublished = {GitHub},
  year         = {2022},
  url          = {https://github.com/ultralytics/yolov5/issues/6998},
}
% Kang et al., RCS-YOLO (LNCS / MICCAI 2023). The url field holds a bare
% DOI link: no \url{} nesting, and no percent-encoded "%2F" — a literal %
% in a .bib field is treated as a comment by LaTeX-side tooling and
% truncates the value. The accent is written as {\"e} so the entry is
% robust regardless of the document's input encoding setup.
@incollection{Kang2023,
  doi       = {10.1007/978-3-031-43901-8_57},
  url       = {https://doi.org/10.1007/978-3-031-43901-8_57},
  year      = {2023},
  publisher = {Springer Nature Switzerland},
  pages     = {600--610},
  author    = {Ming Kang and Chee-Ming Ting and Fung Fung Ting and Rapha{\"e}l C.-W. Phan},
  title     = {{RCS}-{YOLO}: A Fast and~High-Accuracy Object Detector for~Brain Tumor Detection},
  booktitle = {Lecture Notes in Computer Science},
}
% Kang et al., BGF-YOLO (arXiv preprint, 2023).
@misc{kang2023bgfyolo,
  author        = {Ming Kang and Chee-Ming Ting and Fung Fung Ting and Raphaël C. -W.
Phan},
  title         = {BGF-YOLO: Enhanced YOLOv8 with Multiscale Attentional Feature
Fusion for Brain Tumor Detection},
  year          = {2023},
  eprint        = {2309.12585},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CV},
}
% Ding et al., RepVGG (arXiv preprint, 2021).
@misc{ding2021repvgg,
  author        = {Xiaohan Ding and Xiangyu Zhang and Ningning Ma and Jungong Han and
Guiguang Ding and Jian Sun},
  title         = {RepVGG: Making VGG-style ConvNets Great Again},
  year          = {2021},
  eprint        = {2101.03697},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CV},
}
% Iandola et al., DenseNet descriptor pyramids (arXiv preprint, 2014).
@misc{iandola2014densenet,
  author        = {Forrest Iandola and Matt Moskewicz and Sergey Karayev and Ross
Girshick and Trevor Darrell and Kurt Keutzer},
  title         = {DenseNet: Implementing Efficient ConvNet Descriptor Pyramids},
  year          = {2014},
  eprint        = {1404.1869},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CV},
}
\documentclass{beamer}
% beamer already loads hyperref; the explicit load is harmless.
% hypernat smooths natbib/hyperref citation handling.
\usepackage{hyperref}
\usepackage{hypernat}

\title{Review papers in brain tumor detection}
\author[The Egyptian E-learning University]
{
  Abdallah~Shehata \and
  Yasmine~Osama \and
  Youmna~Mustafa \and
  Khloud~Tareq \and
  Beshoy~Nashat \and
  Abeer~Yousief \and
  % \emph takes a braced argument; the original "{\emph Anas~Elgarhy}"
  % emphasized only the first token after the command.
  \emph{Anas~Elgarhy}
}
\institute[EELU]{The Egyptian E-learning University}

% Alternative themes kept for easy switching.
% \usetheme{CambridgeUS}
% \usetheme{Dresden}
\usetheme{Warsaw}
% \usetheme{Hannover}
\usecolortheme{seagull}
\begin{document}
\frame{\titlepage}
\begin{frame}
\frametitle{Outline}
\tableofcontents
\end{frame}
\section{Objective}
\begin{frame}
\frametitle{Objective}
Our goal is to explore and dissect the innovative approaches presented in these papers, shedding light on the strides made in leveraging deep learning, particularly within the You Only Look Once (YOLO) framework, for the crucial task of identifying brain tumors in medical imaging.
\end{frame}
\section{RCS-YOLO}
\begin{frame}
\frametitle{Paper's Title and Authors}
\begin{block}{Paper Title}
RCS-YOLO: A Fast and High-Accuracy Object Detector for Brain Tumor Detection \cite{Kang2023}
\end{block}
\begin{block}{Authors}
Ming Kang, Chee-Ming Ting, Fung Fung Ting, and Raphaël C.-W. Phan
\end{block}
\end{frame}
\begin{frame}
\frametitle{Objective}
\begin{itemize}
\item Introduce a novel YOLO architecture called RCS-YOLO for brain tumor detection.
\item Introduces RCS (Re-parameterized Convolution based on Channel Shuffle) and RCS-OSA (One-Shot Aggregation of RCS) for efficient feature extraction.
\item Achieve a balance between speed and accuracy in object detection.
\item Highlights the balance between speed and accuracy in YOLO frameworks.
\end{itemize}
\end{frame}
\subsection{Synopsis}
\begin{frame}
\frametitle{Synopsis}
The paper \cite{Kang2023} proposes a novel YOLO architecture called RCS-YOLO with Reparameterized Convolution based on channel Shuffle (RCS) for the automatic detection of brain tumors from MRI images.
\end{frame}
\begin{frame}
\frametitle{Synopsis}
The authors introduce RCS and a One-Shot Aggregation of RCS (RCS-OSA) to enhance information extraction and reduce time consumption, leading to improved speed and accuracy compared to YOLOv5 \cite{jocher2022yolo5}, YOLOv7, and YOLOv8.
\end{frame}
\begin{frame}
\frametitle{Synopsis}
The complexity of CNN-based models, particularly in relation to inference speed, is discussed,
with a focus on advanced models such as RepVGG \cite{ding2021repvgg} and the impact of techniques
like reparameterization, grouped convolutions, and channel shuffle operations on the computational complexity and information flow.
\end{frame}
\begin{frame}
\frametitle{Synopsis}
RCS-YOLO is designed to leverage RepVGG/RepConv and incorporates an RCS-OSA module to facilitate feature cascade and reduce network fragmentation.
The proposed model is evaluated using a brain tumor dataset, demonstrating superior performance compared to other state-of-the-art YOLO architectures.
\end{frame}
\begin{frame}
\frametitle{Synopsis}
The paper also presents a detailed examination of the RCS architecture and the benefits of the channel shuffle operation
in reducing computational complexity while enabling efficient feature information communication.
Additionally, the One-Shot Aggregation (OSA) module is highlighted for overcoming the inefficiencies of dense connections in DenseNet \cite{iandola2014densenet}, demonstrating its effectiveness for diverse feature representations.
\end{frame}
\begin{frame}
\frametitle{Synopsis}
The study provides comparative metrics of detection effectiveness, including precision, recall, AP50, AP50:95, FLOPs, and Frames Per Second (FPS), to highlight the advantages of the proposed model.
The results show that RCS-YOLO outperforms YOLOv7, YOLOv6-L v3.0, and YOLOv8l in terms of accuracy and speed,
demonstrating its superiority for brain tumor detection. Overall, the paper emphasizes the effectiveness of the proposed RCS-YOLO network for fast and accurate medical object detection, particularly in the context of brain tumor detection from MRI images.
\end{frame}
% \subsection{Overview}
% \begin{frame}
% \frametitle{Methodology}
% \begin{itemize}
% \item Propose RCS (Re-parameterized Convolution based on channel Shuffle) and RCS-OSA (One-Shot Aggregation of RCS) modules.
% \pause
% \item Use channel shuffle operations inspired by ShuffleNet to enhance information fusion and feature extraction.
% \pause
% \item Employ multi-scale feature fusion for efficient communication between different groups of convolutions.
% \pause
% \item Reduce computational complexity and memory consumption for faster inference.
% \end{itemize}
% \end{frame}
% \begin{frame}
% \frametitle{Results}
% \begin{itemize}
% \item The proposed model outperforms YOLOv6, YOLOv7, and YOLOv8 in terms of speed and accuracy.
% \pause
% \item Precision improvement by 1\%, and a 60\% increase in inference speed compared to YOLOv7.
% \pause
% \item Achieve state-of-the-art performance on the brain tumor detection task.
% \pause
% \item Compares the model with other object detectors using metrics such as precision, recall, AP50, FLOPs, and FPS.
% \end{itemize}
% \end{frame}
\subsection{Key Findings}
\begin{frame}
\frametitle{The major findings and results of the paper}
\begin{enumerate}
\item \textbf{RCS-YOLO Outperforms YOLO Versions:} The proposed RCS-YOLO model demonstrates superior performance in terms of both speed and accuracy compared to YOLOv6, YOLOv7, and YOLOv8.
\pause
\item \textbf{Precision Improvement:} Notably, RCS-YOLO shows a 1\% improvement in precision compared to YOLOv7, contributing to enhanced object detection capabilities.
\pause
\item \textbf{Significant Speed Enhancement:} RCS-YOLO achieves a 60\% increase in inference speed at 114.8 images detected per second (FPS) compared to YOLOv7, indicating efficient real-time processing for brain tumor detection.
\pause
\item \textbf{State-of-the-Art Performance:} The experimental results on the Br35H \cite{br35h} dataset position RCS-YOLO as a state-of-the-art model for the specific task of brain tumor detection.
\end{enumerate}
\end{frame}
\subsection{Relevance}
\begin{frame}
\frametitle{Addressing a Gap in YOLO Applications}
While YOLO frameworks are recognized for their efficiency in object detection, their application in brain tumor detection is underexplored. The paper fills this gap by proposing and evaluating RCS-YOLO specifically for this medical imaging task.
\end{frame}
\begin{frame}
\frametitle{Performance Comparison with Established Models}
The comparative analysis against YOLOv6, YOLOv7, and YOLOv8 provides valuable insights into the strengths of RCS-YOLO. This information is crucial for understanding the effectiveness of different models in the context of brain tumor detection.
\end{frame}
\begin{frame}
\frametitle{Incorporating Innovative Architectures}
The introduction of the RCS-OSA module, inspired by ShuffleNet and addressing the limitations of other CNN-based models, showcases the innovative architectural considerations made in RCS-YOLO. This can inform future research on the design of efficient and accurate object detection models.
\end{frame}
\begin{frame}
\frametitle{Practical Implications}
The demonstrated improvements in precision and speed have practical implications for real-world applications of brain tumor detection, potentially influencing the choice of models in clinical settings or medical research.
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Paper 2 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{BGF-YOLO}
\begin{frame}
\frametitle{Paper's Title and Authors}
\begin{block}{Paper Title}
BGF-YOLO: Enhanced YOLOv8 with Multiscale Attentional Feature Fusion for Brain Tumor Detection \cite{kang2023bgfyolo}
\end{block}
\begin{block}{Paper Authors}
Ming Kang, Chee-Ming Ting, Fung Fung Ting, Raphaël C.-W. Phan
\end{block}
\end{frame}
\subsection{Synopsis}
\begin{frame}
\frametitle{Synopsis}
The paper aims to enhance brain tumor detection using You Only Look Once (YOLO)-based object detectors.
Specifically, the authors introduce a novel architecture, BGF-YOLO, which incorporates Bi-level Routing Attention (BRA), Generalized feature pyramid networks (GFPN), and a Fourth detecting head into the YOLOv8 framework.
\end{frame}
\begin{frame}
\frametitle{Synopsis}
The main objective is to improve the accuracy and efficiency of brain tumor detection through attention mechanisms,
feature fusion networks, and additional detecting heads.
\end{frame}
\subsection{Key Findings}
\begin{frame}
\frametitle{The major findings and results of the paper}
\begin{enumerate}
\item BGF-YOLO demonstrates a 4.7\% absolute increase in mean average precision (mAP50) compared to YOLOv8x.
\pause
\item The attention mechanism (BRA) dynamically weighs the importance of features, contributing to improved accuracy.
\pause
\item The GFPN structure in the BGF-YOLO model outperforms other feature fusion techniques, leading to enhanced precision and mAP.
\pause
\item The addition of a Fourth detecting head improves detection capacity for objects of various sizes, crucial in brain tumor detection scenarios.
\pause
\item Ablation studies reveal the significant impact of BRA, GFPN, the fourth head, and GIoU on accuracy improvement.
\end{enumerate}
\end{frame}
\subsection{Relevance}
\begin{frame}
\frametitle{Why this paper is relevant to us}
\begin{itemize}
\item This paper is relevant to the literature review as it contributes to the exploration of advanced techniques for brain tumor detection, specifically within the YOLO framework.
\pause
\item The introduction of attention mechanisms, feature fusion networks, and additional detecting heads addresses limitations identified in existing YOLO-based models.
\pause
\item The findings demonstrate a notable improvement in accuracy, making BGF-YOLO a promising approach for automatic brain tumor detection.
\pause
\item The paper aligns with the broader context of leveraging deep learning architectures to enhance medical image analysis, contributing valuable insights to the field of computer-aided diagnosis for brain tumors.
\end{itemize}
\end{frame}
\section*{References}

\begin{frame}
  \begin{center}
    % \Huge is a font-size switch, not a command taking an argument:
    % scope it with a group instead of writing \Huge{...}.
    {\Huge Thanks!}
  \end{center}
\end{frame}

\bibliographystyle{ieeetr}
% \bibliography takes the database name WITHOUT the .bib extension.
\bibliography{main}

\end{document}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment