@article{Nguyen_McFall_2019,
  title    = {Verification of End to End Learning Concept for Mobile Robotics Platform},
  author   = {Nguyen, Vivian and McFall, Kevin},
  journal  = {Journal of Student Research},
  year     = {2019},
  month    = apr,
  address  = {Houston, U.S.},
  url      = {https://www.jsr.org/index.php/path/article/view/641},
  doi      = {10.47611/jsr.vi.641},
  abstract = {End-to-end neural networks (EENNs) use machine learning to make predictions or decisions directly from inputs and outputs, without being explicitly programmed for each task. In contrast, traditional hand-coded algorithmic approaches to autonomous robotics require every possibility to be programmed in advance. Existing research applying EENNs to autonomous driving demonstrates level-two autonomy, in which the vehicle assists with acceleration, braking, and environment monitoring under a human observer: NVIDIA's DAVE-2 autonomous car system relies on case-specific computing hardware, while DeepPiCar scales the technology down to a low-power embedded computer (Raspberry Pi). The goal of this study is to recreate previous findings on a different platform and in different environments by scaling up DeepPiCar with an NVIDIA Jetson TX2 computing board and hobbyist-grade parts (e.g., 12 V DC motors, Arduino) that represent 'off-the-shelf' components compared to DAVE-2. This validates that the concept scales to more generalized data, easing the training process for an EENN by avoiding dataset overfitting and producing a system with a level of 'common sense'. Training data are collected by pairing 320x240 camera input with the associated velocity and encoder values from a differential drive ground vehicle (DDGV) with quadrature motors, stored in a CSV database. The resulting datasets are fed into an EENN analogous to the DAVE-2 layered structure: one normalization, five convolutional, and three fully connected layers. The EENN is a convolutional neural network (it assumes inputs are images and learns filters, e.g., edge detection, independently of a human programmer), and accuracy is measured by comparing predicted velocity values against actual values from a collected validation dataset. The expected result is that the DDGV navigates a human space and avoids obstacles with the EENN taking only sensor data as input and outputting a velocity for each motor.}
}
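
The abstract fixes only the network's layer counts (one normalization, five convolutional, three fully connected), its 320x240 camera input, and its per-motor velocity output. The sketch below shows one way such a DAVE-2-style EENN could look in tf.keras; the framework choice, filter counts (24/36/48/64/64), kernel sizes, and dense widths (100/50/10) are assumptions borrowed from the published DAVE-2 design, not details given in this paper.

  from tensorflow.keras import layers, models

  def build_eenn(input_shape=(240, 320, 3)):
      """DAVE-2-style EENN: one normalization, five convolutional, and
      three fully connected layers, with two regression outputs
      (left and right wheel velocities of the DDGV)."""
      return models.Sequential([
          layers.Input(shape=input_shape),
          # Normalization layer: rescale pixels from [0, 255] to [-1, 1].
          layers.Rescaling(1.0 / 127.5, offset=-1.0),
          # Five convolutional layers (filter counts assumed from DAVE-2).
          layers.Conv2D(24, 5, strides=2, activation="relu"),
          layers.Conv2D(36, 5, strides=2, activation="relu"),
          layers.Conv2D(48, 5, strides=2, activation="relu"),
          layers.Conv2D(64, 3, activation="relu"),
          layers.Conv2D(64, 3, activation="relu"),
          layers.Flatten(),
          # Three fully connected layers (widths assumed from DAVE-2).
          layers.Dense(100, activation="relu"),
          layers.Dense(50, activation="relu"),
          layers.Dense(10, activation="relu"),
          # Regression head: one velocity per motor.
          layers.Dense(2),
      ])

  model = build_eenn()
  model.compile(optimizer="adam", loss="mse")

Training would then regress the logged wheel velocities against each camera frame under a mean-squared-error loss, which matches the abstract's accuracy measure of comparing predicted velocities against recorded ones.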