% Some of these references have annotations. If you find any of the % annotations to be inaccurate, please let me know about the problem % and I'll fix it asap. % These references were obtained from many sources, but the vast majority % were from D.R. Mani's dissertation proposal. He has sometimes % used the abstract field for annotations, but it is quite evident % when this is the case. My thanks to D.R. Mani, Stan Franklin, % Joydeep Ghosh, S.C. Kak, Phil Kohn, Jacob M. Murre, Soheil Shams % Jim Steck, Thilo Reski, Greg Heileman, Tim Brown and Mike Glover % for responding to my request for references. % I have divided the references into 2 sections - structured nets % and others, including nmulti-layer backprop nets. The division is % approximate, particularly since I have not seen several of % the original papers as yet % Ben Gomes (gomes@icsi.berkeley.edu) % note:Alsobibclean will reorder names to be of the form first last, % if needed % NEW ADDITIONS: These are appended to the main bibliography as I % receive them. Eventually they will be incorporated into the summary % and the main bibliography. % From Igor Milosavlevich, Oct 6 93 % From Manavendra Misra Oct 8 1993 @String{jpdc = "Journal of Parallel and Distributed Computing} @String{ieeetkde = "IEEE Transactions on Knowledge and Data Engrg."} @String{ieeec = "IEEE Computer"} @String{icsi = "International Computer Science Institute"} @String{ics = "Institute for Cognitive Science, University of California, San Diego"} % --------------- Parallel Simulators ----------------------------- @Article{feld88, author = "Jerome A. Feldman and Mark A. Fanty and Nigel H. Goddard and Kenton J. Lynne", title = "Computing with Structured Connectionist Networks", journal = "Communications of the ACM", year = "1988", volume = "31", number = "2", pages = "170--187", month = feb, note = "Describes RCS on the BBN in fair detail", } @Book{urs, author = "Urs A. Muller", title = "Simulation of Neural Networks on Parallel Computers", publisher = "Hartung Gorre Verlag Konstanz", year = 1993, editor = "Wolfganga Fichtner et al.", volume = 23, series = "Series in Microelectronics", annote = "Simulator for layered nets on the MUSIC ring architecture. Specification in NeuroBasic" } @TechReport{lutz94, author = "Lutz Prechelt", title = "CuPit -- {A} Parallel Language for Neural Algorithms: Language Reference and Tutorial", institution = "Karlsruhe University", year = 1994, number = "4/94", address = "76128 Karlsruhe, Germany", month = jan, email = "prechelt@ira.uka.de" } @Article{lutz94b, author = "Lutz Prechelt", title = "Data Locality and Load Balancing for Parallel Neural Network Learning", journal = "Submitted to: Computer and Software Engineering", year = 1994, month = "April" annote = "Mapping onto the MasPar MP-1" } @Article{lange90, author = "Trent Lange", title = "Simulation of Heterogeneous Neural Networks on Serial and Parallel Machines", journal = "Parallel Computing", year = "90", vol = "14", annote = "Unit level simulator on the CM-2 for general heterogeneous structured networks" } @Article{angeniol90, author = "Bernard Angeniol", title = "Pygmalion: {ESPRIT} {II} Project 2059, Neurocomputing", journal = "IEEE Micro", year = "1990", month = dec } % --------------- Analysis of sparse nets ----------------------- @Article{murre93, author = "Jacob M. J. Murre", title = "Transputers and Neural Networks: An Analysis of Implementation Constraints and Performance", journal = "IEEE Transactions on Neural Nets", year = "1993", volume = "4", number = "2", month = mar, } @Article{ghosh89, author = "Joydeep Ghosh and Kai Hwang", title = "Mapping Neural Networks onto Message Passing Multicomputers", journal = "Journal of Parallel and Distributed Processing", year = "1988", volume = "6", pages = "291--330", } % ========================= Structured Nets ==================== @techreport{ICSI-TR-94-031, author = "D.R. Mani and Lokendra Shastri", title = "Massively Parallel Real-Time Reasoning with Very Large Knowledge Bases: An Interim Report", address = "Berkeley, CA", year = "August 1994", publisher = "International Computer Science Institute", number = "TR-94-031", abstract = "We map structured connectionist models of knowledge representation and reasoning ontoexisting general purpose massively parallel architectures with the objective of developing and implementing practical, real-time reasoning systems. SHRUTI, a connectionist knowledge representation and reasoning system which attempts to model reflexive reasoning, serves as our representative connectionist model. Realizations of SHRUTI are developed on the Connection Machine CM-2--an SIMD architecture--and on the connection Machine CM-5--an MIMD architecture. Though SIMD implementations on the CM-2 are reasonably fast--requiring a few seconds to tens of seconds for answering queries--experiments indicate that SPMD message passing systems are vastly superior to SIMD systems and offer hundred-fold speedups. The CM-5 implementation can encode large knowledge bases with several hundred thousand (randomly generated) rules and facts, and respond in under 500 milliseconds to a range of queries requiring inference depths of up to eight. This work provides some new insights into the simulation of structured connectionist networks on massively parallel machines and is a step toward developing large yet efficient knowledge representation and reasoning systems." } @Article{kim93, author = "J. T. Kim and D. I. Moldovan", title = "Classification and retrieval of knowledge on parallel marker-passing architecture", journal = ieeetkde, month = oct, year = "1993", } @InCollection{bic87, author = "Lubomir Bic", title = "Data-driven Processing of Semantic Nets", booktitle = "Parallel computation and computers for artificial intelligence", publisher = "Kluwer Academic Publishers", year = "1988", editor = "Janusz S. Kowalik", chapter = "7", pages = "131--149", } @Article{shams93, author = "S. Shams", title = "Implementing Arbitrarily Connected Neural Networks on the {DREAM} Machine", journal = "Proceedings of the World Congress on Neural Networks", year = "1993", volume = "4", pages = "869--873", } @PhdThesis{shams92a, author = "S. Shams", title = "{DREAM} Machine - {A} Platform for Efficient Implementation of Neural Networks with Arbitrarily Complex Interconnection Structures", school = "University of Southern California", year = "1992", note = "Hughes Research Labs TR 588", address = "Los Angeles, CA", } @Article{shams92b, author = "S. Shams and J.-L. Gaudiot", title = "Efficient Implementation of Neural Networks on the {DREAM} Machine", journal = "Proceedings of the 11th Inter. Conf. on Pattern Recognition", year = "1992", volume = "4", pages = "204--208", } @InCollection{Hinton81, author = "Geoffrey E. Hinton", title = "Implementing Semantic Networks in Parallel Hardware", booktitle = "Parallel Models of Associative Memory", editor = "Geoffrey E. Hinton and James A. Anderson", publisher = "Lawrence Erlbaum Associates", address = "{NJ}", year = "1981", } @TechReport{simic92, author = "P. Simic and S. Shams", title = "Solving the Assignment and Scheduling Problems using Cnet", institution = "California Institute of Technology", year = "1992", number = "CALT-68-1892", } @InCollection{Wilson89, author = "M. A. Wilson and J. M Bower", title = "The Simulation of Large-Scale Neural Networks", booktitle = "Methods in Neuronal Modeling: From Synapses to Networks", editor = "C. Koch and I. Segev", pages = "291--333", publisher = "{MIT} Press", address = "Cambridge, {MA}", year = "1989", abstract = "Considers the construction and simulation of neural network models that are fundamentally based on the anatomical structure and physiological characteristics of actual biological networks. Network modeling considerations are discussed and the simulation and analysis of an example network (the piriform cortex) is explained in detail.", } @InProceedings{Evett89a, author = "Matthew P. Evett, Lee Spector and James A. Hendler", title = "Knowledge Representation on the Connection Machine", booktitle = "Proceedings of the Supercomputing Conference", address = "Reno, {NV}", year = "1989", pages = "283--293", } @InCollection{Smotroff90, author = "Ira G. Smotroff", title = "Dataflow Architectures: Flexible Platforms for Neural Network Simulation", booktitle = nips, year = "1990", } @InProceedings{Wilson89:IJCAI, author = "Stephen S. Wilson", title = "Neural Computing in a One-Dimensional {SIMD} Array", booktitle = "Proceedings of the International Joint Conference on Artificial Intelligence", year = "1989", pages = "206--211", } @Article{Forrest87, author = "Roweth, D., Stroud, N., Wallace, D. J. Forrest B. M. and G. V. Wilson", title = "Implementing Neural Network Models on Parallel Computers", journal = "Computer Journal", volume = "30", number = "5", pages = "413--419", year = "1987", } @Article{blell87, author = "Guy Blelloch and Charles R. Rosenberg", title = "Network Learning on the Connection Machine", journal = "Proceedings of the Tenth International Joint Conference on Artificial Intelligence", year = "1987", } @Article{Nordstrom92, author = "T. {Nordstr\"om} and B. Svensson", title = "Using and Designing Massively Parallel Computers for Artificial Neural Networks", journal = "Journal of Parallel and Distributed Computing", category = "gennnmapping", volume = "14", number = "3", pages = "260--285", year = "1992", } @Article{wei91, author = "Viktor K. Prasanna Wei-Ming Lin and K. Wojtek Przytula", title = "Algorithmic Mapping of Neural Network Models onto Parallel {SIMD} Machines", journal = ieeec, year = "1991", month = Dec, volume = "40", number = "12", abstract = "Describes parallel implementations of sparsely connected neural networks on 2D SIMD (mesh-connected) machines. Mappings are based on a graph-theoretic approach. Neural net updates (multiply-accumulate) are performed by implementing fast permutation algorithms to move the data (weights and activations) to the required (mesh-adjacent) locations on the grid.", } @inproceedings{Evett89, author = "Matthew P. Evett and Lee Spector and James Hendler", title = "Knowledge Representation on the Connection Machine", booktitle = "Proceedings Supercomputing '89", year = "89", publisher = "ACM Press, Order Number 415892", month = "November" , year = "1989" } @TechReport{Evett90a, author = "Matthew P. Evett and James A. Hendler and Lee Spector", title = "{PARKA}: Parallel Knowledge Representation on the Connection Machine", institution = "University of Maryland at College Park", year = "1990", number = "CS-TR-2409", email = "hendler@dormouse.cs.umd.edu", } @InProceedings{Evett90b, author = "Matthew P. Evett and James A. Hendler and A. Mahanti and Dana Nau", title = "{PRA*}: {A} Memory-Limited Heuristic Search Procedure for the Connection Machine", booktitle = "Proceedings of Frontiers '90, The Third Symposium on the Frontiers of Massively Parallel Processing", publisher = "{IEEE} Computer Society Press", address = "Los Alamitos, {CA}", month = oct, year = "1990", } @InProceedings{evett93, author = "Mathew P. Evett and William A. Andersen and James A. Hendler", title = "Massively Parallel Support for Efficient Knowledge Representation", booktitle = "International Joint Conference on Artificial Intelligence", year = "1993", } @TechReport{evett92, author = "Matthew P. Evett and James A. Hendler", title = "An Update of {PARKA}, a Massively Parallel Knowledge Representation System", institution = "University of Maryland, College Park", year = "1992", number = "CS-TR-2850", } @Article{Lin91, author = "Wei-Ming Lin and Viktor K. Prasanna and K. Wojtek Przytula", title = "Algorithmic mapping of Neural Network Models onto Parallel {SIMD} Machines", journal = "{IEEE} Transactions on Computers", volume = "40", number = "12", pages = "1390--1401", year = "1991", abstract = "Describes parallel implementations of sparsely connected neural networks on 2D SIMD (mesh-connected) machines. Mappings are based on a graph-theoretic approach. Neural net updates (multiply-accumulate) are performed by implementing fast permutation algorithms to move the data (weights and activations) to the required (mesh-adjacent) locations on the grid.", } @InProceedings{Chung89, author = "M. Chung and Y. Chung", title = "Data Parallel Simulation Using Time-Warp on the Connection Machine", booktitle = "Proceedings of the 26$^{th}$ {ACM}/{IEEE} Conference on Design Automation", address = "Las Vegas, {NV}", year = "1989", pages = "98--103", } @Article{chung92, author = "Sang-Hwa Chung and D. I. Moldovan", title = "Modeling Semantic Networks on the Connection Machine", pages = "152--163", journal = jpdc, volume = "17", year = "1992", } @TechReport{Fox87, author = "G. C. Fox and W. Furmanski", title = "Hypercube communication for neural network algorithms", institution = "California Institute of Technology", year = "1987", number = "{C$^3$}P--405", } @InProceedings{Belloch87, author = "Guy Belloch and Charles R. Rosenberg", title = "Network Learning on the Connection Machine", booktitle = "Proceedings of the Tenth International Joint Conference on Artificial Intelligence", address = "Milan, Italy", year = "1987", } @InCollection{Hastings87, author = "H. M. Hastings and S. Waner", title = "Neural Nets on the {MPP}", booktitle = "Frontiers of Massively Parallel Scientific Computation", editor = "J. R. Fisher", publisher = "{NASA}", year = "1987", } @InProceedings{Przytula90, author = "K. Wojtek Przytula and Wei-Ming Lin and Viktor K. Prasanna", title = "Partitioned Implementation of Neural Networks on Mesh Connected Array Processors", booktitle = "Proceedings of the 1990 {IEEE} Workshop on {VLSI} Signal Processing", year = "1990", } @InProceedings{Shams90, author = "Soheil Shams and K. Wojtek Przytula", title = "Mapping of Neural Networks onto Programmable Parallel Machines", booktitle = "Proceedings of the {IEEE} International Symposium on Circuits and Systems", address = "New Orleans, {LA}", month = may, year = "1990", volume = "4", pages = "2613--2617", } @InProceedings{Brown87, author = "Nathan H. Brown Jr.", title = "Neural Network Implementation approaches for the Connection Machine", booktitle = "Neural Information Processing Systems", editor = "Dana Z. Anderson", address = "Denver, {CO}", year = "1987", pages = "127--136", } @Article{Deprit89, author = "E. Deprit", title = "Implementing Recurrent Back-Propagation on the Connection Machine", journal = "Neural Networks", volume = "2", number = "4", pages = "295--314", year = "1989", } % ----- Others - some for fully connected multilayer structures, % ------ hopfield nets and some unread papers ------------------ @Article{Obermayer90, author = "K. Obermeyer and H. Ritter and K. Schulten", title = "Large-Scale Simulations of Self-Organizing Neural Networks on Parallel Computers: Application to Biological Modeling", journal = "Parallel Computation", volume = "14", number = "3", pages = "381--404", year = "1990", } @InProceedings{Grajski90, author = "K. A. Grajski and G. Chinn and C. Chen and C. Kuszmaul and S. Tomboulian", title = "Neural Network Simulation on the {MasPar MP-1} Massively Parallel Processor", booktitle = "International Neural Network Conference", address = "Paris, France", year = "1990", volume = "2", pages = "673", note = "Abstract only", } @InProceedings{Watanabe89, author = "Takumi Watanabe and Yoshi Sugiyama and Toshio Kondo and Yoshihiro Kitamura", title = "Neural Network Simulation on a Massively Parallel Cellular Array Processor: {AAP-2}", booktitle = "International Joint Conference on Neural Networks", address = "Washington, {DC}", year = "1989", volume = "2", pages = "155--161", } @InProceedings{Barash89, author = "S. C. Baresh and M. A. Eshera", title = "The Systolic Array Neurocomputer: Fine-Grained Parallelism at the Synaptic Level", booktitle = "International Joint Conference on Neural Networks", address = "Washington, {DC}", year = "1989", volume = "2", pages = "613", note = "Abstract only", abstract = "Neural models of computing are defined in termas of large numbers of interconnected neuron-like units. These models have been implemented on various parallel processors, employing relatively coarse-grained parallelism at the level of neurons or groups of neurons. We present a new algorithm for parallelism at the synaptic level on fine-grained mesh- connected systolic arrays. The resulting system is shown to perform extremely well, computing at the rate of 300 million connections per second (CPS) during generalized delta rule learning for a multilayered neural network.", } @TechReport{Witbrock89, author = "M. Witbrock and M. Zagha", title = "An Implementation of Back-Propagation Learning on {GF11}, a Large {SIMD} Parallel Computer", institution = "Carnegie-Mellon University", year = "1989", number = "CMU-CS-89-208", } @InProceedings{Krikelis90, author = "A. Krikelis and M. {Gr\"ozinger}", title = "Implementing Neural Networks with the Associative String Processor", booktitle = "International Workshop for Artificial Intelligence and Neural Networks", address = "Oxford, England", year = "1990", } @InProceedings{Kung88, author = "S. Y. Kung and J. N. Hwang", title = "Parallel Architectures for Artificial Neural Nets", booktitle = "International Conference on Neural Networks", address = "San Diego, {CA}", year = "1988", volume = "2", pages = "165--172", } @Article{Kung89, author = "S. Y. Kung and J. N. Hwang", title = "A Unified Systolic Architecture for Artificial Neural Networks", journal = "Journal of Parallel and Distributed Computing", volume = "6", pages = "358--387", year = "1989", } @InProceedings{Yoon90b, author = "Hyunsoo Yoon and Hong H. Nang and S. R. Maeng", title = "A Distributed Backpropagation Algorithm of Neural Networks on Distributed-Memory Multiprocessors", booktitle = "Proceedings of Frontiers '90, The Third Symposium on the Frontiers of Massively Parallel Processing", address = "College Park, {MD}", year = "1990", pages = "358--363", } @InProceedings{Fontaine92, author = "Thomas Fontaine", title = "Data-parallel Training of Spatiotemporal Connectionist Networks on the Connection Machine", booktitle = "Proceedings of the International Joint Conference on Neural Networks", volume = "4", pages = "555--559", year = "1992", } @TechReport{Fontaine92:TR, author = "Thomas Fontaine", title = "{GRAD-CM2}: {A} Data-Parallel Connectionist Network Simulator", institution = "University of Pennsylvania", year = "1992", month = jul, number = "MS-CIS-92-55", } @Article{James92, author = "Mark James and Doan Hoang", title = "Design of Low-Cost, Real-Time Simulation Systems for Large Neural Networks", journal = "Journal of Parallel and Distributed Computing", volume = "14", number = "3", pages = "221--235", year = "1992", } @InProceedings{Krikelis91, author = "Anargyros Krikelis", title = "A Novel Massively Parallel Associative Processing Architecture for the Implementation of Artificial Neural Networks", booktitle = "Proceedings of the 1991 International Conference on Acoustics, Speech, and Signal Processing", address = "Toronto, Canada", month = may, year = "1991", pages = "1057--1060", } @InProceedings{Omidvar91, author = "O. M. Omidvar and C. L. Wilson", title = "Massively Parallel Implementation of Neural Network Architectures", booktitle = "{SPIE/IS\&T} Symposium on Electronic Imaging Science and Technology", address = "San Jose, {CA}", month = feb, year = "1991", pages = "532--543", } @InProceedings{Shams91, author = "Soheil Shams and Jean-Luc Gaudiot", title = "Parallel Methods for Implementations of Neural Networks", booktitle = "International Joint Conference on Neural Networks", address = "Seattle, {WA}", month = jul, year = "1991", pages = "988", notes = "Summary form only", } %Abstract: Summary form only given.The inherent parallelism available %in a neural network structure seems to indicate a simple method for %parallel implementation in hardware. Unfortunately, the diversity in %the type of neural network, limited analytical data on their %computational requirements, and demanding communication requirements %have all been significant impediments to the development of a %general-purpose massively parallel neurocomputer. The authors have %established a basic taxonomy of neural network implementations based %on the granularity of parallelism exploited. A detailed analysis of %the possible sources of parallelism in neural network models, along %with architectural characteristics and their effective use in neural %computation, was carried out with each class of implementations. This %analysis is intended to be used as a framework for the design of %future neurocomputer systems. @InProceedings{Oohashi91, author = "Takeshi Oohashi and Toshiaki Ejima", title = "An Artificial Neural Network Simulator on the Loosely Coupled Parallel Processors", booktitle = "International Joint Conference on Neural Networks", address = "Seattle, {WA}", month = jul, year = "1991", pages = "922", abstract = "Summary form only", } %Summary form only given, as follows. The authors examine the %parallelism of a multilayered ANN (artificial neural network) and %discuss a parallel algorithm suited to loosely coupled parallel %processors. A mapping of a multilayered network to large-grain %processors is proposed, and its performance is evaluated. For a %two-layer backpropagation model which has N units in each layer, the %highest speedup ratio is obtained with 8N processors but the parallel %efficiency is less than 20%. With 2N processors and N/2 processors, %the parallel efficiencies of the mapping are 50% and 80%, %respectively.It is also shown that the proposed parallel algorithm is %more efficient for a larger network. @InProceedings{Midorikawa91, author = "Hiroko Midorikawa", title = "The Parallel Processing of Neural Networks on Ring-Shaped Multiprocessor", booktitle = "Proceedings of the 1991 {IEEE} Pacific Rim Conference on Communications, Computers and Signal Processing", address = "Victoria, Canada", month = may, year = "1991", pages = "115--119", } @TechReport{Morgan90, author = "Nelson Morgan", title = "The Ring Array Processor ({RAP}): algorithms and architecture", institution = "International Computer Science Institute", number = "90-047", month = sep, year = "1990", } @InProceedings{Cosnard91, author = "M. Cosnard and J. C. Mignot and H. Paugam-Moisy", title = "Implementations of Multilayer Neural Networks on Parallel Architectures", booktitle = "Second International Specialist Seminar on Design and Application of Parallel Digital Processors", address = "Lisbon, Port", month = apr, year = "1991", pages = "43--47", } @InProceedings{Kumar90, author = "V. K. Prasanna Kumar and K. Wojtek Przytula", title = "Algorithmic Mapping of Neural Network Models onto Parallel {SIMD} Machines", booktitle = "Proceedings of the 1990 International Conference on Application Specific Array Processors", address = "Princeton, {NJ}", month = sep, year = "1990", pages = "259--271", } @InProceedings{Gupta90, author = "S. N. Gupta and M. Zubair and C. E. Grosch", title = "Simulation of Neural Networks on a Massively Parallel Computer ({DAP-510}) Using Sparse Matrix Techniques", booktitle = "Proceedings of the 3rd Symposium on the Frontiers of Massively Parallel Computation", address = "College Park, {MD}", month = oct, year = "1990", pages = "376--379", } @InProceedings{Lange89, author = "Trent E. Lange and Jack B. Hodges and Maria E. Fuenmayor and Leonid V. Belyaev", title = "Simulating Hybrid Connectionist Architectures", booktitle = "Winter Simulation Conference Proceedings", address = "Washington, {DC}", month = dec, year = "1989", pages = "569--578", } @InBook{paugh93, author = "Helene Paugam-Moisy", title = "Parallel Neural Computing Based on Network Duplicating", chapter = 10, publisher = "John Wiley and Sons Ltd.", year = 1993, editor = "I. Pitas" } @InProceedings{Asokan90, author = "N. Asokan and Ravi Shankar and Kishan Mehrotra and Chilukuri Mohan and Sanjay Ranka", title = "A Neural Network Simulator for the Connection Machine", booktitle = "Proceedings of the 5th {IEEE} International Symposium on Intelligent Control", address = "Philadelphia, {PA}", month = sep, year = "1990", pages = "518--523", abstract = "The design, development, and performance of CM-RCS, a neural network simulator for the Connection Machine (CM), are described. The design of the simulator is based on the Rochester Connectionist Simulator (RCS) for connectionist networks. The CM-RCS can be used as a stand-alone system or as a high-performance parallel back end to RCS. In the latter case, once the network has been built by RCS, the back-end system constructs and executes an equivalent network on the CM processor array. The CM-RCS facilitates the exploitation of the massive parallelism inherent in connectionist networks. It also enables substantial reduction in the training times of connectionist networks.", } @Article{steck92, author = "K. Krishnamurthy and J. Steck and B. McMillin and G. Leininger", title = "Parallel Implemtation of a Recursive Least-Squares Neural Network on the Intel i{PSC}/2", journal = "Journal of Parallel and Distributed Computing", year = "1993", volume = "18", pages = "89--93", } @InProceedings{nunez90, author = "Fernando J. Nunez and Jose A. B. Fortes", title = "Performance of Connectionist Learning Algorithms on 2-{D} {SIMD} Processor Arrays", booktitle = "Neural Information and Processing Systems 2", year = "1990", publisher = "Morgan Kaufmann", } @Article{chu92, author = "Lon-Chan Chu and Benjamin W. Wah", title = "Optimal Mapping of Neural-Network Learning on Message-Passing Multicomputers", journal = jpdc, year = "1992", volume = "14", pages = "319--339", } @Article{garzon92, author = "William Bagget and William Boyd and Max Garzon and Stan Franklin and Dinah Dickerson", title = "Design and Testing of a General Purpose Neurocomputer", journal = jpdc, year = "1992", volume = "14", pages = "203--220", } @Article{wang89, author = "Chan-Hwa Wu, Chia-Jiu Wang and S. Sivasundaram", title = "Neural Network Simulation on Shared-Memory Vector Multiprocessors", journal = "Proceedings of Supercomputing 89, Reno NV.", year = "1989", pages = "197--203", } @Article{yoon90a, author = "Hyunsoo Yoon and Jong H. Nang", title = "Multilayer Neural Networks on Distributed Memory Multiprocessors", journal = "International Neural Network Conference", year = "1990", volume = "2", } @InCollection{Nelson89, author = "M. E. Nelson, W. Furmanski and J. M. Bower", title = "Simulating Neurons and Networks on Parallel Computers", booktitle = "Methods in Neuronal Modeling: From Synapses to Networks", editor = "C. Koch and I. Segev", pages = "397--437", publisher = "{MIT} Press", address = "Cambridge, {MA}", year = "1989", } @Article{pomerleau88, author = "Gusciora, George L., Touretsky, David S., Pomerleau Dean A. and H. T. Kung", title = "Neural network simulation at warp speed: How we go 17 million connections per second", journal = icnn, year = "1988", pages = II:155--161, } @TechReport{singer90, author = "Alexander Singer", title = "Implementations of Artificial Neural Networks on the Connection Machine", institution = "Thinking Machines Corporation", category = "cm2nnmapping", year = "1990", number = "RL90-2", } @InCollection{Zhang90, author = "Xiru Zhang, Micheal McKenna, Jill P. Mesirov and David L. Waltz", title = "An Efficient Implementation of the Back-Propagation Algorith m on the Connection Machine {CM-2}", booktitle = "Advances in Neural Information Processing Systems 2", editor = "David S. Touretzky", pages = "801--809", publisher = "Morgan Kaufmann", address = "San Mateo, {CA}", year = "1990", } @InProceedings{Brown88, author = "J. R. Brown, M. M. Garber and S. F. Vanable", title = "Artificial Neural Network on a {SIMD} Architecture", booktitle = "Proceedings of Frontiers '88, The Second Symposium on the Frontiers of Massively Parallel Processing", address = "Fairfax, {VA}", year = "1988", pages = "43--47", } @Article{kak88, author = "S. C. Kak", title = "A two-layered mesh array for matrix multiplication", journal = "Parallel Computing", year = "1988", volume = "6", number = "383--385", } @InProceedings{Chinn90, author = "G. Chinn, K. A. Grajski, C. Chen, C. Kuszmaul and S. Tomboulian", title = "Systolic Array Implementations of Neural Nets on the {MasPar MP-1} Massively Parallel Processor", booktitle = "International Joint Conference on Neural Networks", address = "San Deigo, {CA}", month = jun, year = "1990", pages = "169--173", } @Article{vipin92, author = "Shekhar, Shashi Kumar Vipin and Minesh Amin", title = "A Highly Parallel Formulation of Backpropagation on Hypercubes: {A} Summary of Results", journal = ijcnn, year = "92", month = nov, } @InProceedings{Manner89, author = "R. {M\"anner}, R. Horner, R. Hanser and A. Genthner", title = "Multiprocessor Simulation of Neural Networks with {NERV}", booktitle = "Proceedings of the Supercomputing Conference", address = "Reno, {NV}", year = "1989", pages = "457--465", } @InCollection{WilsonS89, author = "S. S. Wilson", title = "Neural Computing in a one dimensional {SIMD} array", year = "1989", journal = "Proceedings of the International Joint Conference on Artificial Intelligence", pages = "206--211", } % ------------------------------------------------------------------------ % From Igor Milosavlevich, Oct 6 39 @string{IEEENN="IEEE Trans. Neural Networks"} @string{ICNN="IEEE Int'l Conf. Neural Networks"} @string{IJCNN="IEEE/INNS Int'l Joint Conf. Neural Networks"} %@string{IJCNN="Int'l Joint Conf. on Neural Networks"} @string{NIPS = "Neural Information Processing Systems"} @string{MKP = "Morgan Kaufmann Publishers"} @article{Hwang89, author= "Jenq-Neng Hwang and Sun-Yuan Kung", title= "Parallel Algorithms/Architectures for Neural Networks", journal= "Journal of VLSI Signal Processing", volume= 1, number= 1, year= 1989, pages= "221--251" } @article{Atlas89, author= "Les E. Atlas and Yoshitake Suzuki", title= "Digital Systems for Artificial Neural Networks", journal= "IEEE Circuits and Devices Mag.", volume= 5, number= 6, month= nov, year= 1989, pages= "20--24" } @inproceedings{Rosenberg87, author= "C. Rosenberg and G. Bleloch", title= "An Implementation of Network Learning on the {Connection Machine}", booktitle="Proc. 10th International Joint Conference on Artificial Intelligence", year= 1987, } @inproceedings{Feild88, author= "W. B. Feild and J. K. Navlakha", title= "Transputer Implementation of {Hopfield} Neural Network", booktitle= ICNN # "not here!?", year= 1988, note= "(from Atlas89)" } @inproceedings{Beynon88, author= "T. Beynon ", title= "A Parallel Implementation of the Back-Propagation Algorithm on a Network of Transputers", booktitle= ICNN # "not here!?", year= 1988, note= "(from Atlas89)" } @incollection{Penz86, author= "P. A. Penz and R. Wiggins", title= "Digital Signal Processor Accelerators for Neural Network Simulations", editor= "Denker, J.S.", booktitle= "Neural Networks for Computing", publisher= "American Institute for Physics", address= "New York", year= 1986, note= "(from Atlas89)" } @inproceedings{Kuczewsk88, author= "R. Kuczewsk and M. Meyers and W. Crawford", title= "Neurocomputer Workstations and Processors", booktitle= ICNN # "not here!?", year= 1988, note= "(from Atlas89)" } @inproceedings{Works88, author= "G. Works", title= "The Creation of {Delta}: A New Concept of {ANS} Processing", booktitle= ICNN # "not here!?", year= 1988, note= "(from Atlas89)" } @inproceedings{Heileman92, author= "G. L. Heileman and M. Georgiopoulos and W. D. Roome", title= "Concurrent Object-Oriented Simulation of Neural Network Models", booktitle= IJCNN, volume= "II", pages= "553--559", year= 1992 } @article {Heileman90a, author = {G. L. Heileman and G. M. Papadourakis and M. Georgiopoulos}, title = {A Neural Net Associative Memory for Real-Time Applications}, journal = {Neural Computation}, volume = {2}, number = {1}, pages = {107-115}, year = {1990} } @article {Heileman92a, author = {G. L. Heileman and M. Georgiopoulos and W. D. Roome}, title = {A general framework for concurrent simulation of neural network models}, journal = {IEEE Transactions on Software Engineering}, volume = {18}, number = {7}, pages = {551-562}, year = {1992} } @inproceedings{Kohn91, author = "Phil Kohn and Jeff Bilmes and Nelson Morgan and James Beck", title = "Software for {ANN} training on a {Ring Array Processor}", pages = "781--788", crossref= "NIPS4" } %--------------------------------------------------------------------- % From Manavendra Misra Oct 8 1993 @techreport{misra.rmot_trep, author = "Manavendra Misra and V. K. Prasanna Kumar", title = "{I}mplementation of {N}eural {N}etworks on {M}assive {M}emory {O}rganizations", institution = "Dept. of {EE}-{S}ystems, {U}niversity of {S}outhern {C}alifornia", number = "{CENG} 89-30", month = "November", year = 1989 } @inproceedings{Misra.icpr, author="Manavendra Misra and V. K. Prasanna Kumar", title="{Massive Memory Organizations for Implementing Neural Networks}", booktitle="International Conference on Pattern Recognition", volume = "II", pages = "259-264", month="June", year=1990 } @inproceedings{Misra.spie, author="Manavendra Misra and V. K. Prasanna Kumar", title="Neural network simulation on a {Reduced Mesh of Trees} organization", booktitle="SPIE/SPSE Symposium on Electronic Imaging", month="Feb.", year=1990 } @incollection{misra.bayoumi, author="Manavendra Misra and V. K. Prasanna Kumar", title ="Implementation of {S}parse {N}eural {N}etworks on {F}ixed {S}ize {A}rrays", booktitle="Parallel Algorithms and Architectures for DSP Applications", editor = "Magdy A. Bayoumi", publisher="Kluwer Academic Publishers", year=1991 } @incollection{misra.soucek, author="Manavendra Misra and V. K. Prasanna Kumar", title ="Implementation of Neural Networks on Parallel Architectures", editor = "Branko Sou\v{c}ek", booktitle = "Fast Learning and Invariant Object Recognition", publisher="John Wiley and Sons, Inc.", year=1992, note = "In print" } @phdthesis{misra.thesis, author="Manavendra Misra", title="Implementation of {N}eural {N}etworks on {P}arallel {A}rchitectures", school="Dept. of EE-Systems, University of Southern California", year = 1992 } @Article{misra.tcs.92, author = "Manavendra Misra and V.K. Prasanna", title = "Implementation of Neural Networks on Massive Memory Organizations", journal = "IEEE Transactions on Circuits and Systems", year = 1992, volume = 39, number = 7, pages = "476 -- 480", month = "July" notes = "Problem reported with page numbers?" } % --- From Thilo Reski: thilo@uni-paderborn.de --------------------- @article{Fuji92, AUTHOR = {Y. Fujimoto, N. Fukada, T. Akabane}, JOURNAL = {IEEE Transactions on Neural Networks}, NUMBER = {6}, PAGES = {876-888}, TITLE = {Masssively parallel architectures for large scale neural network simulations }, VOLUME = {3}, YEAR = {1992} } @article{Maha92, AUTHOR = {I. Mahadevan, L. M. Patnaik}, JOURNAL = {Parallel Computing}, NUMBER = {4}, PAGES = {401-413}, TITLE = {Performance evaluation of bidirectional associative memory on a transputer-based parallel system}, VOLUME = {18}, YEAR = {1992} } @article{Orb92, AUTHOR = {Tom Tollenaere, Marc M. Van Hulle, Guy A. Orban}, JOURNAL = {Journal of Parallel and Distributed Computing}, NUMBER = {3}, PAGES = {286-305}, TITLE = {Parallel implementation and capabilities of entropy-driven artificial neural networks}, VOLUME = {14}, YEAR = {1992} } @article{Wang91, AUTHOR = {Chia Jui Wang and Chwan Hwa Wu}, JOURNAL = {Simulation}, NUMBER = {4}, PAGES = {223-232}, TITLE = {Parallel simulation of neural networks}, VOLUME = {56}, YEAR = {1991} } @article{Fuj90, AUTHOR = {Richard M. Fujimoto}, JOURNAL = {Communications of the ACM,}, MONTH = oct, NUMBER = {10}, PAGES = {31-53}, TITLE = {{Parallel Discrete Event Simulation}}, VOLUME = {33}, YEAR = {1990} } %--------------- From Timothy Brown (timxb@bellcore.com) ------------ @InCollection{timxb, author = "Timothy X Brown", title = "A Technique for Mapping Optimization Solutions to Hardware", booktitle = "Proceedings of the International Workshop on Applications of Neural Networks to Telecommunications", publisher = "Erlbaum", year = 1005, address = "Stockholm, Sweden", month = "May", note = "The use of hardware-in-the-loop learning to get better performance from analog vlsi networks for optimization problems." } %--------------- From Mike Glover (mag@curtech.mv.com) ---------------- @InCollection{glover, author = "M. Glover and W. T. Miller", title = "A Massively Parallel SIMD Processor for Neural Network and Machine Vision Applications", booktitle = "Advances in Neural Information Processings Systems 6", publisher = "Morgan Kaufmann", year = 1994, editor = "J.D. Cowan, G. Tesauro and J. Alspector", volume = 6, pages = "843--849", address = "San Mateo, CA" } %----------- Sparse mapping, mostly connected with the CNS ------- @TechReport{ICSI-TR-94-009, author = "Silvia M. Mueller and Benedict Gomes", title = "A Performance Analysis of the {CNS}-1 on Sparse Connectionist Networks", address = "Berkeley, CA", year = "February 1994", publisher = "International Computer Science Institute", number = "TR-94-009", } @TechReport{ICSI-TR-94-038, author = "Davide Anguita and B. Gomes", title = "{MBP} on {TO}: mixing floating- and fixed-point formats in {BP} learning", address = "Berkeley, CA", year = "August 1994", publisher = "International Computer Science Institute", number = "TR-94-038", } @InCollection{texas94, author = "Silvia Mueller and Benedict Gomes", title = "Efficient Mapping of Randomly Sparse Neural Networks on Parallel Vector Supercomputers", booktitle = "Sixth IEEE Symposium on Parallel and Distributed Processing", year = "1994", }