diff --git a/.Rbuildignore b/.Rbuildignore old mode 100755 new mode 100644 index fef17cb7..50a51ba3 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -1,8 +1,20 @@ -^.*\.Rproj$ +^CRAN-RELEASE$ +## MEMO: While gitignore uses shell glob, Rbuildignore uses regex. + ^\.Rproj\.user$ -^.*\.o$ -^\cleanup* -^.*\.Rmd -^\.editorconfig -^\.travis\.yml -^\.gitignore +\.Rproj$ +\.Rmd$ +\.o$ +^docs/ +^man-roxygen/ +^src/stan_files/.*\.o$ +^src/stan_files/.*\.cc$ +^src/stan_files/.*\.hpp$ +^README\.md$ +^LICENSE$ +^\.editorconfig$ +^\.gitignore$ +^\.travis\.yml$ +^_pkgdown\.yml$ +^codecov\.yml$ +^cran-comments\.md$ diff --git a/.gitignore b/.gitignore old mode 100755 new mode 100644 index 55107fef..6dd55f72 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,8 @@ +## MEMO: While gitignore uses shell glob, Rbuildignore uses regex. + +# RStudio files +.Rproj.user + # History files .Rhistory .Rapp.history @@ -30,13 +35,15 @@ vignettes/*.pdf *.utf8.md *.knit.md -# Source files -src/Modules* +# Source files (new) +src/stan_files/*.o +src/stan_files/*.o.tmp +src/stan_files/*.cc +src/stan_files/*.hpp +src/RcppExports.* src/hBayesDM.so src/hBayesDM.dll -src/include -src/RcppExports.* - -# RStudio files -.Rproj.user +src/init.o +# CRAN-RELEASE +CRAN-RELEASE diff --git a/.travis.yml b/.travis.yml index 487f26d8..c0c2bc08 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,43 +1,80 @@ language: r -r: - - oldrel - - release +sudo: false -# Be strict when checking our package -warnings_are_errors: false +branches: + only: + - master + - develop + - /release\/.*/ + - /hotfix\/.*/ + - /bugfix\/.*/ -# Sudo is required -sudo: true - -# Use trusty distribution of linux -dist: trusty - -# Use both linux and osx operating systems -os: - - linux - - osx +r_build_args: '--no-build-vignettes' +r_check_args: '--ignore-vignettes' # Use cache for packages cache: apt: true packages: true + ccache: true + +env: + global: + - MAKEFLAGS="-j 2" -# System dependencies for HTTP calling -addons: - apt: - packages: - - libgit2-dev 
- - libcurl4-openssl-dev - - libxml2-dev +matrix: + include: + - name: Ubuntu + g++-7 + os: linux + dist: trusty + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - gcc-7 + - g++-7 + - gfortran-7 + env: + - MATRIX_EVAL="CC=gcc-7 && CXX=g++-7" + - name: Ubuntu + g++-7 (BUILD_ALL) + os: linux + dist: trusty + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - gcc-7 + - g++-7 + - gfortran-7 + env: + - MATRIX_EVAL="CC=gcc-7 && CXX=g++-7" + - BUILD_ALL="true" + +before_install: + - eval "${MATRIX_EVAL}" + - mkdir -p ~/.R/ + - echo "CC = $CC" >> ~/.R/Makevars + - echo "CXX = ${CXX} -fPIC " >> ~/.R/Makevars + - echo "CXX14 = ${CXX} -fPIC -flto=2" >> ~/.R/Makevars + - echo "CXX14FLAGS = -mtune=native -march=native -Wno-ignored-attributes -O0" >> ~/.R/Makevars + +install: + - R -e 'install.packages("devtools", quiet = T)' -e 'devtools::install_deps(dep = T, quiet = T)' script: - - travis_wait 30 R CMD build . + - travis_wait 42 R CMD build . + - travis_wait 59 R CMD check hBayesDM*.tar.gz --as-cran r_binary_packages: - testthat -r_github_packages: - - jimhester/covr +# r_github_packages: +# - r-lib/covr + +# after_success: +# - Rscript -e 'covr::codecov()' -after_success: - - Rscript -e 'covr::codecov()' +after_failure: + - cat hBayesDM.Rcheck/00* diff --git a/DESCRIPTION b/DESCRIPTION old mode 100755 new mode 100644 index 0fedb6f0..9ff0a4ee --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,14 +1,13 @@ Package: hBayesDM Title: Hierarchical Bayesian Modeling of Decision-Making Tasks -Version: 0.6.3 -Date: 2018-10-30 +Version: 0.7.0 +Date: 2018-12-14 Author: Woo-Young Ahn [aut, cre], Nate Haines [aut], Lei Zhang [aut], Harhim Park [ctb], Jaeyeong Yang [ctb], - Dayeong Min [ctb], Jethro Lee [ctb] Authors@R: c( person("Woo-Young", "Ahn", email = "wooyoung.ahn@gmail.com", role = c("aut", "cre")), @@ -16,7 +15,6 @@ Authors@R: c( person("Lei", "Zhang", email = "bnuzhanglei2008@gmail.com", role = c("aut")), person("Harhim", "Park", email = 
"hrpark12@gmail.com", role = c("ctb")), person("Jaeyeong", "Yang", email = "jaeyeong.yang1125@gmail.com", role = c("ctb")), - person("Dayeong", "Min", email = "mindy2801@snu.ac.kr", role = c("ctb")), person("Jethro", "Lee", email = "dlemfh96@snu.ac.kr", role = c("ctb"))) Maintainer: Woo-Young Ahn Description: @@ -24,25 +22,88 @@ Description: a hierarchical Bayesian framework. Can perform hierarchical Bayesian analysis of various computational models with a single line of coding. Depends: - R (>= 3.4.0), - Rcpp (>= 0.12.0), - methods + R (>= 3.4.0) Imports: + methods, + Rcpp (>= 0.12.0), rstan (>= 2.18.1), loo (>= 2.0), grid, parallel, - ggplot2 + ggplot2, + data.table LinkingTo: - StanHeaders (>= 2.18.0), - rstan (>= 2.18.1), BH (>= 1.66.0), Rcpp (>= 0.12.0), - RcppEigen (>= 0.3.3.3.0) + RcppEigen (>= 0.3.3.3.0), + rstan (>= 2.18.1), + StanHeaders (>= 2.18.0) URL: https://rpubs.com/CCSL/hBayesDM BugReports: https://github.com/CCS-Lab/hBayesDM/issues License: GPL-3 LazyData: true NeedsCompilation: yes Encoding: UTF-8 -RoxygenNote: 6.1.0 +RoxygenNote: 6.1.1 +SystemRequirements: GNU make +Collate: + 'HDIofMCMC.R' + 'stanmodels.R' + 'settings.R' + 'hBayesDM_model.R' + 'bandit2arm_delta.R' + 'bandit4arm2_kalman_filter.R' + 'bandit4arm_4par.R' + 'bandit4arm_lapse.R' + 'bart_par4.R' + 'choiceRT_ddm.R' + 'choiceRT_ddm_single.R' + 'choiceRT_lba.R' + 'choiceRT_lba_single.R' + 'cra_exp.R' + 'cra_linear.R' + 'dbdm_prob_weight.R' + 'dd_cs.R' + 'dd_cs_single.R' + 'dd_exp.R' + 'dd_hyperbolic.R' + 'dd_hyperbolic_single.R' + 'estimate_mode.R' + 'extract_ic.R' + 'gng_m1.R' + 'gng_m2.R' + 'gng_m3.R' + 'gng_m4.R' + 'hBayesDM.R' + 'igt_orl.R' + 'igt_pvl_decay.R' + 'igt_pvl_delta.R' + 'igt_vpp.R' + 'multiplot.R' + 'peer_ocu.R' + 'plot.hBayesDM.R' + 'plotDist.R' + 'plotHDI.R' + 'plotInd.R' + 'printFit.R' + 'prl_ewa.R' + 'prl_fictitious.R' + 'prl_fictitious_multipleB.R' + 'prl_fictitious_rp.R' + 'prl_fictitious_rp_woa.R' + 'prl_fictitious_woa.R' + 'prl_rp.R' + 'prl_rp_multipleB.R' + 
'pst_gainloss_Q.R' + 'ra_noLA.R' + 'ra_noRA.R' + 'ra_prospect.R' + 'rdt_happiness.R' + 'rhat.R' + 'ts_par4.R' + 'ts_par6.R' + 'ts_par7.R' + 'ug_bayes.R' + 'ug_delta.R' + 'wcs_sql.R' + 'zzz.R' diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..ab7fe466 --- /dev/null +++ b/LICENSE @@ -0,0 +1,595 @@ +GNU General Public License +========================== + +_Version 3, 29 June 2007_ +_Copyright © 2007 Free Software Foundation, Inc. <>_ + +Everyone is permitted to copy and distribute verbatim copies of this license +document, but changing it is not allowed. + +## Preamble + +The GNU General Public License is a free, copyleft license for software and other +kinds of works. + +The licenses for most software and other practical works are designed to take away +your freedom to share and change the works. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change all versions of a +program--to make sure it remains free software for all its users. We, the Free +Software Foundation, use the GNU General Public License for most of our software; it +applies also to any other work released this way by its authors. You can apply it to +your programs, too. + +When we speak of free software, we are referring to freedom, not price. Our General +Public Licenses are designed to make sure that you have the freedom to distribute +copies of free software (and charge for them if you wish), that you receive source +code or can get it if you want it, that you can change the software or use pieces of +it in new free programs, and that you know you can do these things. + +To protect your rights, we need to prevent others from denying you these rights or +asking you to surrender the rights. Therefore, you have certain responsibilities if +you distribute copies of the software, or if you modify it: responsibilities to +respect the freedom of others. 
+ +For example, if you distribute copies of such a program, whether gratis or for a fee, +you must pass on to the recipients the same freedoms that you received. You must make +sure that they, too, receive or can get the source code. And you must show them these +terms so they know their rights. + +Developers that use the GNU GPL protect your rights with two steps: **(1)** assert +copyright on the software, and **(2)** offer you this License giving you legal permission +to copy, distribute and/or modify it. + +For the developers' and authors' protection, the GPL clearly explains that there is +no warranty for this free software. For both users' and authors' sake, the GPL +requires that modified versions be marked as changed, so that their problems will not +be attributed erroneously to authors of previous versions. + +Some devices are designed to deny users access to install or run modified versions of +the software inside them, although the manufacturer can do so. This is fundamentally +incompatible with the aim of protecting users' freedom to change the software. The +systematic pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we have designed +this version of the GPL to prohibit the practice for those products. If such problems +arise substantially in other domains, we stand ready to extend this provision to +those domains in future versions of the GPL, as needed to protect the freedom of +users. + +Finally, every program is threatened constantly by software patents. States should +not allow patents to restrict development and use of software on general-purpose +computers, but in those that do, we wish to avoid the special danger that patents +applied to a free program could make it effectively proprietary. To prevent this, the +GPL assures that patents cannot be used to render the program non-free. 
+ +The precise terms and conditions for copying, distribution and modification follow. + +## TERMS AND CONDITIONS + +### 0. Definitions + +“This License” refers to version 3 of the GNU General Public License. + +“Copyright” also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + +“The Program” refers to any copyrightable work licensed under this +License. Each licensee is addressed as “you”. “Licensees” and +“recipients” may be individuals or organizations. + +To “modify” a work means to copy from or adapt all or part of the work in +a fashion requiring copyright permission, other than the making of an exact copy. The +resulting work is called a “modified version” of the earlier work or a +work “based on” the earlier work. + +A “covered work” means either the unmodified Program or a work based on +the Program. + +To “propagate” a work means to do anything with it that, without +permission, would make you directly or secondarily liable for infringement under +applicable copyright law, except executing it on a computer or modifying a private +copy. Propagation includes copying, distribution (with or without modification), +making available to the public, and in some countries other activities as well. + +To “convey” a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through a computer +network, with no transfer of a copy, is not conveying. + +An interactive user interface displays “Appropriate Legal Notices” to the +extent that it includes a convenient and prominently visible feature that **(1)** +displays an appropriate copyright notice, and **(2)** tells the user that there is no +warranty for the work (except to the extent that warranties are provided), that +licensees may convey the work under this License, and how to view a copy of this +License. 
If the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + +### 1. Source Code + +The “source code” for a work means the preferred form of the work for +making modifications to it. “Object code” means any non-source form of a +work. + +A “Standard Interface” means an interface that either is an official +standard defined by a recognized standards body, or, in the case of interfaces +specified for a particular programming language, one that is widely used among +developers working in that language. + +The “System Libraries” of an executable work include anything, other than +the work as a whole, that **(a)** is included in the normal form of packaging a Major +Component, but which is not part of that Major Component, and **(b)** serves only to +enable use of the work with that Major Component, or to implement a Standard +Interface for which an implementation is available to the public in source code form. +A “Major Component”, in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system (if any) on which +the executable work runs, or a compiler used to produce the work, or an object code +interpreter used to run it. + +The “Corresponding Source” for a work in object code form means all the +source code needed to generate, install, and (for an executable work) run the object +code and to modify the work, including scripts to control those activities. However, +it does not include the work's System Libraries, or general-purpose tools or +generally available free programs which are used unmodified in performing those +activities but which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for the work, and +the source code for shared libraries and dynamically linked subprograms that the work +is specifically designed to require, such as by intimate data communication or +control flow between those subprograms and other parts of the work. + +The Corresponding Source need not include anything that users can regenerate +automatically from other parts of the Corresponding Source. + +The Corresponding Source for a work in source code form is that same work. + +### 2. Basic Permissions + +All rights granted under this License are granted for the term of copyright on the +Program, and are irrevocable provided the stated conditions are met. This License +explicitly affirms your unlimited permission to run the unmodified Program. The +output from running a covered work is covered by this License only if the output, +given its content, constitutes a covered work. This License acknowledges your rights +of fair use or other equivalent, as provided by copyright law. + +You may make, run and propagate covered works that you do not convey, without +conditions so long as your license otherwise remains in force. You may convey covered +works to others for the sole purpose of having them make modifications exclusively +for you, or provide you with facilities for running those works, provided that you +comply with the terms of this License in conveying all material for which you do not +control copyright. Those thus making or running the covered works for you must do so +exclusively on your behalf, under your direction and control, on terms that prohibit +them from making any copies of your copyrighted material outside their relationship +with you. + +Conveying under any other circumstances is permitted solely under the conditions +stated below. Sublicensing is not allowed; section 10 makes it unnecessary. + +### 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law + +No covered work shall be deemed part of an effective technological measure under any +applicable law fulfilling obligations under article 11 of the WIPO copyright treaty +adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention +of such measures. + +When you convey a covered work, you waive any legal power to forbid circumvention of +technological measures to the extent such circumvention is effected by exercising +rights under this License with respect to the covered work, and you disclaim any +intention to limit operation or modification of the work as a means of enforcing, +against the work's users, your or third parties' legal rights to forbid circumvention +of technological measures. + +### 4. Conveying Verbatim Copies + +You may convey verbatim copies of the Program's source code as you receive it, in any +medium, provided that you conspicuously and appropriately publish on each copy an +appropriate copyright notice; keep intact all notices stating that this License and +any non-permissive terms added in accord with section 7 apply to the code; keep +intact all notices of the absence of any warranty; and give all recipients a copy of +this License along with the Program. + +You may charge any price or no price for each copy that you convey, and you may offer +support or warranty protection for a fee. + +### 5. Conveying Modified Source Versions + +You may convey a work based on the Program, or the modifications to produce it from +the Program, in the form of source code under the terms of section 4, provided that +you also meet all of these conditions: + +* **a)** The work must carry prominent notices stating that you modified it, and giving a +relevant date. +* **b)** The work must carry prominent notices stating that it is released under this +License and any conditions added under section 7. 
This requirement modifies the +requirement in section 4 to “keep intact all notices”. +* **c)** You must license the entire work, as a whole, under this License to anyone who +comes into possession of a copy. This License will therefore apply, along with any +applicable section 7 additional terms, to the whole of the work, and all its parts, +regardless of how they are packaged. This License gives no permission to license the +work in any other way, but it does not invalidate such permission if you have +separately received it. +* **d)** If the work has interactive user interfaces, each must display Appropriate Legal +Notices; however, if the Program has interactive interfaces that do not display +Appropriate Legal Notices, your work need not make them do so. + +A compilation of a covered work with other separate and independent works, which are +not by their nature extensions of the covered work, and which are not combined with +it such as to form a larger program, in or on a volume of a storage or distribution +medium, is called an “aggregate” if the compilation and its resulting +copyright are not used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work in an aggregate +does not cause this License to apply to the other parts of the aggregate. + +### 6. Conveying Non-Source Forms + +You may convey a covered work in object code form under the terms of sections 4 and +5, provided that you also convey the machine-readable Corresponding Source under the +terms of this License, in one of these ways: + +* **a)** Convey the object code in, or embodied in, a physical product (including a +physical distribution medium), accompanied by the Corresponding Source fixed on a +durable physical medium customarily used for software interchange. 
+* **b)** Convey the object code in, or embodied in, a physical product (including a +physical distribution medium), accompanied by a written offer, valid for at least +three years and valid for as long as you offer spare parts or customer support for +that product model, to give anyone who possesses the object code either **(1)** a copy of +the Corresponding Source for all the software in the product that is covered by this +License, on a durable physical medium customarily used for software interchange, for +a price no more than your reasonable cost of physically performing this conveying of +source, or **(2)** access to copy the Corresponding Source from a network server at no +charge. +* **c)** Convey individual copies of the object code with a copy of the written offer to +provide the Corresponding Source. This alternative is allowed only occasionally and +noncommercially, and only if you received the object code with such an offer, in +accord with subsection 6b. +* **d)** Convey the object code by offering access from a designated place (gratis or for +a charge), and offer equivalent access to the Corresponding Source in the same way +through the same place at no further charge. You need not require recipients to copy +the Corresponding Source along with the object code. If the place to copy the object +code is a network server, the Corresponding Source may be on a different server +(operated by you or a third party) that supports equivalent copying facilities, +provided you maintain clear directions next to the object code saying where to find +the Corresponding Source. Regardless of what server hosts the Corresponding Source, +you remain obligated to ensure that it is available for as long as needed to satisfy +these requirements. 
+* **e)** Convey the object code using peer-to-peer transmission, provided you inform +other peers where the object code and Corresponding Source of the work are being +offered to the general public at no charge under subsection 6d. + +A separable portion of the object code, whose source code is excluded from the +Corresponding Source as a System Library, need not be included in conveying the +object code work. + +A “User Product” is either **(1)** a “consumer product”, which +means any tangible personal property which is normally used for personal, family, or +household purposes, or **(2)** anything designed or sold for incorporation into a +dwelling. In determining whether a product is a consumer product, doubtful cases +shall be resolved in favor of coverage. For a particular product received by a +particular user, “normally used” refers to a typical or common use of +that class of product, regardless of the status of the particular user or of the way +in which the particular user actually uses, or expects or is expected to use, the +product. A product is a consumer product regardless of whether the product has +substantial commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + +“Installation Information” for a User Product means any methods, +procedures, authorization keys, or other information required to install and execute +modified versions of a covered work in that User Product from a modified version of +its Corresponding Source. The information must suffice to ensure that the continued +functioning of the modified object code is in no case prevented or interfered with +solely because modification has been made. 
+ +If you convey an object code work under this section in, or with, or specifically for +use in, a User Product, and the conveying occurs as part of a transaction in which +the right of possession and use of the User Product is transferred to the recipient +in perpetuity or for a fixed term (regardless of how the transaction is +characterized), the Corresponding Source conveyed under this section must be +accompanied by the Installation Information. But this requirement does not apply if +neither you nor any third party retains the ability to install modified object code +on the User Product (for example, the work has been installed in ROM). + +The requirement to provide Installation Information does not include a requirement to +continue to provide support service, warranty, or updates for a work that has been +modified or installed by the recipient, or for the User Product in which it has been +modified or installed. Access to a network may be denied when the modification itself +materially and adversely affects the operation of the network or violates the rules +and protocols for communication across the network. + +Corresponding Source conveyed, and Installation Information provided, in accord with +this section must be in a format that is publicly documented (and with an +implementation available to the public in source code form), and must require no +special password or key for unpacking, reading or copying. + +### 7. Additional Terms + +“Additional permissions” are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. Additional +permissions that are applicable to the entire Program shall be treated as though they +were included in this License, to the extent that they are valid under applicable +law. 
If additional permissions apply only to part of the Program, that part may be +used separately under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + +When you convey a copy of a covered work, you may at your option remove any +additional permissions from that copy, or from any part of it. (Additional +permissions may be written to require their own removal in certain cases when you +modify the work.) You may place additional permissions on material, added by you to a +covered work, for which you have or can give appropriate copyright permission. + +Notwithstanding any other provision of this License, for material you add to a +covered work, you may (if authorized by the copyright holders of that material) +supplement the terms of this License with terms: + +* **a)** Disclaiming warranty or limiting liability differently from the terms of +sections 15 and 16 of this License; or +* **b)** Requiring preservation of specified reasonable legal notices or author +attributions in that material or in the Appropriate Legal Notices displayed by works +containing it; or +* **c)** Prohibiting misrepresentation of the origin of that material, or requiring that +modified versions of such material be marked in reasonable ways as different from the +original version; or +* **d)** Limiting the use for publicity purposes of names of licensors or authors of the +material; or +* **e)** Declining to grant rights under trademark law for use of some trade names, +trademarks, or service marks; or +* **f)** Requiring indemnification of licensors and authors of that material by anyone +who conveys the material (or modified versions of it) with contractual assumptions of +liability to the recipient, for any liability that these contractual assumptions +directly impose on those licensors and authors. + +All other non-permissive additional terms are considered “further +restrictions” within the meaning of section 10. 
If the Program as you received +it, or any part of it, contains a notice stating that it is governed by this License +along with a term that is a further restriction, you may remove that term. If a +license document contains a further restriction but permits relicensing or conveying +under this License, you may add to a covered work material governed by the terms of +that license document, provided that the further restriction does not survive such +relicensing or conveying. + +If you add terms to a covered work in accord with this section, you must place, in +the relevant source files, a statement of the additional terms that apply to those +files, or a notice indicating where to find the applicable terms. + +Additional terms, permissive or non-permissive, may be stated in the form of a +separately written license, or stated as exceptions; the above requirements apply +either way. + +### 8. Termination + +You may not propagate or modify a covered work except as expressly provided under +this License. Any attempt otherwise to propagate or modify it is void, and will +automatically terminate your rights under this License (including any patent licenses +granted under the third paragraph of section 11). + +However, if you cease all violation of this License, then your license from a +particular copyright holder is reinstated **(a)** provisionally, unless and until the +copyright holder explicitly and finally terminates your license, and **(b)** permanently, +if the copyright holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + +Moreover, your license from a particular copyright holder is reinstated permanently +if the copyright holder notifies you of the violation by some reasonable means, this +is the first time you have received notice of violation of this License (for any +work) from that copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ +Termination of your rights under this section does not terminate the licenses of +parties who have received copies or rights from you under this License. If your +rights have been terminated and not permanently reinstated, you do not qualify to +receive new licenses for the same material under section 10. + +### 9. Acceptance Not Required for Having Copies + +You are not required to accept this License in order to receive or run a copy of the +Program. Ancillary propagation of a covered work occurring solely as a consequence of +using peer-to-peer transmission to receive a copy likewise does not require +acceptance. However, nothing other than this License grants you permission to +propagate or modify any covered work. These actions infringe copyright if you do not +accept this License. Therefore, by modifying or propagating a covered work, you +indicate your acceptance of this License to do so. + +### 10. Automatic Licensing of Downstream Recipients + +Each time you convey a covered work, the recipient automatically receives a license +from the original licensors, to run, modify and propagate that work, subject to this +License. You are not responsible for enforcing compliance by third parties with this +License. + +An “entity transaction” is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an organization, or +merging organizations. If propagation of a covered work results from an entity +transaction, each party to that transaction who receives a copy of the work also +receives whatever licenses to the work the party's predecessor in interest had or +could give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if the predecessor +has it or can get it with reasonable efforts. + +You may not impose any further restrictions on the exercise of the rights granted or +affirmed under this License. 
For example, you may not impose a license fee, royalty, +or other charge for exercise of rights granted under this License, and you may not +initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging +that any patent claim is infringed by making, using, selling, offering for sale, or +importing the Program or any portion of it. + +### 11. Patents + +A “contributor” is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The work thus +licensed is called the contributor's “contributor version”. + +A contributor's “essential patent claims” are all patent claims owned or +controlled by the contributor, whether already acquired or hereafter acquired, that +would be infringed by some manner, permitted by this License, of making, using, or +selling its contributor version, but do not include claims that would be infringed +only as a consequence of further modification of the contributor version. For +purposes of this definition, “control” includes the right to grant patent +sublicenses in a manner consistent with the requirements of this License. + +Each contributor grants you a non-exclusive, worldwide, royalty-free patent license +under the contributor's essential patent claims, to make, use, sell, offer for sale, +import and otherwise run, modify and propagate the contents of its contributor +version. + +In the following three paragraphs, a “patent license” is any express +agreement or commitment, however denominated, not to enforce a patent (such as an +express permission to practice a patent or covenant not to sue for patent +infringement). To “grant” such a patent license to a party means to make +such an agreement or commitment not to enforce a patent against the party. 
+ +If you convey a covered work, knowingly relying on a patent license, and the +Corresponding Source of the work is not available for anyone to copy, free of charge +and under the terms of this License, through a publicly available network server or +other readily accessible means, then you must either **(1)** cause the Corresponding +Source to be so available, or **(2)** arrange to deprive yourself of the benefit of the +patent license for this particular work, or **(3)** arrange, in a manner consistent with +the requirements of this License, to extend the patent license to downstream +recipients. “Knowingly relying” means you have actual knowledge that, but +for the patent license, your conveying the covered work in a country, or your +recipient's use of the covered work in a country, would infringe one or more +identifiable patents in that country that you have reason to believe are valid. + +If, pursuant to or in connection with a single transaction or arrangement, you +convey, or propagate by procuring conveyance of, a covered work, and grant a patent +license to some of the parties receiving the covered work authorizing them to use, +propagate, modify or convey a specific copy of the covered work, then the patent +license you grant is automatically extended to all recipients of the covered work and +works based on it. + +A patent license is “discriminatory” if it does not include within the +scope of its coverage, prohibits the exercise of, or is conditioned on the +non-exercise of one or more of the rights that are specifically granted under this +License. 
You may not convey a covered work if you are a party to an arrangement with +a third party that is in the business of distributing software, under which you make +payment to the third party based on the extent of your activity of conveying the +work, and under which the third party grants, to any of the parties who would receive +the covered work from you, a discriminatory patent license **(a)** in connection with +copies of the covered work conveyed by you (or copies made from those copies), or **(b)** +primarily for and in connection with specific products or compilations that contain +the covered work, unless you entered into that arrangement, or that patent license +was granted, prior to 28 March 2007. + +Nothing in this License shall be construed as excluding or limiting any implied +license or other defenses to infringement that may otherwise be available to you +under applicable patent law. + +### 12. No Surrender of Others' Freedom + +If conditions are imposed on you (whether by court order, agreement or otherwise) +that contradict the conditions of this License, they do not excuse you from the +conditions of this License. If you cannot convey a covered work so as to satisfy +simultaneously your obligations under this License and any other pertinent +obligations, then as a consequence you may not convey it at all. For example, if you +agree to terms that obligate you to collect a royalty for further conveying from +those to whom you convey the Program, the only way you could satisfy both those terms +and this License would be to refrain entirely from conveying the Program. + +### 13. Use with the GNU Affero General Public License + +Notwithstanding any other provision of this License, you have permission to link or +combine any covered work with a work licensed under version 3 of the GNU Affero +General Public License into a single combined work, and to convey the resulting work. 
+The terms of this License will continue to apply to the part which is the covered +work, but the special requirements of the GNU Affero General Public License, section +13, concerning interaction through a network will apply to the combination as such. + +### 14. Revised Versions of this License + +The Free Software Foundation may publish revised and/or new versions of the GNU +General Public License from time to time. Such new versions will be similar in spirit +to the present version, but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program specifies that +a certain numbered version of the GNU General Public License “or any later +version” applies to it, you have the option of following the terms and +conditions either of that numbered version or of any later version published by the +Free Software Foundation. If the Program does not specify a version number of the GNU +General Public License, you may choose any version ever published by the Free +Software Foundation. + +If the Program specifies that a proxy can decide which future versions of the GNU +General Public License can be used, that proxy's public statement of acceptance of a +version permanently authorizes you to choose that version for the Program. + +Later license versions may give you additional or different permissions. However, no +additional obligations are imposed on any author or copyright holder as a result of +your choosing to follow a later version. + +### 15. Disclaimer of Warranty + +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER +EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
THE ENTIRE RISK AS TO THE +QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE +DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + +### 16. Limitation of Liability + +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY +COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS +PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, +INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE +OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE +WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + +### 17. Interpretation of Sections 15 and 16 + +If the disclaimer of warranty and limitation of liability provided above cannot be +given local legal effect according to their terms, reviewing courts shall apply local +law that most closely approximates an absolute waiver of all civil liability in +connection with the Program, unless a warranty or assumption of liability accompanies +a copy of the Program in return for a fee. + +_END OF TERMS AND CONDITIONS_ + +## How to Apply These Terms to Your New Programs + +If you develop a new program, and you want it to be of the greatest possible use to +the public, the best way to achieve this is to make it free software which everyone +can redistribute and change under these terms. + +To do so, attach the following notices to the program. It is safest to attach them +to the start of each source file to most effectively state the exclusion of warranty; +and each file should have at least the “copyright” line and a pointer to +where the full notice is found. + + hBayesDM: An R package for hierarchical Bayesian modeling of RLDM tasks. 
+
+ Copyright (C) 2018 CCS-Lab
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program does terminal interaction, make it output a short notice like this
+when it starts in an interactive mode:
+
+ hBayesDM Copyright (C) 2018 CCS-Lab
+ This program comes with ABSOLUTELY NO WARRANTY; for details type 'show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type 'show c' for details.
+
+The hypothetical commands `show w` and `show c` should show the appropriate parts of
+the General Public License. Of course, your program's commands might be different;
+for a GUI interface, you would use an “about box”.
+
+You should also get your employer (if you work as a programmer) or school, if any, to
+sign a “copyright disclaimer” for the program, if necessary. For more
+information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+The GNU General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may consider it
+more useful to permit linking proprietary applications with the library. If this is
+what you want to do, use the GNU Lesser General Public License instead of this
+License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/NAMESPACE b/NAMESPACE index ec5c2f1a..b40eb63c 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -3,6 +3,7 @@ S3method(plot,hBayesDM) export(HDIofMCMC) export(bandit2arm_delta) +export(bandit4arm2_kalman_filter) export(bandit4arm_4par) export(bandit4arm_lapse) export(bart_par4) @@ -12,6 +13,7 @@ export(choiceRT_lba) export(choiceRT_lba_single) export(cra_exp) export(cra_linear) +export(dbdm_prob_weight) export(dd_cs) export(dd_cs_single) export(dd_exp) @@ -23,6 +25,7 @@ export(gng_m1) export(gng_m2) export(gng_m3) export(gng_m4) +export(hBayesDM_model) export(igt_orl) export(igt_pvl_decay) export(igt_pvl_delta) @@ -55,6 +58,7 @@ export(ug_delta) export(wcs_sql) import(Rcpp) import(methods) +importFrom(data.table,fread) importFrom(ggplot2,aes) importFrom(ggplot2,geom_histogram) importFrom(ggplot2,geom_segment) @@ -80,8 +84,11 @@ importFrom(rstan,stan_plot) importFrom(rstan,summary) importFrom(rstan,traceplot) importFrom(rstan,vb) +importFrom(stats,aggregate) +importFrom(stats,complete.cases) importFrom(stats,density) importFrom(stats,median) importFrom(stats,qnorm) +importFrom(utils,head) importFrom(utils,read.table) useDynLib(hBayesDM, .registration = TRUE) diff --git a/inst/NEWS b/NEWS.md old mode 100755 new mode 100644 similarity index 54% rename from inst/NEWS rename to NEWS.md index 0165d390..05bbe4de --- a/inst/NEWS +++ b/NEWS.md @@ -1,27 +1,42 @@ -Oct 30, 2018 (0.6.3) +# hBayesDM 0.7.0 + +* Now, in default, you should build a Stan file into a binary for the first time to use it. To build all the models on installation, you should set an environmental variable `BUILD_ALL` to `true` before installation. +* Now all the implemented models are refactored using `hBayesDM_model` function. You don't have to change anything to use them, but developers can easily implement new model now! 
+* We added a Kalman filter model for 4-armed bandit task (`bandit4arm2_kalman_filter`; Daw et al., 2006) and a probability weighting function for general description-based tasks (`dbdm_prob_weight`; Erev et al., 2010; Hertwig et al., 2004; Jessup et al., 2008).
+* Initial values of parameter estimation for some models are updated as plausible values, and the parameter boundaries of several models are fixed (see more on issues #63 and #64 on GitHub).
+* Exponential and linear models for choice under risk and ambiguity task now have four model regressors: `sv`, `sv_fix`, `sv_var`, and `p_var`.
+* Fix the Travis CI settings and related code so that the builds pass properly.
+
+# hBayesDM 0.6.3
+
* Update the dependencies on rstan (>= 2.18.1)
* No changes on model files, as same as the version 0.6.2
-Oct 19, 2018 (0.6.2)
+# hBayesDM 0.6.2
+
* Fix an error on choiceRT_ddm (#44)
-Oct 9, 2018 (0.6.1)
+# hBayesDM 0.6.1
+
* Solve an issue with built binary files.
* Fix an error on peer_ocu with misplaced parentheses.
-Sep 11, 2018 (0.6.0)
+# hBayesDM 0.6.0
+
* Add new tasks (Balloon Analogue Risk Task, Choice under Risk and Ambiguity Task, Probabilistic Selection Task, Risky Decision Task (a.k.a. 
Happiness task), Wisconsin Card Sorting Task) * Add a new model for the Iowa Gambling Task (igt_orl) * Change priors (Half-Cauchy(0, 5) --> Half-Cauchy(0, 1) or Half-Normal(0, 0.2) * printFit function now provides LOOIC weights and/or WAIC weights -March 26, 2018 (0.5.1) +# hBayesDM 0.5.1 + * Add models for the Two Step task * Add models without indecision point parameter (alpha) for the PRL task (prl_*_woa.stan) * Model-based regressors for the PRL task are now available * For the PRL task & prl_fictitious.stan & prl_fictitious_rp.stan --> change the range of alpha (indecision point) from [0, 1] to [-Inf, Inf] -Dec 25, 2017 (0.5.0) +# hBayesDM 0.5.0 + * Support variational Bayesian methods (vb=TRUE) * Allow posterior predictive checks, except for drift-diffusion models (inc_postpred=TRUE) * Add the peer influence task (Chung et al., 2015, USE WITH CAUTION for now and PLEASE GIVE US FEEDBACK!) @@ -31,7 +46,8 @@ Dec 25, 2017 (0.5.0) * Email feature is disabled as R mail package does not allow users to send anonymous emails anymore. * When outputs are saved as a file (*.RData), the file name now contains the name of the data file. -May 20, 2017 (0.4.0) +# hBayesDM 0.4.0 + * Add a choice reaction time task and evidence accumulation models - Drift diffusion model (both hierarchical and single-subject) - Linear Ballistic Accumulator (LBA) model (both hierarchical and single-subject) @@ -40,47 +56,54 @@ May 20, 2017 (0.4.0) * Standardize variable names across all models (e.g., `rewlos` --> `outcome` for all models) * Separate versions for CRAN and GitHub. All models/features are identical but the GitHub version contains precompilled models. -Jan 22, 2017 (0.3.1) +# hBayesDM 0.3.1 + * Remove dependence on the modeest package. Now use a built-in function to estimate the mode of a posterior distribution. * Rewrite the "printFit" function. -Jan 20, 2017 (0.3.0) +# hBayesDM 0.3.0 + * Made several changes following the guidelines for R packages providing interfaces to Stan. 
* Stan models are precompiled and models will run immediately when called. * The default number of chains is set to 4. * The default value of `adapt_delta` is set to 0.95 to reduce the potential for divergences. * The “printFit” function uses LOOIC by default. Users can select WAIC or both (LOOIC & WAIC) if needed. -Dec 28, 2016 (0.2.3.3) -1. Change - * Add help files - * Add a function for checking Rhat values (rhat). - * Change a link to its tutorial website - -Dec 21, 2016 (0.2.3.2) -1. Change - * Use wide normal distributions for unbounded parameters (gng_* models). - * Automatic removal of rows (trials) containing NAs. - -Sep 29, 2016 (0.2.3.1) -1. Change - * Add a function for plotting individual parameters (plotInd) - -Sat July 16 2016 (0.2.3) -1. Change - * Add a new task: the Ultimatum Game - * Add new models for the Probabilistic Reversal Learning and Risk Aversion tasks - * ‘bandit2arm’ -> change its name to ‘bandit2arm_delta’. Now all model names are in the same format (i.e., TASK_MODEL). - * Users can extract model-based regressors from gng_m* models - * Include the option of customizing control parameters (adapt_delta, max_treedepth, stepsize) - * ‘plotHDI’ function -> add ‘fontSize’ argument & change the color of histogram - -Sat Apr 02 2016 (0.2.1) -1. Bug fixes - * All models: Fix errors when indPars=“mode” - * ra_prospect model: Add description for column names of a data (*.txt) file -2. Change - * Change standard deviations of ‘b’ and ‘pi’ priors in gng_* models - -Fri Mar 25 2016 (0.2.0) +# hBayesDM 0.2.3.3 + +* Add help files +* Add a function for checking Rhat values (rhat). +* Change a link to its tutorial website + +# hBayesDM 0.2.3.2 + +* Use wide normal distributions for unbounded parameters (gng_* models). +* Automatic removal of rows (trials) containing NAs. 
+ +# hBayesDM 0.2.3.1 + +* Add a function for plotting individual parameters (plotInd) + +# hBayesDM 0.2.3 + +* Add a new task: the Ultimatum Game +* Add new models for the Probabilistic Reversal Learning and Risk Aversion tasks +* ‘bandit2arm’ -> change its name to ‘bandit2arm_delta’. Now all model names are in the same format (i.e., TASK_MODEL). +* Users can extract model-based regressors from gng_m* models +* Include the option of customizing control parameters (adapt_delta, max_treedepth, stepsize) +* ‘plotHDI’ function -> add ‘fontSize’ argument & change the color of histogram + +# hBayesDM 0.2.1 + +## Bug fixes + +* All models: Fix errors when indPars=“mode” +* ra_prospect model: Add description for column names of a data (*.txt) file + +## Change + +* Change standard deviations of ‘b’ and ‘pi’ priors in gng_* models + +# hBayesDM 0.2.0 + Initially released. diff --git a/R/HDIofMCMC.R b/R/HDIofMCMC.R old mode 100755 new mode 100644 diff --git a/R/bandit2arm_delta.R b/R/bandit2arm_delta.R old mode 100755 new mode 100644 index b6550d46..d5343bcd --- a/R/bandit2arm_delta.R +++ b/R/bandit2arm_delta.R @@ -1,345 +1,69 @@ -#' Two-Arm Bandit Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Two-Arm Bandit Task (e.g., Erev et al., 2010; Hertwig et al., 2004) using the following parameters: "A" (learning rate), "tau" (inverse temperature). -#' -#' \strong{MODEL:} -#' Rescorla-Wagner (delta) model -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", and "outcome". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. 
Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. 
-#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"bandit2arm_delta"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters.} -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Two-Arm Bandit Task, there should be three columns of data -#' with the labels "subjID", "choice", and "outcome". 
It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{Should contain a unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"choice"}}{Should contain a integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in 2-arm bandit task).} -#' \item{\code{"outcome"}}{Should contain outcomes within each given trial (e.g., 1 = reward, -1 = loss).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". 
-#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION bandit2arm_delta +#' @templateVar TASK_NAME 2-Armed Bandit Task +#' @templateVar TASK_CITE (Erev et al., 2010; Hertwig et al., 2004) +#' @templateVar MODEL_NAME Rescorla-Wagner (Delta) Model +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "choice", "outcome" +#' @templateVar PARAMETERS "A" (learning rate), "tau" (inverse temperature) +#' @templateVar LENGTH_DATA_COLUMNS 3 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"choice"}{Integer value representing the option chosen on the given trial: 1 or 2.} +#' @templateVar DETAILS_DATA_3 \item{"outcome"}{Integer value representing the outcome of the given trial (where reward == 1, and loss == -1).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Erev, I., Ert, E., Roth, A. E., Haruvy, E., Herzog, S. M., Hau, R., et al. (2010). A choice prediction competition: Choices -#' from experience and from description. 
Journal of Behavioral Decision Making, 23(1), 15-47. http://doi.org/10.1002/bdm.683 -#' -#' Hertwig, R., Barron, G., Weber, E. U., & Erev, I. (2004). Decisions From Experience and the Effect of Rare Events in Risky -#' Choice. Psychological Science, 15(8), 534-539. http://doi.org/10.1111/j.0956-7976.2004.00715.x -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- bandit2arm_delta(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -bandit2arm_delta <- function(data = "choose", - niter = 3000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) 
- stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "bandit2arm_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 2 - POI <- c("mu_A", "mu_tau", - "sigma", - "A", "tau", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "bandit2arm_delta" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - # Setting maxTrials - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - choice <- array(-1, c(numSubjs, maxTrials)) - outcome <- array(0, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - choice[i, 1:useTrials] <- tmp$choice - outcome[i, 1:useTrials] <- tmp$outcome - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - choice = choice, - outcome = outcome, - numPars = numPars -) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(0.5, 1.0) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2] / 5)), - sigma = c(1.0, 1.0), - A_pr = rep(qnorm(inits_fixed[1]), numSubjs), - tau_pr = rep(qnorm(inits_fixed[2]/5), numSubjs) -) - } - } else { - genInitList <- "random" - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning('Number of cores specified for parallel computing greater than number of locally available cores. Using all locally available cores.') - } - else{ - options(mc.cores = ncore) +#' Erev, I., Ert, E., Roth, A. E., Haruvy, E., Herzog, S. M., Hau, R., et al. (2010). A choice +#' prediction competition: Choices from experience and from description. Journal of Behavioral +#' Decision Making, 23(1), 15-47. 
http://doi.org/10.1002/bdm.683 +#' +#' Hertwig, R., Barron, G., Weber, E. U., & Erev, I. (2004). Decisions From Experience and the +#' Effect of Rare Events in Risky Choice. Psychological Science, 15(8), 534-539. +#' http://doi.org/10.1111/j.0956-7976.2004.00715.x + +bandit2arm_delta <- hBayesDM_model( + task_name = "bandit2arm", + model_name = "delta", + data_columns = c("subjID", "choice", "outcome"), + parameters = list("A" = c(0, 0.5, 1), + "tau" = c(0, 1, 5)), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + choice <- array(-1, c(n_subj, t_max)) + outcome <- array( 0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + choice[i, 1:t] <- DT_subj$choice + outcome[i, 1:t] <- DT_subj$outcome } - } else { - options(mc.cores = 1) - } - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$bandit2arm_delta - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + choice = choice, + outcome = outcome + ) + + # Returned 
data_list will directly be passed to Stan + return(data_list) } +) - A <- parVals$A - tau <- parVals$tau - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c(mean(A[, i]), - mean(tau[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c(median(A[, i]), - median(tau[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(A[, i]), - estimate_mode(tau[, i])) - } - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("A", - "tau", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! 
****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/bandit4arm2_kalman_filter.R b/R/bandit4arm2_kalman_filter.R new file mode 100644 index 00000000..a8c92e8b --- /dev/null +++ b/R/bandit4arm2_kalman_filter.R @@ -0,0 +1,62 @@ +#' @templateVar MODEL_FUNCTION bandit4arm2_kalman_filter +#' @templateVar CONTRIBUTOR \href{https://zohyos7.github.io}{Yoonseo Zoh}, \href{https://lei-zhang.net/}{Lei Zhang} +#' @templateVar TASK_NAME 4-Armed Bandit Task (2) +#' @templateVar MODEL_NAME Kalman Filter +#' @templateVar MODEL_CITE (Daw et al., 2006, Nature) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "choice", "outcome" +#' @templateVar PARAMETERS "lambda" (decay factor), "theta" (decay center), "beta" (inverse softmax temperature), "mu0" (anticipated initial mean of all 4 options), "sigma0" (anticipated initial sd (uncertainty factor) of all 4 options), "sigmaD" (sd of diffusion noise) +#' @templateVar LENGTH_DATA_COLUMNS 3 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"choice"}{Integer value representing the option chosen on the given trial: 1, 2, 3, or 4.} +#' @templateVar DETAILS_DATA_3 \item{"outcome"}{Integer value representing the outcome of the given trial (where reward == 1, and loss == -1).} +#' +#' @template model-documentation +#' +#' @export +#' @include hBayesDM_model.R +#' +#' @references +#' Daw, N. D., O'Doherty, J. P., Dayan, P., Seymour, B., & Dolan, R. J. (2006). Cortical substrates +#' for exploratory decisions in humans. Nature, 441(7095), 876-879. 
+ +bandit4arm2_kalman_filter <- hBayesDM_model( + task_name = "bandit4arm2", + model_name = "kalman_filter", + data_columns = c("subjID", "choice", "outcome"), + parameters = list("lambda" = c(0, 0.9, 1), + "theta" = c(0, 50, 100), + "beta" = c(0, 0.1, 1), + "mu0" = c(0, 85, 100), + "sigma0" = c(0, 6, 15), + "sigmaD" = c(0, 3, 15)), + preprocess_func = function(raw_data, general_info) { + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + choice <- array(-1, c(n_subj, t_max)) + outcome <- array( 0, c(n_subj, t_max)) + + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + choice[i, 1:t] <- DT_subj$choice + outcome[i, 1:t] <- DT_subj$outcome + } + + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + choice = choice, + outcome = outcome + ) + + return(data_list) + } +) + diff --git a/R/bandit4arm_4par.R b/R/bandit4arm_4par.R index 3006c5ae..95a868e9 100644 --- a/R/bandit4arm_4par.R +++ b/R/bandit4arm_4par.R @@ -1,359 +1,70 @@ -#' 4-armed bandit task -#' -#' @description -#' Hierarchical Bayesian Modeling of the 4-armed bandit task with the following parameters: "Arew" (Reward learning rate), "Apun" (Punishment learning rate), "R" (Reward sensitivity), and "P" (Punishment sensitivity). -#' -#' \strong{MODEL:} -#' 4 parameter model without C (choice perseveration) (Seymour et al 2012, J Neuro) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", "gain", and "loss". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. 
-#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"bandit4arm_4par"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. 
} -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the 4-armed bandit task, there should be four columns of data with the labels -#' "subjID", "choice", "gain", and "loss". It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"choice"}}{A nominal integer representing which choice was chosen within the given trial (e.g. 1, 2, 3, or 4).} -#' \item{\code{"gain"}}{A floating number representing the amount of currency won on the given trial (e.g. 50, 50, 100).} -#' \item{\code{"loss"}}{A floating number representing the amount of currency lost on the given trial (e.g. 0, -50).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. 
As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION bandit4arm_4par +#' @templateVar TASK_NAME 4-Armed Bandit Task +#' @templateVar MODEL_NAME 4 Parameter Model, without C (choice perseveration) +#' @templateVar MODEL_CITE (Seymour et al., 2012, J Neuro) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "choice", "gain", "loss" +#' @templateVar PARAMETERS "Arew" (reward learning rate), "Apun" (punishment learning rate), "R" (reward sensitivity), "P" (punishment sensitivity) +#' @templateVar LENGTH_DATA_COLUMNS 4 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"choice"}{Integer value representing the option chosen on the given trial: 1, 2, 3, or 4.} +#' @templateVar DETAILS_DATA_3 \item{"gain"}{Floating point value representing the amount of currency won on the given trial (e.g. 50, 100).} +#' @templateVar DETAILS_DATA_4 \item{"loss"}{Floating point value representing the amount of currency lost on the given trial (e.g. 0, -50).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Seymour, Daw, Roiser, Dayan, & Dolan (2012) Serotonin Selectively Modulates Reward Value in Human Decision-Making. J Neuro, 32(17), 5833-5842. 
-#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- bandit4arm_4par("example", 3000, 1000, 4, 4) # 4 chains, 4 cores (parallel processing) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } -#' -bandit4arm_4par <- function(data = "choose", - niter = 4000, - nwarmup = 2000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "bandit4arm_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T) - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... 
**\n") - } - - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 4 - POI <- c("mu_Arew", "mu_Apun", "mu_R", "mu_P", - "sigma", - "Arew", "Apun", "R", "P", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "bandit4arm_4par" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (sIdx in 1:numSubjs) { - curSubj <- subjList[sIdx] - Tsubj[sIdx] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - #RLmatrix <- SRLmatrix <- array(0, c(numSubjs, maxTrials)) - rew <- array(0, c(numSubjs, maxTrials)) - los <- array(0, c(numSubjs, maxTrials)) - choice <- array(-1, c(numSubjs, maxTrials)) - - for (subjIdx in 1:numSubjs) { - #number of trials for each subj. - useTrials <- Tsubj[subjIdx] - currID <- subjList[subjIdx] - rawdata_curSubj <- subset(rawdata, rawdata$subjID == currID) - rew[subjIdx, 1:useTrials] <- rawdata_curSubj[, "gain"] - los[subjIdx, 1:useTrials] <- -1 * abs(rawdata_curSubj[, "loss"]) - - for (tIdx in 1:useTrials) { - Y_t <- rawdata_curSubj[tIdx, "choice"] # chosen Y on trial "t" - choice[subjIdx , tIdx] <- Y_t +#' Seymour, Daw, Roiser, Dayan, & Dolan (2012). Serotonin Selectively Modulates Reward Value in +#' Human Decision-Making. J Neuro, 32(17), 5833-5842. 
+
+bandit4arm_4par <- hBayesDM_model(
+  task_name = "bandit4arm",
+  model_name = "4par",
+  data_columns = c("subjID", "choice", "gain", "loss"),
+  parameters = list("Arew" = c(0, 0.1, 1),
+                    "Apun" = c(0, 0.1, 1),
+                    "R" = c(0, 1, 30),
+                    "P" = c(0, 1, 30)),
+  preprocess_func = function(raw_data, general_info) {
+    # Currently class(raw_data) == "data.table"
+
+    # Use general_info of raw_data
+    subjs   <- general_info$subjs
+    n_subj  <- general_info$n_subj
+    t_subjs <- general_info$t_subjs
+    t_max   <- general_info$t_max
+
+    # Initialize (model-specific) data arrays
+    rew    <- array( 0, c(n_subj, t_max))
+    los    <- array( 0, c(n_subj, t_max))
+    choice <- array(-1, c(n_subj, t_max))
+
+    # Write from raw_data to the data arrays
+    for (i in 1:n_subj) {
+      subj <- subjs[i]
+      t <- t_subjs[i]
+      DT_subj <- raw_data[subjid == subj]
+
+      rew[i, 1:t]    <- DT_subj$gain
+      los[i, 1:t]    <- -1 * abs(DT_subj$loss)
+      choice[i, 1:t] <- DT_subj$choice
     }
-  }
-
-  dataList <- list(
-    N       = numSubjs,
-    T       = maxTrials,
-    Tsubj   = Tsubj ,
-    rew     = rew,
-    los     = los,
-    choice  = choice
-)
-  # inits
-  if (inits[1] != "random") {
-    if (inits[1] == "fixed") {
-      inits_fixed = c(0.1, 0.1, 1.0, 1.0)
-    } else {
-      if (length(inits) == numPars) {
-        inits_fixed = inits
-      } else {
-        stop("Check your inital values!")
-      }
-    }
-    genInitList <- function() {
-      list(
-        # Non-centered parameters
-        mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2]), qnorm(inits_fixed[3]/30), qnorm(inits_fixed[4]/30)),
-        sigma = c(1.0, 1.0, 1.0, 1.0),
-        Arew_pr = rep(qnorm(inits_fixed[1]), numSubjs),
-        Apun_pr = rep(qnorm(inits_fixed[2]), numSubjs),
-        R_pr = rep(qnorm(inits_fixed[3]/30), numSubjs),
-        P_pr = rep(qnorm(inits_fixed[4]/30), numSubjs)
-)
-    }
-  } else {
-    genInitList <- "random"
+    # Wrap into a list for Stan
+    data_list <- list(
+      N = n_subj,
+      T = t_max,
+      Tsubj = t_subjs,
+      rew = rew,
+      los = los,
+      choice = choice
+    )
+
+    # Returned data_list will directly be passed to Stan
+    return(data_list)
   }
+)
-
-  # For parallel computing if 
using multi-cores - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. Using all locally available cores.") - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$bandit4arm_4par - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA - } - - Arew <- parVals$Arew - Apun <- parVals$Apun - R <- parVals$R - P <- parVals$P - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c(mean(Arew[, i]), - mean(Apun[, i]), - mean(R[, i]), - mean(P[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c(median(Arew[, i]), - median(Apun[, i]), - median(R[, i]), - median(P[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(Arew[, i]), - estimate_mode(Apun[, i]), - estimate_mode(R[, i]), - estimate_mode(P[, i])) - } - } - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("Arew", - "Apun", - "R", - "P", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- 
c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime # time took to run the code - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/bandit4arm_lapse.R b/R/bandit4arm_lapse.R index a0070e17..85a13447 100644 --- a/R/bandit4arm_lapse.R +++ b/R/bandit4arm_lapse.R @@ -1,364 +1,71 @@ -#' 4-armed bandit task -#' -#' @description -#' Hierarchical Bayesian Modeling of the 4-armed bandit task with the following parameters: "Arew" (Reward learning rate), "Apun" (Punishment learning rate), "R" (Reward sensitivity), "P" (Punishment sensitivity), and "xi" (Noise). -#' -#' \strong{MODEL:} -#' 5 parameter model without C (choice perseveration) but with xi (noise) (Seymour et al 2012, J Neuro) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", "gain", and "loss". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. 
Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"bandit4arm_lapse"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. 
} -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the 4-armed bandit task, there should be four columns of data with the labels -#' "subjID", "choice", "gain", and "loss". It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"choice"}}{A nominal integer representing which choice was chosen within the given trial (e.g. 1, 2, 3, or 4).} -#' \item{\code{"gain"}}{A floating number representing the amount of currency won on the given trial (e.g. 50, 50, 100).} -#' \item{\code{"loss"}}{A floating number representing the amount of currency lost on the given trial (e.g. 0, -50).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. 
As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION bandit4arm_lapse +#' @templateVar TASK_NAME 4-Armed Bandit Task +#' @templateVar MODEL_NAME 5 Parameter Model, without C (choice perseveration) but with xi (noise) +#' @templateVar MODEL_CITE (Seymour et al., 2012, J Neuro) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "choice", "gain", "loss" +#' @templateVar PARAMETERS "Arew" (reward learning rate), "Apun" (punishment learning rate), "R" (reward sensitivity), "P" (punishment sensitivity), "xi" (noise) +#' @templateVar LENGTH_DATA_COLUMNS 4 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"choice"}{Integer value representing the option chosen on the given trial: 1, 2, 3, or 4.} +#' @templateVar DETAILS_DATA_3 \item{"gain"}{Floating point value representing the amount of currency won on the given trial (e.g. 50, 100).} +#' @templateVar DETAILS_DATA_4 \item{"loss"}{Floating point value representing the amount of currency lost on the given trial (e.g. 0, -50).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' -#' Seymour, Daw, Roiser, Dayan, & Dolan (2012) Serotonin Selectively Modulates Reward Value in Human Decision-Making. J Neuro, 32(17), 5833-5842. 
-#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- bandit4arm_lapse("example", 3000, 1000, 4, 4) # 4 chains, 4 cores (parallel processing) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } -#' -bandit4arm_lapse <- function(data = "choose", - niter = 4000, - nwarmup = 2000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "bandit4arm_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T) - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... 
**\n") - } - - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 5 - POI <- c("mu_Arew", "mu_Apun", "mu_R", "mu_P", "mu_xi", - "sigma", - "Arew", "Apun", "R", "P", "xi", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "bandit4arm_lapse" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. ################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (sIdx in 1:numSubjs) { - curSubj <- subjList[sIdx] - Tsubj[sIdx] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - rew <- array(0, c(numSubjs, maxTrials)) - los <- array(0, c(numSubjs, maxTrials)) - choice <- array(-1, c(numSubjs, maxTrials)) - - for (subjIdx in 1:numSubjs) { - #number of trials for each subj. 
- useTrials <- Tsubj[subjIdx] - currID <- subjList[subjIdx] - rawdata_curSubj <- subset(rawdata, rawdata$subjID == currID) - rew[subjIdx, 1:useTrials] <- rawdata_curSubj[, "gain"] - los[subjIdx, 1:useTrials] <- -1 * abs(rawdata_curSubj[, "loss"]) - - for (tIdx in 1:useTrials) { - Y_t <- rawdata_curSubj[tIdx, "choice"] # chosen Y on trial "t" - choice[subjIdx , tIdx] <- Y_t +#' Seymour, Daw, Roiser, Dayan, & Dolan (2012). Serotonin Selectively Modulates Reward Value in +#' Human Decision-Making. J Neuro, 32(17), 5833-5842. + +bandit4arm_lapse <- hBayesDM_model( + task_name = "bandit4arm", + model_name = "lapse", + data_columns = c("subjID", "choice", "gain", "loss"), + parameters = list("Arew" = c(0, 0.1, 1), + "Apun" = c(0, 0.1, 1), + "R" = c(0, 1, 30), + "P" = c(0, 1, 30), + "xi" = c(0, 0.1, 1)), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + rew <- array( 0, c(n_subj, t_max)) + los <- array( 0, c(n_subj, t_max)) + choice <- array(-1, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + rew[i, 1:t] <- DT_subj$gain + los[i, 1:t] <- -1 * abs(DT_subj$loss) + choice[i, 1:t] <- DT_subj$choice } - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj , - rew = rew, - los = los, - choice = choice -) - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed = c(0.1, 0.1, 1.0, 1.0, 0.1) - } else { - if (length(inits) == numPars) { - inits_fixed = inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - # Non-centered parameters - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2]), qnorm(inits_fixed[3]/30), 
qnorm(inits_fixed[4]/30), qnorm(inits_fixed[5])), - sigma = c(1.0, 1.0, 1.0, 1.0, 1.0), - Arew_pr = rep(qnorm(inits_fixed[1]), numSubjs), - Apun_pr = rep(qnorm(inits_fixed[2]), numSubjs), - R_pr = rep(qnorm(inits_fixed[3]/30), numSubjs), - P_pr = rep(qnorm(inits_fixed[4]/30), numSubjs), - xi_pr = rep(qnorm(inits_fixed[5]), numSubjs) -) - } - } else { - genInitList <- "random" + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + rew = rew, + los = los, + choice = choice + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - # For parallel computing if using multi-cores - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. Using all locally available cores.") - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$bandit4arm_lapse - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA - } - - Arew <- parVals$Arew - Apun <- parVals$Apun - R <- parVals$R - P <- parVals$P - xi <- parVals$xi - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) 
{ - if (indPars == "mean") { - allIndPars[i,] <- c(mean(Arew[, i]), - mean(Apun[, i]), - mean(R[, i]), - mean(P[, i]), - mean(xi[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c(median(Arew[, i]), - median(Apun[, i]), - median(R[, i]), - median(P[, i]), - median(xi[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(Arew[, i]), - estimate_mode(Apun[, i]), - estimate_mode(R[, i]), - estimate_mode(P[, i]), - estimate_mode(xi[, i])) - } - } - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("Arew", - "Apun", - "R", - "P", - "xi", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime # time took to run the code - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! 
****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/bart_par4.R b/R/bart_par4.R old mode 100755 new mode 100644 index ab3c4c4f..606f7ccc --- a/R/bart_par4.R +++ b/R/bart_par4.R @@ -1,347 +1,68 @@ -#' Balloon Analogue Risk Task (Ravenzwaaij et al., 2011, Journal of Mathematical Psychology) -#' -#' @description -#' Hierarchical Bayesian Modeling of the Balloon Analogue Risk Task with the following 4 parameters: "phi" (prior belief of the balloon not going to be burst), "eta" (updating rate), "gam" (risk-taking parameter), and "tau" (inverse temperature).\cr\cr -#' -#' Contributor: \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park}, \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang}, \href{https://ccs-lab.github.io/team/ayoung-lee/}{Ayoung Lee}, \href{https://ccs-lab.github.io/team/jeongbin-oh/}{Jeongbin Oh}, \href{https://ccs-lab.github.io/team/jiyoon-lee/}{Jiyoon Lee}, \href{https://ccs-lab.github.io/team/junha-jang/}{Junha Jang} -#' -#' \strong{MODEL:} -#' Reparameterized version (by Harhim Park & Jaeyeong Yang) of Balloon Analogue Risk Task model (Ravenzwaaij et al., 2011) with four parameters -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "pumps", and "explosion". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. 
-#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"bart_par4"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. 
} -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Balloon Analogue Risk Task, there should be three columns of data with the labels -#' "subjID", "pumps", "explosion". It is not necessary for the columns to be in this -#' particular order, however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"pumps"}}{The number of pumps} -#' \item{\code{"explosion"}}{0: intact, 1: burst} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. 
-#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. 
+#' @templateVar MODEL_FUNCTION bart_par4 +#' @templateVar CONTRIBUTOR \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park}, \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang}, \href{https://ccs-lab.github.io/team/ayoung-lee/}{Ayoung Lee}, \href{https://ccs-lab.github.io/team/jeongbin-oh/}{Jeongbin Oh}, \href{https://ccs-lab.github.io/team/jiyoon-lee/}{Jiyoon Lee}, \href{https://ccs-lab.github.io/team/junha-jang/}{Junha Jang} +#' @templateVar TASK_NAME Balloon Analogue Risk Task +#' @templateVar TASK_CITE (Ravenzwaaij et al., 2011, Journal of Mathematical Psychology) +#' @templateVar MODEL_NAME Re-parameterized version (by Harhim Park & Jaeyeong Yang) of BART Model (Ravenzwaaij et al., 2011) with 4 parameters +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "pumps", "explosion" +#' @templateVar PARAMETERS "phi" (prior belief of balloon not bursting), "eta" (updating rate), "gam" (risk-taking parameter), "tau" (inverse temperature) +#' @templateVar LENGTH_DATA_COLUMNS 3 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"pumps"}{The number of pumps.} +#' @templateVar DETAILS_DATA_3 \item{"explosion"}{0: intact, 1: burst} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' van Ravenzwaaij, D., Dutilh, G., & Wagenmakers, E. J. (2011). Cognitive model decomposition of the BART: Assessment and application. -#' Journal of Mathematical Psychology, 55(1), 94-105. 
-#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- bart_par4(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' -#' -#' } - -bart_par4 <- function(data = "choose", - niter = 4000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "fixed", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "bart_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". 
They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 4 - POI <- c("mu_phi", "mu_eta", "mu_gam", "mu_tau", - "sigma", - "phi", "eta", "gam", "tau", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "bart_par4" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (sIdx in 1:numSubjs) { - curSubj <- subjList[sIdx] - Tsubj[sIdx] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - # for multiple subjects - pumps <- array(0, c(numSubjs, maxTrials)) - explosion <- array(0, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - - pumps[i, 1:useTrials] <- tmp[1:useTrials, "pumps"] - explosion[i, 1:useTrials] <- tmp[1:useTrials, "explosion"] - } - - maxPumps <- max(pumps) - - dataList <- list( - N = numSubjs, - T = maxTrials, - P = maxPumps + 1, - Tsubj = Tsubj, - numPars = numPars, - pumps = pumps, - explosion = explosion - ) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(0.5, 1.0, 1.0, 1.0) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), log(inits_fixed[2:4])), - sigma = c(1.0, 1.0, 1.0, 1.0), - phi_p = rep(qnorm(inits_fixed[1]), numSubjs), - eta_p = rep(log(inits_fixed[2]), numSubjs), - gam_p = rep(log(inits_fixed[3]), numSubjs), - tau_p = rep(log(inits_fixed[4]), numSubjs) - ) +#' van Ravenzwaaij, D., Dutilh, G., & Wagenmakers, E. J. (2011). Cognitive model decomposition of the +#' BART: Assessment and application. Journal of Mathematical Psychology, 55(1), 94-105. 
+ +bart_par4 <- hBayesDM_model( + task_name = "bart", + model_name = "par4", + data_columns = c("subjID", "pumps", "explosion"), + parameters = list("phi" = c(0, 0.5, 1), + "eta" = c(0, 1, Inf), + "gam" = c(0, 1, Inf), + "tau" = c(0, 1, Inf)), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + pumps <- array(0, c(n_subj, t_max)) + explosion <- array(0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + pumps[i, 1:t] <- DT_subj$pumps + explosion[i, 1:t] <- DT_subj$explosion } - } else { - genInitList <- "random" - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.") - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$bart_par4 - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - # Extract the Stan fit object - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA - } - phi <- parVals$phi - eta <- parVals$eta - gam <- parVals$gam - tau <- parVals$tau - - # Individual parameters (e.g., individual posterior means) - measureIndPars <- switch(indPars, mean=mean, median=median, mode=estimate_mode) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - allIndPars[i,] <- c(measureIndPars(phi[, i]), - measureIndPars(eta[, i]), - measureIndPars(gam[, i]), - measureIndPars(tau[, i])) - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("phi", - "eta", - "gam", - "tau", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. 
If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + P = max(pumps) + 1, + pumps = pumps, + explosion = explosion + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/choiceRT_ddm.R b/R/choiceRT_ddm.R index 3383e2c9..6acdea5d 100644 --- a/R/choiceRT_ddm.R +++ b/R/choiceRT_ddm.R @@ -1,365 +1,85 @@ -#' Choice Reaction Time task, drift diffusion modeling +#' @templateVar MODEL_FUNCTION choiceRT_ddm +#' @templateVar TASK_NAME Choice Reaction Time Task +#' @templateVar MODEL_NAME Drift Diffusion Model +#' @templateVar MODEL_CITE (Ratcliff, 1978, Psychological Review)\cr *Note that this implementation is \strong{not} the full Drift Diffusion Model as described in Ratcliff (1978). This implementation estimates the drift rate, boundary separation, starting point, and non-decision time; but not the between- and within-trial variances in these parameters. 
+#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "choice", "RT" +#' @templateVar PARAMETERS "alpha" (boundary separation), "beta" (bias), "delta" (drift rate), "tau" (non-decision time) +#' @templateVar IS_NULL_POSTPREDS TRUE +#' @templateVar ADDITIONAL_ARG \code{RTbound}: Floating point value representing the lower bound (i.e., minimum allowed) reaction time. Defaults to 0.1 (100 milliseconds). +#' @templateVar LENGTH_DATA_COLUMNS 3 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"choice"}{Choice made for the current trial, coded as \code{1}/\code{2} to indicate lower/upper boundary or left/right choices (e.g., 1 1 1 2 1 2).} +#' @templateVar DETAILS_DATA_3 \item{"RT"}{Choice reaction time for the current trial, in \strong{seconds} (e.g., 0.435 0.383 0.314 0.309, etc.).} +#' +#' @template model-documentation #' -#' @description -#' Hierarchical Bayesian Modeling of choice/reaction time data with the following parameters: "alpha" (boundary separation), "beta" (bias), "delta" (drift rate), "tau" (non-decision time). -#' The code is based on codes/comments by Guido Biele, Joseph Burling, Andrew Ellis, and potentially others @ Stan mailing -#' -#' \strong{MODEL:} -#' Ratcliff drift diffusion model - multiple subjects. Note that this implementation is \strong{not} the full drift diffusion model as described in -#' Ratcliff (1978). This implementation estimates the drift rate, boundary separation, starting point, and non-decision time, but not the between- -#' and within-trial variances in these parameters. -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID, ""choice", and "RT". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. 
-#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param RTbound A floating point number representing the lower bound (i.e. minimum allowed) reaction time. Defaults to 0.1 (100 milliseconds). -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred (\strong{Not currently available for DDM models}) Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. 
-#' -#' @return \code{modelData} A class \code{'hBayesDM'} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"choiceRT_ddm"}).} -#' \item{\code{allIndPars}}{\code{'data.frame'} containing the summarized parameter -#' values (as specified by \code{'indPars'}) for each subject.} -#' \item{\code{parVals}}{A \code{'list'} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{'stanfit'} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For choice/reaction-time tasks, there should be three columns of data with the labels "subjID", "choice", and "RT". -#' It is not necessary for the columns to be in this particular order, however it is necessary that they be labelled -#' correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"choice"}}{An integer representing the choice made on the current trial. 
Lower/upper boundary or left/right choices should be coded as 1/2 (e.g., 1 1 1 2 1 2).} -#' \item{\code{"RT"}}{A floating number the choice reaction time in \strong{seconds}. (e.g., 0.435 0.383 0.314 0.309, etc.).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "stimulus_name", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \strong{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". +#' @export +#' @include hBayesDM_model.R +#' @importFrom stats aggregate #' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \strong{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. 
+#' @description +#' Code for this model is based on codes/comments by Guido Biele, Joseph Burling, Andrew Ellis, and potential others @ Stan mailing #' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. -#' -#' @export +#' Parameters of the DDM (parameter names in Ratcliff), from \url{https://github.com/gbiele/stan_wiener_test/blob/master/stan_wiener_test.R} +#' \cr - alpha (a): Boundary separation or Speed-accuracy trade-off (high alpha means high accuracy). 0 < alpha +#' \cr - beta (b): Initial bias, for either response (beta > 0.5 means bias towards "upper" response 'A'). 0 < beta < 1 +#' \cr - delta (v): Drift rate; Quality of the stimulus (delta close to 0 means ambiguous stimulus or weak ability). 0 < delta +#' \cr - tau (ter): Non-decision time + Motor response time + encoding time (high means slow encoding, execution). 0 < tau (in seconds) #' #' @references #' Ratcliff, R. (1978). A theory of memory retrieval. Psychological Review, 85(2), 59-108. http://doi.org/10.1037/0033-295X.85.2.59 -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. 
-#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- choiceRT_ddm(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -choiceRT_ddm <- function(data = "choose", - niter = 3000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "fixed", - indPars = "mean", - saveDir = NULL, - RTbound = 0.1, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - stop("** Model-based regressors are not available for this model **\n") - } - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "choiceRT_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T) - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... 
**\n") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 4 - POI <- c("mu_alpha", "mu_beta", "mu_delta", "mu_tau", - "sigma", - "alpha", "beta", "delta", "tau", - "log_lik") - - if (inc_postpred) { - stop("Posterior Predictions are not yet available for this model. Please set inc_postpred to FALSE") - } - - # parameters of the DDM (parameter names in Ratcliffs DDM), from https://github.com/gbiele/stan_wiener_test/blob/master/stan_wiener_test.R - # alpha (a): Boundary separation or Speed-accuracy trade-off (high alpha means high accuracy). alpha > 0 - # beta (b): Initial bias Bias for either response (beta > 0.5 means bias towards "upper" response 'A'). 0 < beta < 1 - # delta (v): Drift rate Quality of the stimulus (delta close to 0 means ambiguous stimulus or weak ability). 0 < delta - # tau (ter): Nondecision time + Motor response time + encoding time (high means slow encoding, execution). 0 < ter (in seconds) - - modelName <- "choiceRT_ddm" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - # Setting maxTrials - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials of this subject = ", maxTrials, "\n\n") - - # Number of upper and lower boundary responses for each subject - Nu <- with(rawdata, aggregate(choice == 2, by = list(y = subjID), FUN = sum)[["x"]]) - Nl <- with(rawdata, aggregate(choice == 1, by = list(y = subjID), FUN = sum)[["x"]]) - - # Minimum reaction time per subject - minRT <- with(rawdata, aggregate(RT, by = list(y = subjID), FUN = min)[["x"]]) - - # Reaction times for upper and lower boundary responses - RTu <- array(-1, c(numSubjs, max(Nu))) - RTl <- array(-1, c(numSubjs, max(Nl))) - - # Store each subjects' reaction time data - for (i in 1:numSubjs) { - curSubj <- subjList[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - RTu[i, 1:Nu[i]] <- tmp$RT[tmp$choice == 2] # (Nu/Nl[i]+1):Nu/Nl_max will be padded with 0's - RTl[i, 1:Nl[i]] <- tmp$RT[tmp$choice == 1] # 0 padding is skipped in likelihood calculation - } - - # List of data sent to Stan - dataList <- list( - N = numSubjs, # Number of subjects - Nu_max = max(Nu), # Max (across subjects) number of upper boundary responses - Nl_max = max(Nl), # Max (across subjects) number of lower boundary responses - Nu = Nu, # Number of upper boundary responses for each subj - Nl = Nl, # Number of lower boundary responses for each subj - RTu = RTu, # upper boundary response times - RTl = RTl, # lower boundary response times - minRT = minRT, # minimum RT for each subject of the observed data - RTbound = RTbound # lower bound or RT across all subjects (e.g., 0.1 second) -) - - # inits - if (inits[1] != "random") { - if 
(inits[1] == "fixed") { - inits_fixed <- c(0.5, 0.5, 0.5, 0.15) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(log(inits_fixed[1]), qnorm(inits_fixed[2]), log(inits_fixed[3]), qnorm(inits_fixed[4])), - sigma = c(1.0, 1.0, 1.0, 1.0), - alpha_pr = rep(log(inits_fixed[1]), numSubjs), - beta_pr = rep(qnorm(inits_fixed[2]), numSubjs), - delta_pr = rep(log(inits_fixed[3]), numSubjs), - tau_pr = rep(qnorm(inits_fixed[4]), numSubjs) -) - } - } else { - genInitList <- "random" - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning('Number of cores specified for parallel computing greater than number of locally available cores. Using all locally available cores.') - } - else{ - options(mc.cores = ncore) +choiceRT_ddm <- hBayesDM_model( + task_name = "choiceRT", + model_name = "ddm", + data_columns = c("subjID", "choice", "RT"), + parameters = list("alpha" = c(0, 0.5, Inf), + "beta" = c(0, 0.5, 1), + "delta" = c(0, 0.5, Inf), + "tau" = c(0, 0.15, 1)), + postpreds = NULL, + preprocess_func = function(raw_data, general_info, RTbound = 0.1) { + # Use raw_data as a data.frame + raw_data <- as.data.frame(raw_data) + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + + # Number of upper and lower boundary responses + Nu <- with(raw_data, aggregate(choice == 2, by = list(y = subjid), FUN = sum)[["x"]]) + Nl <- with(raw_data, aggregate(choice == 1, by = list(y = subjid), FUN = sum)[["x"]]) + + # Reaction times for upper and lower boundary responses + RTu <- array(-1, c(n_subj, max(Nu))) + RTl <- array(-1, c(n_subj, max(Nl))) + for (i in 1:n_subj) { + subj <- subjs[i] + subj_data <- subset(raw_data, raw_data$subjid == subj) + + RTu[i, 1:Nu[i]] <- subj_data$rt[subj_data$choice == 2] # (Nu/Nl[i]+1):Nu/Nl_max will be padded with 0's + RTl[i, 
1:Nl[i]] <- subj_data$rt[subj_data$choice == 1] # 0 padding is skipped in likelihood calculation } - } - else { - options(mc.cores = 1) - } - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$choiceRT_ddm - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) + # Minimum reaction time + minRT <- with(raw_data, aggregate(rt, by = list(y = subjid), FUN = min)[["x"]]) + + # Wrap into a list for Stan + data_list <- list( + N = n_subj, # Number of subjects + Nu_max = max(Nu), # Max (across subjects) number of upper boundary responses + Nl_max = max(Nl), # Max (across subjects) number of lower boundary responses + Nu = Nu, # Number of upper boundary responses for each subject + Nl = Nl, # Number of lower boundary responses for each subject + RTu = RTu, # Upper boundary response times + RTl = RTl, # Lower boundary response times + minRT = minRT, # Minimum RT for each subject + RTbound = RTbound # Lower bound of RT across all subjects (e.g., 0.1 second) + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } - parVals <- rstan::extract(fit, permuted = T) - - alpha <- parVals$alpha - beta <- parVals$beta - delta <- parVals$delta - tau <- parVals$tau - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c(mean(alpha[, i]), - mean(beta[, i]), - mean(delta[, i]), - mean(tau[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- 
c(median(alpha[, i]), - median(beta[, i]), - median(delta[, i]), - median(tau[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(alpha[, i]), - estimate_mode(beta[, i]), - estimate_mode(delta[, i]), - estimate_mode(tau[, i])) - } - } - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("alpha", - "beta", - "delta", - "tau", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! 
****\n") - cat("************************************\n") +) - return(modelData) -} diff --git a/R/choiceRT_ddm_single.R b/R/choiceRT_ddm_single.R index af95f544..50067d46 100644 --- a/R/choiceRT_ddm_single.R +++ b/R/choiceRT_ddm_single.R @@ -1,340 +1,63 @@ -#' Choice Reaction Time task, drift diffusion modeling +#' @templateVar MODEL_FUNCTION choiceRT_ddm_single +#' @templateVar TASK_NAME Choice Reaction Time Task +#' @templateVar MODEL_NAME Drift Diffusion Model +#' @templateVar MODEL_CITE (Ratcliff, 1978, Psychological Review)\cr *Note that this implementation is \strong{not} the full Drift Diffusion Model as described in Ratcliff (1978). This implementation estimates the drift rate, boundary separation, starting point, and non-decision time; but not the between- and within-trial variances in these parameters. +#' @templateVar MODEL_TYPE Individual +#' @templateVar DATA_COLUMNS "subjID", "choice", "RT" +#' @templateVar PARAMETERS "alpha" (boundary separation), "beta" (bias), "delta" (drift rate), "tau" (non-decision time) +#' @templateVar IS_NULL_POSTPREDS TRUE +#' @templateVar ADDITIONAL_ARG \code{RTbound}: Floating point value representing the lower bound (i.e., minimum allowed) reaction time. Defaults to 0.1 (100 milliseconds). +#' @templateVar LENGTH_DATA_COLUMNS 3 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"choice"}{Choice made for the current trial, coded as \code{1}/\code{2} to indicate lower/upper boundary or left/right choices (e.g., 1 1 1 2 1 2).} +#' @templateVar DETAILS_DATA_3 \item{"RT"}{Choice reaction time for the current trial, in \strong{seconds} (e.g., 0.435 0.383 0.314 0.309, etc.).} +#' +#' @template model-documentation #' -#' @description -#' Individual Bayesian Modeling of choice/reaction time data with the following parameters: "alpha" (boundary separation), "beta" (bias), "delta" (drift rate), "tau" (non-decision time). 
-#' The code is based on codes/comments by Guido Biele, Joseph Burling, Andrew Ellis, and potentially others @ Stan mailing -#' -#' \strong{MODEL:} -#' Ratcliff drift diffusion model - single subject. Note that this implementation is \strong{not} the full drift diffusion model as described in -#' Ratcliff (1978). This implementation estimates the drift rate, boundary separation, starting point, and non-decision time, but not the between- -#' and within-trial variances in these parameters. -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID, ""choice", and "RT". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param RTbound A floating point number representing the lower bound (i.e. minimum allowed) reaction time. Defaults to 0.1 (100 milliseconds). -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. 
-#' @param inc_postpred (\strong{Not currently available for DDM models}) Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{'hBayesDM'} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"choiceRT_ddm_single"}).} -#' \item{\code{allIndPars}}{\code{'data.frame'} containing the summarized parameter -#' values (as specified by \code{'indPars'}) for the single subject.} -#' \item{\code{parVals}}{A \code{'list'} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{'stanfit'} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of the subject of interest for the current analysis. 
-#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For choice/reaction-time tasks, there should be two columns of data with the labels "subjID", "choice", and "RT". -#' It is not necessary for the columns to be in this particular order, however it is necessary that they be labelled -#' correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"choice"}}{An integer representing the choice made on the current trial. Lower/upper boundary or left/right choices should be coded as 1/2 (e.g., 1 1 1 2 1 2).} -#' \item{\code{"RT"}}{A floating number the choice reaction time in \strong{seconds}. (e.g., 0.435 0.383 0.314 0.309, etc.).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "subjID", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \strong{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. 
Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". +#' @export +#' @include hBayesDM_model.R #' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \strong{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. +#' @description +#' Code for this model is based on codes/comments by Guido Biele, Joseph Burling, Andrew Ellis, and potential others @ Stan mailing #' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. -#' -#' @export +#' Parameters of the DDM (parameter names in Ratcliff), from \url{https://github.com/gbiele/stan_wiener_test/blob/master/stan_wiener_test.R} +#' \cr - alpha (a): Boundary separation or Speed-accuracy trade-off (high alpha means high accuracy). 0 < alpha +#' \cr - beta (b): Initial bias, for either response (beta > 0.5 means bias towards "upper" response 'A'). 0 < beta < 1 +#' \cr - delta (v): Drift rate; Quality of the stimulus (delta close to 0 means ambiguous stimulus or weak ability). 
0 < delta +#' \cr - tau (ter): Non-decision time + Motor response time + encoding time (high means slow encoding, execution). 0 < tau (in seconds) #' #' @references #' Ratcliff, R. (1978). A theory of memory retrieval. Psychological Review, 85(2), 59-108. http://doi.org/10.1037/0033-295X.85.2.59 -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- choiceRT_ddm_single(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -choiceRT_ddm_single <- function(data = "choose", - niter = 3000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "fixed", - indPars = "mean", - saveDir = NULL, - RTbound = 0.1, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) 
- stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "choiceRT_single_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T) - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - - # Individual Subjects - subjID <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjID) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 4 - POI <- c("alpha", "beta", "delta", "tau", - "log_lik") - if (inc_postpred) { - stop("Posterior Predictions are not yet available for this model. Please set inc_postpred to FALSE") +choiceRT_ddm_single <- hBayesDM_model( + task_name = "choiceRT", + model_name = "ddm", + model_type = "single", + data_columns = c("subjID", "choice", "RT"), + parameters = list("alpha" = c(NA, 0.5, NA), + "beta" = c(NA, 0.5, NA), + "delta" = c(NA, 0.5, NA), + "tau" = c(NA, 0.15, NA)), + postpreds = NULL, + preprocess_func = function(raw_data, general_info, RTbound = 0.1) { + # Currently class(raw_data) == "data.table" + + # Data.tables for upper and lower boundary responses + DT_upper <- raw_data[choice == 2] + DT_lower <- raw_data[choice == 1] + + # Wrap into a list for Stan + data_list <- list( + Nu = nrow(DT_upper), # Number of upper boundary responses + Nl = nrow(DT_lower), # Number of lower boundary responses + RTu = DT_upper$rt, # Upper boundary response times + RTl = DT_lower$rt, # Lower boundary response times + minRT = min(raw_data$rt), # Minimum RT + RTbound = RTbound # Lower bound of RT (e.g., 0.1 second) + ) + + # Returned data_list will directly be passed to Stan + return(data_list) 
} - - # parameters of the DDM (parameter names in Ratcliffs DDM), from https://github.com/gbiele/stan_wiener_test/blob/master/stan_wiener_test.R - # alpha (a): Boundary separation or Speed-accuracy trade-off (high alpha means high accuracy). alpha > 0 - # beta (b): Initial bias Bias for either response (beta > 0.5 means bias towards "upper" response 'A'). 0 < beta < 1 - # delta (v): Drift rate Quality of the stimulus (delta close to 0 means ambiguous stimulus or weak ability). 0 < delta - # tau (ter): Nondecision time + Motor response time + encoding time (high means slow encoding, execution). 0 < ter (in seconds) - - modelName <- "choiceRT_ddm_single" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - # Setting number trial/subject - Tsubj <- dim(rawdata)[1] - - # Information for user continued - cat(" # of (max) trials of this subject = ", Tsubj, "\n\n") - - data_upper <- subset(rawdata, rawdata$choice == 2) - data_lower <- subset(rawdata, rawdata$choice == 1) - - Nu <- dim(data_upper)[1] - Nl <- dim(data_lower)[1] - RTu <- data_upper$RT - RTl <- data_lower$RT - minRT <- min(rawdata$RT) - - dataList <- list( - Tsubj = Tsubj, - Nu = Nu, - Nl = Nl, - RTu = RTu, - RTl = RTl, - minRT = minRT, - RTbound = RTbound -) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(0.5, 0.5, 0.5, 0.15) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - alpha = inits_fixed[1], - beta = inits_fixed[2], - delta = inits_fixed[3], - tau = inits_fixed[4] ) - } - } else { - genInitList <- "random" - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning('Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.') - } - else{ - options(mc.cores = ncore) - } - } - else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$choiceRT_ddm_single - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - parVals <- rstan::extract(fit, permuted = T) - - alpha <- parVals$alpha - beta <- parVals$beta - delta <- parVals$delta - tau <- parVals$tau - - #allIndPars <- array(NA, c(numSubjs, numPars)) - - if (indPars == "mean") { - allIndPars <- c(mean(alpha), - mean(beta), - mean(delta), - mean(tau)) - } else if (indPars == "median") { - allIndPars <- c(median(alpha), - median(beta), - median(delta), - median(tau)) - } else if (indPars == "mode") { - allIndPars <- c(estimate_mode(alpha), - estimate_mode(beta), - estimate_mode(delta), - estimate_mode(tau)) - } - - allIndPars <- t(as.data.frame(allIndPars)) - allIndPars <- as.data.frame(allIndPars) - allIndPars$subjID <- subjID - colnames(allIndPars) <- c("alpha", - "beta", - "delta", - "tau", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. 
If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - return(modelData) -} diff --git a/R/choiceRT_lba.R b/R/choiceRT_lba.R index 5120c64e..8e69d54e 100644 --- a/R/choiceRT_lba.R +++ b/R/choiceRT_lba.R @@ -37,6 +37,7 @@ #' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} #' } #' +#' @include settings.R #' @importFrom rstan vb sampling stan_model rstan_options extract #' @importFrom parallel detectCores #' @importFrom stats median qnorm density @@ -302,7 +303,14 @@ choiceRT_lba <- function(data = "choose", cat("***********************************\n") # Fit the Stan model - m = stanmodels$choiceRT_lba + if (FLAG_BUILD_ALL) { + m = stanmodels$choiceRT_lba + } else { + model_path <- system.file("stan_files", paste0(modelName, ".stan"), + package="hBayesDM") + m <- rstan::stan_model(model_path) + } + if (vb) { # if variational Bayesian fit = rstan::vb(m, data = dataList, @@ -390,3 +398,4 @@ choiceRT_lba <- function(data = "choose", return(modelData) } + diff --git a/R/choiceRT_lba_single.R b/R/choiceRT_lba_single.R index 2c7f0e8c..1800fd8f 100644 --- a/R/choiceRT_lba_single.R +++ b/R/choiceRT_lba_single.R @@ -37,6 +37,7 @@ #' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} #' } #' +#' @include settings.R #' @importFrom 
rstan vb sampling stan_model rstan_options extract #' @importFrom parallel detectCores #' @importFrom stats median qnorm density @@ -199,30 +200,30 @@ choiceRT_lba_single <- function(data = "choose", cat(" # of (max) trials of this subject = ", Tsubj, "\n\n") # Number of different choices - num_choices <- length(unique(rawdata$choice)) + N_choice <- length(unique(rawdata$choice)) # Number of different conditions (e.g. speed/accuracy) - num_cond <- length(unique(rawdata$condition)) + N_cond <- length(unique(rawdata$condition)) # To store number of trials/condition for given subject - n_tr_cond <- array(NA, dim = c(num_cond)) + tr_cond <- array(NA, dim = c(N_cond)) # Loop through conditions - for (j in 1:num_cond) { - n_tr_cond[j] <- sum(rawdata$condition == j) + for (j in 1:N_cond) { + tr_cond[j] <- sum(rawdata$condition == j) } # Max trials across conditions - max_tr <- max(n_tr_cond) + max_tr <- max(tr_cond) # Array for storing RT + choice data - RT <- array(-1, dim = c(num_cond, 2, max_tr)) + RT <- array(-1, dim = c(N_cond, 2, max_tr)) # Reaction time + choice matrix - for (cond in 1:num_cond) { - for (choice in 1:num_choices) { + for (cond in 1:N_cond) { + for (choice in 1:N_choice) { # Subset current data tmp <- subset(rawdata, rawdata$condition == cond & rawdata$choice == choice) # trials for current subject/condition pair - tmp_trials <- n_tr_cond[cond] + tmp_trials <- tr_cond[cond] # Store reaction time + choice RT[cond, 1, 1:tmp_trials] <- tmp$RT RT[cond, 2, 1:tmp_trials] <- tmp$choice @@ -230,11 +231,11 @@ choiceRT_lba_single <- function(data = "choose", } dataList <- list( - N_tr_cond = n_tr_cond, - N_choices = num_choices, - N_cond = num_cond, - RT = RT, - Max_tr = max_tr + N_choice = N_choice, + N_cond = N_cond, + tr_cond = tr_cond, + max_tr = max_tr, + RT = RT ) # inits @@ -279,7 +280,14 @@ choiceRT_lba_single <- function(data = "choose", cat("***********************************\n") # Fit the Stan model - m = stanmodels$choiceRT_lba_single + if 
(FLAG_BUILD_ALL) { + m = stanmodels$choiceRT_lba_single + } else { + model_path <- system.file("stan_files", paste0(modelName, ".stan"), + package="hBayesDM") + m <- rstan::stan_model(model_path) + } + if (vb) { # if variational Bayesian fit = rstan::vb(m, data = dataList, @@ -330,8 +338,8 @@ choiceRT_lba_single <- function(data = "choose", allIndPars$subjID <- subjID colnames(allIndPars) <- c("d", "A", - apply(expand.grid(paste0("v_cd", 1:num_cond), - paste0("_ch", 1:num_choices)), + apply(expand.grid(paste0("v_cd", 1:N_cond), + paste0("_ch", 1:N_choice)), 1, paste, collapse = ""), "tau", "subjID") @@ -365,3 +373,4 @@ choiceRT_lba_single <- function(data = "choose", return(modelData) } + diff --git a/R/cra_exp.R b/R/cra_exp.R old mode 100755 new mode 100644 index 75f2721f..79db92e6 --- a/R/cra_exp.R +++ b/R/cra_exp.R @@ -1,383 +1,84 @@ -#' Choice under Risk and Ambiguity Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Choice under Risk and Ambiguity Task -#' with the following parameters: -#' "alpha" (risk attitude), -#' "beta" (ambiguity attitude), and -#' "gamma" (inverse temperature). -#' -#' Contributor: \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} -#' -#' \strong{MODEL:} -#' Exponential subjective value model (Hsu et al., 2005, Science) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: -#' "subjID", "prob", "ambig", "reward_var", "reward_fix", and "choice". -#' See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. 
A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"cra_exp"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. 
} -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Choice under Risk and Ambiguity Task, there should be five columns of data with the labels -#' "subjID", "prob", "ambig", "reward_var", "reward_fix" and "choice". It is not necessary for the columns to be in this -#' particular order, however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"prob"}}{Objective probability of a variable lottery.} -#' \item{\code{"ambig"}}{Ambiguity levels of variable lotteries. For a risky lottery, \code{"ambig"} equals 0, and more than zero for an ambiguous lottery} -#' \item{\code{"reward_var"}}{Amounts of reward values in variable lotteries. \code{"reward_var"} is assumed to be greater than zero.} -#' \item{\code{"reward_fix"}}{Amounts of reward values in fixed lotteries. 
\code{"reward_fix"} is assumed to be greater than zero.} -#' \item{\code{"choice"}}{If the variable lottery was taken, \code{"choice"} equals 1, otherwise 0.} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. 
The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION cra_exp +#' @templateVar CONTRIBUTOR \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} +#' @templateVar TASK_NAME Choice Under Risk and Ambiguity Task +#' @templateVar MODEL_NAME Exponential Subjective Value Model +#' @templateVar MODEL_CITE (Hsu et al., 2005, Science) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "prob", "ambig", "reward_var", "reward_fix", "choice" +#' @templateVar PARAMETERS "alpha" (risk attitude), "beta" (ambiguity attitude), "gamma" (inverse temperature) +#' @templateVar REGRESSORS "sv", "sv_fix", "sv_var", "p_var" +#' @templateVar LENGTH_DATA_COLUMNS 6 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"prob"}{Objective probability of the variable lottery.} +#' @templateVar DETAILS_DATA_3 \item{"ambig"}{Ambiguity level of the variable lottery (0 for risky lottery; greater than 0 for ambiguous lottery).} +#' @templateVar DETAILS_DATA_4 \item{"reward_var"}{Amount of reward in variable lottery. Assumed to be greater than zero.} +#' @templateVar DETAILS_DATA_5 \item{"reward_fix"}{Amount of reward in fixed lottery. Assumed to be greater than zero.} +#' @templateVar DETAILS_DATA_6 \item{"choice"}{If the variable lottery was selected, choice == 1; otherwise choice == 0.} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Hsu, M., Bhatt, M., Adolphs, R., Tranel, D., & Camerer, C. 
F. (2005). -#' Neural systems responding to degrees of uncertainty in human decision-making. -#' Science, 310(5754), 1680–1683. https://doi.org/10.1126/science.1115327 -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- cra_exp(data = "example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -cra_exp <- function(data = "choose", - niter = 2000, - nwarmup = 1000, - nchain = 1, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - cat("************************************\n") - cat("** Extract model-based regressors **\n") - cat("************************************\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "cra_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... 
**\n") - } - - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[, "subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 3 - POI <- c("mu_alpha", "mu_beta", "mu_gamma", - "alpha" , "beta", "gamma", - "log_lik") - - # TODO: Check which indices are needed for regressors - if (modelRegressor) - POI <- c(POI, "mr_sv", "mr_p_var") - - if (inc_postpred) - POI <- c(POI, "y_pred") - - modelName <- "cra_linear" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (sIdx in 1:numSubjs) { - curSubj <- subjList[sIdx] - Tsubj[sIdx] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - # for multiple subjects - choice <- array(0, c(numSubjs, maxTrials)) - prob <- array(0, c(numSubjs, maxTrials)) - ambig <- array(0, c(numSubjs, maxTrials)) - reward_var <- array(0, c(numSubjs, maxTrials)) - reward_fix <- array(0, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - - choice[i, 1:useTrials] <- tmp[1:useTrials, "choice"] - prob[i, 1:useTrials] <- tmp[1:useTrials, "prob"] - ambig[i, 1:useTrials] <- tmp[1:useTrials, "ambig"] - reward_var[i, 1:useTrials] <- tmp[1:useTrials, "reward_var"] - reward_fix[i, 1:useTrials] <- tmp[1:useTrials, "reward_fix"] - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - choice = choice, - prob = prob, - ambig = ambig, - reward_var = reward_var, - reward_fix = reward_fix) - - # inits - if (inits[1] == "random") { - genInitList <- "random" - } else { - if (inits[1] == "fixed") { - inits_fixed <- c(1.0, 0.0, 1.0) - } else { - if (length(inits) == numPars) - inits_fixed <- inits - else - stop("Check your inital values!") +#' Hsu, M., Bhatt, M., Adolphs, R., Tranel, D., & Camerer, C. F. (2005). Neural systems responding +#' to degrees of uncertainty in human decision-making. Science, 310(5754), 1680-1683. 
+#' https://doi.org/10.1126/science.1115327 + +cra_exp <- hBayesDM_model( + task_name = "cra", + model_name = "exp", + data_columns = c("subjID", "prob", "ambig", "reward_var", "reward_fix", "choice"), + parameters = list("alpha" = c(0, 1, 2), + "beta" = c(-Inf, 0, Inf), + "gamma" = c(0, 1, Inf)), + regressors = list("sv" = 2, + "sv_fix" = 2, + "sv_var" = 2, + "p_var" = 2), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + choice <- array(0, c(n_subj, t_max)) + prob <- array(0, c(n_subj, t_max)) + ambig <- array(0, c(n_subj, t_max)) + reward_var <- array(0, c(n_subj, t_max)) + reward_fix <- array(0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + choice[i, 1:t] <- DT_subj$choice + prob[i, 1:t] <- DT_subj$prob + ambig[i, 1:t] <- DT_subj$ambig + reward_var[i, 1:t] <- DT_subj$rewardvar + reward_fix[i, 1:t] <- DT_subj$rewardfix } - # TODO: Change expressions of randomly generated values in genInitList - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1] / 2), inits_fixed[2], qnorm(inits_fixed[3])), - sigma = c(1.0, 1.0, 1.0), - alpha_p = rep(qnorm(inits_fixed[1] / 2), numSubjs), - beta_p = rep(inits_fixed[2], numSubjs), - gamma_p = rep(qnorm(inits_fixed[3]), numSubjs) - ) - } - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.") - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m <- stanmodels$cra_exp - - if (vb) { # if variational Bayesian - fit <- rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit <- rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - - # Extract the Stan fit object - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) - parVals$y_pred[parVals$y_pred == -1] <- NA - - alpha <- parVals$alpha - beta <- parVals$beta - gamma <- parVals$gamma - - # Individual parameters (e.g., individual posterior means) - measureIndPars <- switch(indPars, mean=mean, median=median, mode=estimate_mode) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - # TODO: Use *apply function instead of for loop - for (i in 1:numSubjs) { - allIndPars[i, ] <- c(measureIndPars(alpha[, i]), - measureIndPars(beta[, i]), - measureIndPars(gamma[, i])) - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("alpha", - "beta", - "gamma", - "subjID") - - # Wrap up data into a list - modelData <- list() - modelData$model <- modelName - modelData$allIndPars <- allIndPars - modelData$parVals <- parVals - modelData$fit <- fit - modelData$rawdata <- rawdata - modelData$modelRegressor <- NA - - # TODO: Change this block after re-choosing the proper regressors - if (modelRegressor) { - sv <- apply(parVals$mr_sv, c(2, 3), measureIndPars) - p_var <- apply(parVals$mr_p_var, c(2, 3), measureIndPars) - - # Initialize modelRegressor and add model-based regressors - modelRegressor <- NULL - 
modelRegressor$sv <- sv - modelRegressor$p_var <- p_var - - modelData$modelRegressor <- modelRegressor + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + choice = choice, + prob = prob, + ambig = ambig, + reward_var = reward_var, + reward_fix = reward_fix + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/cra_linear.R b/R/cra_linear.R old mode 100755 new mode 100644 index 6d3279e6..b6746d5f --- a/R/cra_linear.R +++ b/R/cra_linear.R @@ -1,376 +1,84 @@ -#' Choice under Risk and Ambiguity Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Choice under Risk and Ambiguity Task -#' with the following parameters: -#' "alpha" (risk attitude), -#' "beta" (ambiguity attitude), and -#' "gamma" (inverse temperature). -#' -#' Contributor: \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} -#' -#' \strong{MODEL:} -#' Linear Subjective Value Model (Levy et al., 2010, J Neurophysiol) -#' -#' @param data A .txt file containing the data to be modeled. 
Data columns should be labelled as follows: -#' "subjID", "prob", "ambig", "reward_var", "reward_fix", and "choice". -#' See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. 
-#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"cra_linear"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Choice under Risk and Ambiguity Task, there should be five columns of data with the labels -#' "subjID", "prob", "ambig", "reward_var", "reward_fix" and "choice". It is not necessary for the columns to be in this -#' particular order, however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"prob"}}{Objective probability of a variable lottery.} -#' \item{\code{"ambig"}}{Ambiguity levels of variable lotteries. 
For a risky lottery, \code{"ambig"} equals 0, and more than zero for an ambiguous lottery} -#' \item{\code{"reward_var"}}{Amounts of reward values in variable lotteries. \code{"reward_var"} is assumed to be greater than zero.} -#' \item{\code{"reward_fix"}}{Amounts of reward values in fixed lotteries. \code{"reward_fix"} is assumed to be greater than zero.} -#' \item{\code{"choice"}}{If the variable lottery was taken, \code{"choice"} equals 1, otherwise 0.} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". 
-#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION cra_linear +#' @templateVar CONTRIBUTOR \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} +#' @templateVar TASK_NAME Choice Under Risk and Ambiguity Task +#' @templateVar MODEL_NAME Linear Subjective Value Model +#' @templateVar MODEL_CITE (Levy et al., 2010, J Neurophysiol) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "prob", "ambig", "reward_var", "reward_fix", "choice" +#' @templateVar PARAMETERS "alpha" (risk attitude), "beta" (ambiguity attitude), "gamma" (inverse temperature) +#' @templateVar REGRESSORS "sv", "sv_fix", "sv_var", "p_var" +#' @templateVar LENGTH_DATA_COLUMNS 6 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"prob"}{Objective probability of the variable lottery.} +#' @templateVar DETAILS_DATA_3 \item{"ambig"}{Ambiguity level of the variable lottery (0 for risky lottery; greater than 0 for ambiguous lottery).} +#' @templateVar DETAILS_DATA_4 \item{"reward_var"}{Amount of reward in variable lottery. 
Assumed to be greater than zero.} +#' @templateVar DETAILS_DATA_5 \item{"reward_fix"}{Amount of reward in fixed lottery. Assumed to be greater than zero.} +#' @templateVar DETAILS_DATA_6 \item{"choice"}{If the variable lottery was selected, choice == 1; otherwise choice == 0.} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Levy, I., Snell, J., Nelson, A. J., Rustichini, A., & Glimcher, P. W. (2010). Neural representation of subjective value under risk and ambiguity. -#' Journal of neurophysiology, 103(2), 1036-1047. -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- cra_linear(data = "example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -cra_linear <- function(data = "choose", - niter = 2000, - nwarmup = 1000, - nchain = 1, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) 
- cat("************************************\n") - cat("** Extract model-based regressors **\n") - cat("************************************\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "cra_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[, "subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 3 - POI <- c("mu_alpha", "mu_beta", "mu_gamma", - "alpha" , "beta", "gamma", - "log_lik") - - # TODO: Check which indices are needed for regressors - if (modelRegressor) - POI <- c(POI) - - if (inc_postpred) - POI <- c(POI, "y_pred") - - modelName <- "cra_linear" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (sIdx in 1:numSubjs) { - curSubj <- subjList[sIdx] - Tsubj[sIdx] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - # for multiple subjects - choice <- array(0, c(numSubjs, maxTrials)) - prob <- array(0, c(numSubjs, maxTrials)) - ambig <- array(0, c(numSubjs, maxTrials)) - reward_var <- array(0, c(numSubjs, maxTrials)) - reward_fix <- array(0, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - - choice[i, 1:useTrials] <- tmp[1:useTrials, "choice"] - prob[i, 1:useTrials] <- tmp[1:useTrials, "prob"] - ambig[i, 1:useTrials] <- tmp[1:useTrials, "ambig"] - reward_var[i, 1:useTrials] <- tmp[1:useTrials, "reward_var"] - reward_fix[i, 1:useTrials] <- tmp[1:useTrials, "reward_fix"] - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - choice = choice, - prob = prob, - ambig = ambig, - reward_var = reward_var, - reward_fix = reward_fix) - - # inits - if (inits[1] == "random") { - genInitList <- "random" - } else { - if (inits[1] == "fixed") { - inits_fixed <- c(1.0, 0.0, 1.0) - } else { - if (length(inits) == numPars) - inits_fixed <- inits - else - stop("Check your inital values!") +#' Levy, I., Snell, J., Nelson, A. J., Rustichini, A., & Glimcher, P. W. (2010). Neural +#' representation of subjective value under risk and ambiguity. Journal of Neurophysiology, +#' 103(2), 1036-1047. 
+ +cra_linear <- hBayesDM_model( + task_name = "cra", + model_name = "linear", + data_columns = c("subjID", "prob", "ambig", "reward_var", "reward_fix", "choice"), + parameters = list("alpha" = c(0, 1, 2), + "beta" = c(-Inf, 0, Inf), + "gamma" = c(0, 1, Inf)), + regressors = list("sv" = 2, + "sv_fix" = 2, + "sv_var" = 2, + "p_var" = 2), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + choice <- array(0, c(n_subj, t_max)) + prob <- array(0, c(n_subj, t_max)) + ambig <- array(0, c(n_subj, t_max)) + reward_var <- array(0, c(n_subj, t_max)) + reward_fix <- array(0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + choice[i, 1:t] <- DT_subj$choice + prob[i, 1:t] <- DT_subj$prob + ambig[i, 1:t] <- DT_subj$ambig + reward_var[i, 1:t] <- DT_subj$rewardvar + reward_fix[i, 1:t] <- DT_subj$rewardfix } - # TODO: Change expressions of randomly generated values in genInitList - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1] / 2), inits_fixed[2], qnorm(inits_fixed[3])), - sigma = c(1.0, 1.0, 1.0), - alpha_p = rep(qnorm(inits_fixed[1] / 2), numSubjs), - beta_p = rep(inits_fixed[2], numSubjs), - gamma_p = rep(qnorm(inits_fixed[3]), numSubjs) - ) - } + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + choice = choice, + prob = prob, + ambig = ambig, + reward_var = reward_var, + reward_fix = reward_fix + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - if (ncore > 1) { - numCores <- parallel::detectCores() - - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for 
parallel computing greater than number of locally available cores. Using all locally available cores.") - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m <- stanmodels$cra_linear - - if (vb) { # if variational Bayesian - fit <- rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit <- rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - - # Extract the Stan fit object - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) - parVals$y_pred[parVals$y_pred == -1] <- NA - - alpha <- parVals$alpha - beta <- parVals$beta - gamma <- parVals$gamma - - # Individual parameters (e.g., individual posterior means) - measureIndPars <- switch(indPars, mean=mean, median=median, mode=estimate_mode) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - # TODO: Use *apply function instead of for loop - for (i in 1:numSubjs) { - allIndPars[i, ] <- c(measureIndPars(alpha[, i]), - measureIndPars(beta[, i]), - measureIndPars(gamma[, i])) - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("alpha", - "beta", - "gamma", - "subjID") - - # Wrap up data into a list - modelData <- list() - modelData$model <- modelName - modelData$allIndPars <- allIndPars - modelData$parVals <- parVals - modelData$fit <- fit - modelData$rawdata <- rawdata - modelData$modelRegressor <- NA - - # TODO: Change this block after re-choosing the proper regressors - if (modelRegressor) { - # Initialize modelRegressor and add model-based regressors - modelRegressor <- NULL - modelData$modelRegressor <- modelRegressor - } 
-
-  class(modelData) <- "hBayesDM"
-
-  # Total time of computations
-  endTime <- Sys.time()
-  timeTook <- endTime - startTime
-
-  # If saveDir is specified, save modelData as a file. If not, don't save
-  # Save each file with its model name and time stamp (date & time (hr & min))
-  if (!is.null(saveDir)) {
-    currTime  <- Sys.time()
-    currDate  <- Sys.Date()
-    currHr    <- substr(currTime, 12, 13)
-    currMin   <- substr(currTime, 15, 16)
-    timeStamp <- paste0(currDate, "_", currHr, "_", currMin)
-    dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data))
-    save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData")))
-  }
-
-  # Inform user of completion
-  cat("\n************************************\n")
-  cat("**** Model fitting is complete! ****\n")
-  cat("************************************\n")
-
-  return(modelData)
-}
diff --git a/R/dbdm_prob_weight.R b/R/dbdm_prob_weight.R
new file mode 100644
index 00000000..53e2d83d
--- /dev/null
+++ b/R/dbdm_prob_weight.R
@@ -0,0 +1,86 @@
+#' @templateVar MODEL_FUNCTION dbdm_prob_weight
+#' @templateVar TASK_NAME Description Based Decision Making Task
+#' @templateVar MODEL_NAME Probability Weight Function
+#' @templateVar MODEL_CITE (Erev et al., 2010; Hertwig et al., 2004; Jessup et al., 2008)
+#' @templateVar MODEL_TYPE Hierarchical
+#' @templateVar DATA_COLUMNS "subjID", "opt1hprob", "opt2hprob", "opt1hval", "opt1lval", "opt2hval", "opt2lval", "choice"
+#' @templateVar PARAMETERS "tau" (probability weight function), "rho" (subject utility function), "lambda" (loss aversion parameter), "beta" (inverse softmax temperature)
+#' @templateVar LENGTH_DATA_COLUMNS 8
+#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.}
+#' @templateVar DETAILS_DATA_2 \item{"opt1hprob"}{Possibility of getting higher value of outcome(opt1hval) when choosing option 1.}
+#' @templateVar DETAILS_DATA_3 \item{"opt2hprob"}{Possibility of getting 
higher value of outcome(opt2hval) when choosing option 2.} +#' @templateVar DETAILS_DATA_4 \item{"opt1hval"}{Possible (with opt1hprob probability) outcome of option 1.} +#' @templateVar DETAILS_DATA_5 \item{"opt1lval"}{Possible (with (1 - opt1hprob) probability) outcome of option 1.} +#' @templateVar DETAILS_DATA_6 \item{"opt2hval"}{Possible (with opt2hprob probability) outcome of option 2.} +#' @templateVar DETAILS_DATA_7 \item{"opt2lval"}{Possible (with (1 - opt2hprob) probability) outcome of option 2.} +#' @templateVar DETAILS_DATA_8 \item{"choice"}{If option 1 was selected, choice == 1; else if option 2 was selected, choice == 2.} +#' +#' @template model-documentation +#' +#' @export +#' @include hBayesDM_model.R +#' +#' @references +#' Erev, I., Ert, E., Roth, A. E., Haruvy, E., Herzog, S. M., Hau, R., ... & Lebiere, C. (2010). A +#' choice prediction competition: Choices from experience and from description. Journal of +#' Behavioral Decision Making, 23(1), 15-47. +#' +#' Hertwig, R., Barron, G., Weber, E. U., & Erev, I. (2004). Decisions from experience and the +#' effect of rare events in risky choice. Psychological science, 15(8), 534-539. +#' +#' Jessup, R. K., Bishara, A. J., & Busemeyer, J. R. (2008). Feedback produces divergence from +#' prospect theory in descriptive choice. Psychological Science, 19(10), 1015-1022. 
+ +dbdm_prob_weight <- hBayesDM_model( + task_name = "dbdm", + model_name = "prob_weight", + data_columns = c("subjID", "opt1hprob", "opt2hprob", "opt1hval", "opt1lval", "opt2hval", "opt2lval", "choice"), + parameters = list("tau" = c(0, 0.8, 1), + "rho" = c(0, 0.7, 2), + "lambda" = c(0, 2.5, 5), + "beta" = c(0, 0.2, 1)), + preprocess_func = function(raw_data, general_info) { + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + opt1hprob <- array( 0, c(n_subj, t_max)) + opt2hprob <- array( 0, c(n_subj, t_max)) + opt1hval <- array( 0, c(n_subj, t_max)) + opt1lval <- array( 0, c(n_subj, t_max)) + opt2hval <- array( 0, c(n_subj, t_max)) + opt2lval <- array( 0, c(n_subj, t_max)) + choice <- array(-1, c(n_subj, t_max)) + + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + opt1hprob[i, 1:t] <- DT_subj$opt1hprob + opt2hprob[i, 1:t] <- DT_subj$opt2hprob + opt1hval[i, 1:t] <- DT_subj$opt1hval + opt1lval[i, 1:t] <- DT_subj$opt1lval + opt2hval[i, 1:t] <- DT_subj$opt2hval + opt2lval[i, 1:t] <- DT_subj$opt2lval + choice[i, 1:t] <- DT_subj$choice + } + + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + opt1hprob = opt1hprob, + opt2hprob = opt2hprob, + opt1hval = opt1hval, + opt1lval = opt1lval, + opt2hval = opt2hval, + opt2lval = opt2lval, + choice = choice + ) + + return(data_list) + } +) + diff --git a/R/dd_cs.R b/R/dd_cs.R old mode 100755 new mode 100644 index 9182efd8..47bef9d2 --- a/R/dd_cs.R +++ b/R/dd_cs.R @@ -1,361 +1,77 @@ -#' Delay Discounting Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Delay Discounting Task using the following parameters: "r" (exponential discounting rate; impatience), "s" (time-sensitivity), "beta" (inverse temp.). -#' -#' \strong{MODEL:} -#' Constant-Sensitivity (CS) Model (Ebert & Prelec, 2007, Management Science) -#' -#' @param data A .txt file containing the data to be modeled. 
Data columns should be labelled as follows: "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. 
-#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"dd_cs"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Delay Discounting Task, there should be six columns of data -#' with the labels "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". -#' It is not necessary for the columns to be in this particular order, however it is necessary that they be labelled -#' correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"delay_later"}}{An integer representing the delayed days for the later option within the given trial. 
(e.g., 1 6 15 28 85 170).} -#' \item{\code{"amount_later"}}{A floating number representing the amount for the later option within the given trial. (e.g., 10.5 38.3 13.4 31.4 30.9, etc.).} -#' \item{\code{"delay_sooner"}}{An integer representing the delayed days for the sooner option (e.g., 0 0 0 0).} -#' \item{\code{"amount_sooner"}}{A floating number representing the amount for the sooner option (e.g., 10 10 10 10).} -#' \item{\code{"choice"}}{An integer value representing the chosen option within the given trial (e.g., 0 = instant amount, 1 = delayed amount)} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". 
-#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION dd_cs +#' @templateVar TASK_NAME Delay Discounting Task +#' @templateVar MODEL_NAME Constant-Sensitivity (CS) Model +#' @templateVar MODEL_CITE (Ebert & Prelec, 2007, Management Science) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice" +#' @templateVar PARAMETERS "r" (exponential discounting rate), "s" (impatience), "beta" (inverse temperature) +#' @templateVar LENGTH_DATA_COLUMNS 6 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"delay_later"}{An integer representing the delayed days for the later option (e.g. 1, 6, 28).} +#' @templateVar DETAILS_DATA_3 \item{"amount_later"}{A floating point number representing the amount for the later option (e.g. 10.5, 13.4, 30.9).} +#' @templateVar DETAILS_DATA_4 \item{"delay_sooner"}{An integer representing the delayed days for the sooner option (e.g. 
0).} +#' @templateVar DETAILS_DATA_5 \item{"amount_sooner"}{A floating point number representing the amount for the sooner option (e.g. 10).} +#' @templateVar DETAILS_DATA_6 \item{"choice"}{If amount_later was selected, choice == 1; else if amount_sooner was selected, choice == 0.} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Ebert, J. E. J., & Prelec, D. (2007). The Fragility of Time: Time-Insensitivity and Valuation of the Near and Far Future. -#' Management Science. http://doi.org/10.1287/mnsc.1060.0671 -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- dd_cs(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -dd_cs <- function(data = "choose", - niter = 3000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "fixed", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) 
- stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "dd_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 3 - POI <- c("mu_r", "mu_s", "mu_beta", - "sigma", - "r", "s", "beta", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "dd_cs" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - # Setting maxTrials - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - delay_later <- array(0, c(numSubjs, maxTrials)) - amount_later <- array(0, c(numSubjs, maxTrials)) - delay_sooner <- array(0, c(numSubjs, maxTrials)) - amount_sooner <- array(0, c(numSubjs, maxTrials)) - choice <- array(-1, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - delay_later[i, 1:useTrials] <- tmp$delay_later - amount_later[i, 1:useTrials] <- tmp$amount_later - delay_sooner[i, 1:useTrials] <- tmp$delay_sooner - amount_sooner[i, 1:useTrials] <- tmp$amount_sooner - choice[i, 1:useTrials] <- tmp$choice - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - amount_later = amount_later, - delay_later = delay_later, - amount_sooner = amount_sooner, - delay_sooner = delay_sooner, - choice = choice -) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(0.1, 1.0, 1.0) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2] / 10), qnorm(inits_fixed[3] / 5)), - sigma = c(1.0, 1.0, 1.0), - r_pr = rep(qnorm(inits_fixed[1]), numSubjs), - s_pr = rep(qnorm(inits_fixed[2]/10), numSubjs), - beta_pr = rep(qnorm(inits_fixed[3]/5), numSubjs) -) - } - } else { - genInitList <- "random" - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { 
- options(mc.cores = numCores) - warning('Number of cores specified for parallel computing greater than number of locally available cores. Using all locally available cores.') +#' Ebert, J. E. J., & Prelec, D. (2007). The Fragility of Time: Time-Insensitivity and Valuation of +#' the Near and Far Future. Management Science. http://doi.org/10.1287/mnsc.1060.0671 + +dd_cs <- hBayesDM_model( + task_name = "dd", + model_name = "cs", + data_columns = c("subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice"), + parameters = list("r" = c(0, 0.1, 1), + "s" = c(0, 1, 10), + "beta" = c(0, 1, 5)), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + delay_later <- array( 0, c(n_subj, t_max)) + amount_later <- array( 0, c(n_subj, t_max)) + delay_sooner <- array( 0, c(n_subj, t_max)) + amount_sooner <- array( 0, c(n_subj, t_max)) + choice <- array(-1, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + delay_later[i, 1:t] <- DT_subj$delaylater + amount_later[i, 1:t] <- DT_subj$amountlater + delay_sooner[i, 1:t] <- DT_subj$delaysooner + amount_sooner[i, 1:t] <- DT_subj$amountsooner + choice[i, 1:t] <- DT_subj$choice } - else{ - options(mc.cores = ncore) - } - } - else { - options(mc.cores = 1) - } - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$dd_cs - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - 
init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + delay_later = delay_later, + amount_later = amount_later, + delay_sooner = delay_sooner, + amount_sooner = amount_sooner, + choice = choice + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - r <- parVals$r - s <- parVals$s - beta <- parVals$beta - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c(mean(r[, i]), - mean(s[, i]), - mean(beta[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c(median(r[, i]), - median(s[, i]), - median(beta[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(r[, i]), - estimate_mode(s[, i]), - estimate_mode(beta[, i])) - } - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("r", - "s", - "beta", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. 
If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/dd_cs_single.R b/R/dd_cs_single.R index 453f2019..54c37dc3 100644 --- a/R/dd_cs_single.R +++ b/R/dd_cs_single.R @@ -1,320 +1,60 @@ -#' Delay Discounting Task (Ebert & Prelec, 2007) -#' -#' @description -#' Individual Bayesian Modeling of the Delay Discounting Task using the following parameters: "r" (exponential discounting rate), "s" (impatience), "beta" (inverse temp.). -#' -#' \strong{MODEL:} -#' Constant-Sensitivity (CS) Model (Ebert & Prelec, 2007, Management Science) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. 
-#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{'hBayesDM'} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model ("dd_cs_single").} -#' \item{\code{allIndPars}}{\code{'data.frame'} containing the summarized parameter -#' values (as specified by \code{'indPars'}) for each subject.} -#' \item{\code{parVals}}{A \code{'list'} where each element contains posterior samples -#' over different model parameters. 
} -#' \item{\code{fit}}{A class \code{'stanfit'} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Delay Discounting Task, there should be six columns of data -#' with the labels "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". -#' It is not necessary for the columns to be in this particular order, however it is necessary that they be labelled -#' correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"delay_later"}}{An integer representing the delayed days for the later option within the given trial. (e.g., 1 6 15 28 85 170).} -#' \item{\code{"amount_later"}}{A floating number representing the amount for the later option within the given trial. 
(e.g., 10.5 38.3 13.4 31.4 30.9, etc.).} -#' \item{\code{"delay_sooner"}}{An integer representing the delayed days for the sooner option (e.g., 0 0 0 0).} -#' \item{\code{"amount_sooner"}}{A floating number representing the amount for the sooner option (e.g., 10 10 10 10).} -#' \item{\code{"choice"}}{An integer value representing the chosen option within the given trial (e.g., 0 = instant amount, 1 = delayed amount)} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \strong{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". 
-#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \strong{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. +#' @templateVar MODEL_FUNCTION dd_cs_single +#' @templateVar TASK_NAME Delay Discounting Task +#' @templateVar MODEL_NAME Constant-Sensitivity (CS) Model +#' @templateVar MODEL_CITE (Ebert & Prelec, 2007, Management Science) +#' @templateVar MODEL_TYPE Individual +#' @templateVar DATA_COLUMNS "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice" +#' @templateVar PARAMETERS "r" (exponential discounting rate), "s" (impatience), "beta" (inverse temperature) +#' @templateVar LENGTH_DATA_COLUMNS 6 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"delay_later"}{An integer representing the delayed days for the later option (e.g. 1, 6, 28).} +#' @templateVar DETAILS_DATA_3 \item{"amount_later"}{A floating point number representing the amount for the later option (e.g. 10.5, 13.4, 30.9).} +#' @templateVar DETAILS_DATA_4 \item{"delay_sooner"}{An integer representing the delayed days for the sooner option (e.g. 0).} +#' @templateVar DETAILS_DATA_5 \item{"amount_sooner"}{A floating point number representing the amount for the sooner option (e.g. 
10).} +#' @templateVar DETAILS_DATA_6 \item{"choice"}{If amount_later was selected, choice == 1; else if amount_sooner was selected, choice == 0.} +#' +#' @template model-documentation #' #' @export -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- dd_cs_single(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -dd_cs_single <- function(data = "choose", - niter = 3000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "fixed", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "dd_single_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T) - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... 
**\n") - } - - # Individual Subjects - subjID <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjID) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 3 - POI <- c("r", "s", "beta", - "logR", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "dd_cs_single" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") +#' @include hBayesDM_model.R +#' +#' @references +#' Ebert, J. E. J., & Prelec, D. (2007). The Fragility of Time: Time-Insensitivity and Valuation of +#' the Near and Far Future. Management Science. http://doi.org/10.1287/mnsc.1060.0671 + +dd_cs_single <- hBayesDM_model( + task_name = "dd", + model_name = "cs", + model_type = "single", + data_columns = c("subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice"), + parameters = list("r" = c(NA, 0.1, NA), + "s" = c(NA, 1, NA), + "beta" = c(NA, 1, NA)), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + t_subjs <- general_info$t_subjs + + # Extract from raw_data + delay_later <- raw_data$delaylater + amount_later <- raw_data$amountlater + delay_sooner <- raw_data$delaysooner + amount_sooner <- raw_data$amountsooner + choice <- raw_data$choice + + # Wrap into a list for Stan + data_list <- list( + Tsubj = t_subjs, + delay_later = delay_later, + amount_later = amount_later, + delay_sooner = delay_sooner, + amount_sooner = amount_sooner, + choice = choice + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } - cat(" # of subjects = ", 
numSubjs, "\n") - - ################################################################################ - # THE DATA. ################################################################### - ################################################################################ - - # Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - # - # for (i in 1:numSubjs) { - # curSubj <- subjList[i] - # Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - # } - - # Setting Tsubj (= number of subjects) - Tsubj = dim(rawdata)[1] - - # Information for user continued - cat(" # of (max) trials of this subject = ", Tsubj, "\n\n") - - delay_later <- rawdata$delay_later - amount_later <- rawdata$amount_later - delay_sooner <- rawdata$delay_sooner - amount_sooner <- rawdata$amount_sooner - choice <- rawdata$choice - - dataList <- list( - Tsubj = Tsubj, - amount_later = amount_later, - delay_later = delay_later, - amount_sooner = amount_sooner, - delay_sooner = delay_sooner, - choice = choice -) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(0.1, 1.0, 1.0) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - r = inits_fixed[1], - s = inits_fixed[2], - beta = inits_fixed[3] ) - } - } else { - genInitList <- "random" - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning('Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.') - } - else{ - options(mc.cores = ncore) - } - } - else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$dd_cs_single - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - parVals <- rstan::extract(fit, permuted = T) - - r <- parVals$r - s <- parVals$s - beta <- parVals$beta - logR <- parVals$logR - - #allIndPars <- array(NA, c(numSubjs, numPars)) - - if (indPars == "mean") { - allIndPars <- c(mean(r), - mean(logR), - mean(s), - mean(beta)) - } else if (indPars == "median") { - allIndPars <- c(median(r), - median(logR), - median(s), - median(beta)) - } else if (indPars == "mode") { - allIndPars <- c(estimate_mode(r), - estimate_mode(logR), - estimate_mode(s), - estimate_mode(beta)) - } - - allIndPars = t(as.data.frame(allIndPars)) - allIndPars = as.data.frame(allIndPars) - colnames(allIndPars) <- c("r", - "logR", - "s", - "beta") - allIndPars$subjID = subjID - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. 
If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - return(modelData) -} diff --git a/R/dd_exp.R b/R/dd_exp.R old mode 100755 new mode 100644 index d17319fe..9a70e708 --- a/R/dd_exp.R +++ b/R/dd_exp.R @@ -1,354 +1,76 @@ -#' Delay Discounting Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Delay Discounting Task using the following parameters: "r" (exponential discounting rate) & "beta" (inverse temp.). -#' -#' \strong{MODEL:} -#' Exponential Model (Samuelson, 1937, The Review of Economic Studies) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. 
Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"dd_exp"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. 
} -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Delay Discounting Task, there should be six columns of data -#' with the labels "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". -#' It is not necessary for the columns to be in this particular order, however it is necessary that they be labelled -#' correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"delay_later"}}{An integer representing the delayed days for the later option within the given trial. (e.g., 1 6 15 28 85 170).} -#' \item{\code{"amount_later"}}{A floating number representing the amount for the later option within the given trial. 
(e.g., 10.5 38.3 13.4 31.4 30.9, etc.).} -#' \item{\code{"delay_sooner"}}{An integer representing the delayed days for the sooner option (e.g., 0 0 0 0).} -#' \item{\code{"amount_sooner"}}{A floating number representing the amount for the sooner option (e.g., 10 10 10 10).} -#' \item{\code{"choice"}}{An integer value representing the chosen option within the given trial (e.g., 0 = instant amount, 1 = delayed amount)} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. 
By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION dd_exp +#' @templateVar TASK_NAME Delay Discounting Task +#' @templateVar MODEL_NAME Exponential Model +#' @templateVar MODEL_CITE (Samuelson, 1937, The Review of Economic Studies) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice" +#' @templateVar PARAMETERS "r" (exponential discounting rate), "beta" (inverse temperature) +#' @templateVar LENGTH_DATA_COLUMNS 6 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"delay_later"}{An integer representing the delayed days for the later option (e.g. 1, 6, 28).} +#' @templateVar DETAILS_DATA_3 \item{"amount_later"}{A floating point number representing the amount for the later option (e.g. 10.5, 13.4, 30.9).} +#' @templateVar DETAILS_DATA_4 \item{"delay_sooner"}{An integer representing the delayed days for the sooner option (e.g. 0).} +#' @templateVar DETAILS_DATA_5 \item{"amount_sooner"}{A floating point number representing the amount for the sooner option (e.g. 
10).} +#' @templateVar DETAILS_DATA_6 \item{"choice"}{If amount_later was selected, choice == 1; else if amount_sooner was selected, choice == 0.} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Samuelson, P. A. (1937). A Note on Measurement of Utility. The Review of Economic Studies, 4(2), 155. http://doi.org/10.2307/2967612 -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- dd_exp(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -dd_exp <- function(data = "choose", - niter = 3000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) 
- stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "dd_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 2 - POI <- c("mu_r", "mu_beta", - "sigma", - "r", "beta", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "dd_exp" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - # Setting maxTrials - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - delay_later <- array(0, c(numSubjs, maxTrials)) - amount_later <- array(0, c(numSubjs, maxTrials)) - delay_sooner <- array(0, c(numSubjs, maxTrials)) - amount_sooner <- array(0, c(numSubjs, maxTrials)) - choice <- array(-1, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - delay_later[i, 1:useTrials] <- tmp$delay_later - amount_later[i, 1:useTrials] <- tmp$amount_later - delay_sooner[i, 1:useTrials] <- tmp$delay_sooner - amount_sooner[i, 1:useTrials] <- tmp$amount_sooner - choice[i, 1:useTrials] <- tmp$choice - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - amount_later = amount_later, - delay_later = delay_later, - amount_sooner = amount_sooner, - delay_sooner = delay_sooner, - choice = choice -) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(0.1, 1.0) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2]/5)), - sigma = c(1.0, 1.0), - r_pr = rep(qnorm(inits_fixed[1]), numSubjs), - beta_pr = rep(qnorm(inits_fixed[2]/5), numSubjs) -) - } - } else { - genInitList <- "random" - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning('Number of cores specified for parallel 
computing greater than number of locally available cores. Using all locally available cores.') +#' Samuelson, P. A. (1937). A Note on Measurement of Utility. The Review of Economic Studies, 4(2), +#' 155. http://doi.org/10.2307/2967612 + +dd_exp <- hBayesDM_model( + task_name = "dd", + model_name = "exp", + data_columns = c("subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice"), + parameters = list("r" = c(0, 0.1, 1), + "beta" = c(0, 1, 5)), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + delay_later <- array( 0, c(n_subj, t_max)) + amount_later <- array( 0, c(n_subj, t_max)) + delay_sooner <- array( 0, c(n_subj, t_max)) + amount_sooner <- array( 0, c(n_subj, t_max)) + choice <- array(-1, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + delay_later[i, 1:t] <- DT_subj$delaylater + amount_later[i, 1:t] <- DT_subj$amountlater + delay_sooner[i, 1:t] <- DT_subj$delaysooner + amount_sooner[i, 1:t] <- DT_subj$amountsooner + choice[i, 1:t] <- DT_subj$choice } - else{ - options(mc.cores = ncore) - } - } - else { - options(mc.cores = 1) - } - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$dd_exp - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - 
stepsize = stepsize)) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + delay_later = delay_later, + amount_later = amount_later, + delay_sooner = delay_sooner, + amount_sooner = amount_sooner, + choice = choice + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - r <- parVals$r - beta <- parVals$beta - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c(mean(r[, i]), - mean(beta[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c(median(r[, i]), - median(beta[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(r[, i]), - estimate_mode(beta[, i])) - } - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("r", - "beta", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. 
If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/dd_hyperbolic.R b/R/dd_hyperbolic.R old mode 100755 new mode 100644 index 4e117753..81adbf67 --- a/R/dd_hyperbolic.R +++ b/R/dd_hyperbolic.R @@ -1,354 +1,75 @@ -#' Delay Discounting Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Delay Discounting Task using the following parameters: "k" (discounting rate), "beta" (inverse temperature). -#' -#' \strong{MODEL:} -#' Hyperbolic Model (Mazur, 1987) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. 
Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"dd_hyperbolic"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. 
} -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Delay Discounting Task, there should be six columns of data -#' with the labels "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". -#' It is not necessary for the columns to be in this particular order, however it is necessary that they be labelled -#' correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"delay_later"}}{An integer representing the delayed days for the later option within the given trial. (e.g., 1 6 15 28 85 170).} -#' \item{\code{"amount_later"}}{A floating number representing the amount for the later option within the given trial. 
(e.g., 10.5 38.3 13.4 31.4 30.9, etc.).} -#' \item{\code{"delay_sooner"}}{An integer representing the delayed days for the sooner option (e.g., 0 0 0 0).} -#' \item{\code{"amount_sooner"}}{A floating number representing the amount for the sooner option (e.g., 10 10 10 10).} -#' \item{\code{"choice"}}{An integer value representing the chosen option within the given trial (e.g., 0 = instant amount, 1 = delayed amount)} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. 
By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION dd_hyperbolic +#' @templateVar TASK_NAME Delay Discounting Task +#' @templateVar MODEL_NAME Hyperbolic Model +#' @templateVar MODEL_CITE (Mazur, 1987) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice" +#' @templateVar PARAMETERS "k" (discounting rate), "beta" (inverse temperature) +#' @templateVar LENGTH_DATA_COLUMNS 6 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"delay_later"}{An integer representing the delayed days for the later option (e.g. 1, 6, 28).} +#' @templateVar DETAILS_DATA_3 \item{"amount_later"}{A floating point number representing the amount for the later option (e.g. 10.5, 13.4, 30.9).} +#' @templateVar DETAILS_DATA_4 \item{"delay_sooner"}{An integer representing the delayed days for the sooner option (e.g. 0).} +#' @templateVar DETAILS_DATA_5 \item{"amount_sooner"}{A floating point number representing the amount for the sooner option (e.g. 
10).} +#' @templateVar DETAILS_DATA_6 \item{"choice"}{If amount_later was selected, choice == 1; else if amount_sooner was selected, choice == 0.} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references #' Mazur, J. E. (1987). An adjustment procedure for studying delayed reinforcement. -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- dd_hyperbolic(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -dd_hyperbolic <- function(data = "choose", - niter = 3000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) 
- stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "dd_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 2 - POI <- c("mu_k", "mu_beta", - "sigma", - "k", "beta", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "dd_hyperbolic" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - # Setting maxTrials - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - delay_later <- array(0, c(numSubjs, maxTrials)) - amount_later <- array(0, c(numSubjs, maxTrials)) - delay_sooner <- array(0, c(numSubjs, maxTrials)) - amount_sooner <- array(0, c(numSubjs, maxTrials)) - choice <- array(-1, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - delay_later[i, 1:useTrials] <- tmp$delay_later - amount_later[i, 1:useTrials] <- tmp$amount_later - delay_sooner[i, 1:useTrials] <- tmp$delay_sooner - amount_sooner[i, 1:useTrials] <- tmp$amount_sooner - choice[i, 1:useTrials] <- tmp$choice - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - amount_later = amount_later, - delay_later = delay_later, - amount_sooner = amount_sooner, - delay_sooner = delay_sooner, - choice = choice -) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(0.1, 1.0) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2] / 5)), - sigma = c(1.0, 1.0), - k_pr = rep(qnorm(inits_fixed[1]), numSubjs), - beta_pr = rep(qnorm(inits_fixed[2]/5), numSubjs) -) +dd_hyperbolic <- hBayesDM_model( + task_name = "dd", + model_name = "hyperbolic", + data_columns = c("subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice"), + parameters = list("k" = c(0, 
0.1, 1), + "beta" = c(0, 1, 5)), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + delay_later <- array( 0, c(n_subj, t_max)) + amount_later <- array( 0, c(n_subj, t_max)) + delay_sooner <- array( 0, c(n_subj, t_max)) + amount_sooner <- array( 0, c(n_subj, t_max)) + choice <- array(-1, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + delay_later[i, 1:t] <- DT_subj$delaylater + amount_later[i, 1:t] <- DT_subj$amountlater + delay_sooner[i, 1:t] <- DT_subj$delaysooner + amount_sooner[i, 1:t] <- DT_subj$amountsooner + choice[i, 1:t] <- DT_subj$choice } - } else { - genInitList <- "random" - } - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning('Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.') - } - else{ - options(mc.cores = ncore) - } - } - else { - options(mc.cores = 1) + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + delay_later = delay_later, + amount_later = amount_later, + delay_sooner = delay_sooner, + amount_sooner = amount_sooner, + choice = choice + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$dd_hyperbolic - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA - } - - k <- parVals$k - beta <- parVals$beta - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c(mean(k[, i]), - mean(beta[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c(median(k[, i]), - median(beta[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(k[, i]), - estimate_mode(beta[, i])) - } - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("k", - "beta", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total 
time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/dd_hyperbolic_single.R b/R/dd_hyperbolic_single.R index 32c35a8d..10dd66e6 100644 --- a/R/dd_hyperbolic_single.R +++ b/R/dd_hyperbolic_single.R @@ -1,314 +1,58 @@ -#' Delay Discounting Task (Ebert & Prelec, 2007) -#' -#' @description -#' Individual Bayesian Modeling of the Delay Discounting Task using the following parameters: "k" (discounting rate), "beta" (inverse temperature). -#' -#' \strong{MODEL:} -#' Hyperbolic -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. 
-#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{'hBayesDM'} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model ("dd_hyperbolic_single").} -#' \item{\code{allIndPars}}{\code{'data.frame'} containing the summarized parameter -#' values (as specified by \code{'indPars'}) for each subject.} -#' \item{\code{parVals}}{A \code{'list'} where each element contains posterior samples -#' over different model parameters. 
} -#' \item{\code{fit}}{A class \code{'stanfit'} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Delay Discounting Task, there should be six columns of data -#' with the labels "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". -#' It is not necessary for the columns to be in this particular order, however it is necessary that they be labelled -#' correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"delay_later"}}{An integer representing the delayed days for the later option within the given trial. (e.g., 1 6 15 28 85 170).} -#' \item{\code{"amount_later"}}{A floating number representing the amount for the later option within the given trial. 
(e.g., 10.5 38.3 13.4 31.4 30.9, etc.).} -#' \item{\code{"delay_sooner"}}{An integer representing the delayed days for the sooner option (e.g., 0 0 0 0).} -#' \item{\code{"amount_sooner"}}{A floating number representing the amount for the sooner option (e.g., 10 10 10 10).} -#' \item{\code{"choice"}}{An integer value representing the chosen option within the given trial (e.g., 0 = instant amount, 1 = delayed amount)} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \strong{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". 
-#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \strong{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. +#' @templateVar MODEL_FUNCTION dd_hyperbolic_single +#' @templateVar TASK_NAME Delay Discounting Task +#' @templateVar MODEL_NAME Hyperbolic Model +#' @templateVar MODEL_CITE (Mazur, 1987) +#' @templateVar MODEL_TYPE Individual +#' @templateVar DATA_COLUMNS "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice" +#' @templateVar PARAMETERS "k" (discounting rate), "beta" (inverse temperature) +#' @templateVar LENGTH_DATA_COLUMNS 6 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"delay_later"}{An integer representing the delayed days for the later option (e.g. 1, 6, 28).} +#' @templateVar DETAILS_DATA_3 \item{"amount_later"}{A floating point number representing the amount for the later option (e.g. 10.5, 13.4, 30.9).} +#' @templateVar DETAILS_DATA_4 \item{"delay_sooner"}{An integer representing the delayed days for the sooner option (e.g. 0).} +#' @templateVar DETAILS_DATA_5 \item{"amount_sooner"}{A floating point number representing the amount for the sooner option (e.g. 
10).} +#' @templateVar DETAILS_DATA_6 \item{"choice"}{If amount_later was selected, choice == 1; else if amount_sooner was selected, choice == 0.} +#' +#' @template model-documentation #' #' @export -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- dd_hyperbolic_single(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -dd_hyperbolic_single <- function(data = "choose", - niter = 3000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "dd_single_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T) - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... 
**\n") - } - - # Individual Subjects - subjID <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjID) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 2 - POI <- c("k", "beta", - "logK", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "dd_hyperbolic_single" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") +#' @include hBayesDM_model.R +#' +#' @references +#' Mazur, J. E. (1987). An adjustment procedure for studying delayed reinforcement. + +dd_hyperbolic_single <- hBayesDM_model( + task_name = "dd", + model_name = "hyperbolic", + model_type = "single", + data_columns = c("subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice"), + parameters = list("k" = c(NA, 0.1, NA), + "beta" = c(NA, 1, NA)), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + t_subjs <- general_info$t_subjs + + # Extract from raw_data + delay_later <- raw_data$delaylater + amount_later <- raw_data$amountlater + delay_sooner <- raw_data$delaysooner + amount_sooner <- raw_data$amountsooner + choice <- raw_data$choice + + # Wrap into a list for Stan + data_list <- list( + Tsubj = t_subjs, + delay_later = delay_later, + amount_later = amount_later, + delay_sooner = delay_sooner, + amount_sooner = amount_sooner, + choice = choice + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - # Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - # - # for (i in 1:numSubjs) { - # curSubj <- subjList[i] - # Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - # } - - # Setting Tsubj (= number of subjects) - Tsubj = dim(rawdata)[1] - - # Information for user continued - cat(" # of (max) trials of this subject = ", Tsubj, "\n\n") - - delay_later <- rawdata$delay_later - amount_later <- rawdata$amount_later - delay_sooner <- rawdata$delay_sooner - amount_sooner <- rawdata$amount_sooner - choice <- rawdata$choice - - dataList <- list( - Tsubj = Tsubj, - amount_later = amount_later, - delay_later = delay_later, - amount_sooner = amount_sooner, - delay_sooner = delay_sooner, - choice = choice -) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(0.1, 1.0) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - k = inits_fixed[1], - beta = inits_fixed[2] ) - } - } else { - genInitList <- "random" - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning('Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.') - } - else{ - options(mc.cores = ncore) - } - } - else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$dd_hyperbolic_single - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - parVals <- rstan::extract(fit, permuted = T) - - k <- parVals$k - beta <- parVals$beta - logK <- parVals$logK - - #allIndPars <- array(NA, c(numSubjs, numPars)) - - if (indPars == "mean") { - allIndPars <- c(mean(k), - mean(logK), - mean(beta)) - } else if (indPars == "median") { - allIndPars <- c(median(k), - median(logK), - median(beta)) - } else if (indPars == "mode") { - allIndPars <- c(estimate_mode(k), - estimate_mode(logK), - estimate_mode(beta)) - } - - allIndPars = t(as.data.frame(allIndPars)) - allIndPars = as.data.frame(allIndPars) - colnames(allIndPars) <- c("k", - "logK", - "beta") - allIndPars$subjID = subjID - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. 
If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - return(modelData) -} diff --git a/R/extract_ic.R b/R/extract_ic.R old mode 100755 new mode 100644 diff --git a/R/gng_m1.R b/R/gng_m1.R old mode 100755 new mode 100644 index 73566065..78c789fd --- a/R/gng_m1.R +++ b/R/gng_m1.R @@ -1,388 +1,75 @@ -#' Orthogonalized Go/Nogo Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Orthogonolized Go/Nogo Task using the following parameters: "xi" (noise), "ep" (learning rate), and "rho" (effective size). -#' -#' \strong{MODEL:} -#' RW + noise (Guitart-Masip et al., 2012, Neuroimage) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "cue", "keyPressed", and "outcome". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. 
Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors (Q(Go), Q(NoGo))? TRUE or FALSE. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"gng_m1"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples over different model parameters. 
} -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Go/No-Go Task, there should be four columns of data with the labels "subjID", -#' "cue", "keyPressed", and "outcome". It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"cue"}}{A nominal integer that specifies the cue shown within the given trial (e.g. 1, 2, 3, or 4 in the GNG).} -#' \item{\code{"keyPressed"}}{A binary value representing whether or not the participant responded on the given trial (1 == Press; 0 == No press).} -#' \item{\code{"outcome"}}{A 1, 0, or -1 for each given trial (1 == Positive Feedback; 0 == Neutral Feedback; -1 == Negative Feedback).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. 
As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION gng_m1 +#' @templateVar TASK_NAME Orthogonalized Go/Nogo Task +#' @templateVar MODEL_NAME RW + noise +#' @templateVar MODEL_CITE (Guitart-Masip et al., 2012, Neuroimage) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "cue", "keyPressed", "outcome" +#' @templateVar PARAMETERS "xi" (noise), "ep" (learning rate), "rho" (effective size) +#' @templateVar REGRESSORS "Qgo", "Qnogo", "Wgo", "Wnogo" +#' @templateVar LENGTH_DATA_COLUMNS 4 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"cue"}{Nominal integer representing the cue shown for that trial: 1, 2, 3, or 4.} +#' @templateVar DETAILS_DATA_3 \item{"keyPressed"}{Binary value representing the subject's response for that trial (where Press == 1; No press == 0).} +#' @templateVar DETAILS_DATA_4 \item{"outcome"}{Ternary value representing the outcome of that trial (where Positive feedback == 1; Neutral feedback == 0; Negative feedback == -1).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Guitart-Masip, M., Huys, Q. J. M., Fuentemilla, L., Dayan, P., Duzel, E., & Dolan, R. J. (2012). Go and no-go learning in -#' reward and punishment: Interactions between affect and effect. Neuroimage, 62(1), 154-166. http://doi.org/10.1016/j.neuroimage.2012.04.024 -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. 
-#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- gng_m1(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -gng_m1 <- function(data = "choose", - niter = 5000, - nwarmup = 2000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - cat("************************************\n") - cat("** Extract model-based regressors **\n") - cat("************************************\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "gng_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... 
**\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 3 - POI <- c("mu_xi", "mu_ep", "mu_rho", - "sigma", - "xi", "ep", "rho", - "log_lik") - - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - POI = c(POI, "Qgo", "Qnogo", "Wgo", "Wnogo") - } - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "gng_m1" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - outcome <- array(0, c(numSubjs, maxTrials)) - pressed <- array(-1, c(numSubjs, maxTrials)) - cue <- array(1, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - outcome[i, 1:useTrials] <- tmp$outcome - pressed[i, 1:useTrials] <- tmp$keyPressed - cue[i, 1:useTrials] <- tmp$cue - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - outcome = outcome, - pressed = pressed, - cue = cue, - numPars = numPars -) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(0.10, 0.20, exp(2.0)) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2]), log(inits_fixed[3])), - sigma = c(1.0, 1.0, 1.0), - xi_pr = rep(qnorm(inits_fixed[1]), numSubjs), - ep_pr = rep(qnorm(inits_fixed[2]), numSubjs), - rho_pr = rep(log(inits_fixed[3]), numSubjs) -) +#' Guitart-Masip, M., Huys, Q. J. M., Fuentemilla, L., Dayan, P., Duzel, E., & Dolan, R. J. (2012). +#' Go and no-go learning in reward and punishment: Interactions between affect and effect. +#' Neuroimage, 62(1), 154-166. 
http://doi.org/10.1016/j.neuroimage.2012.04.024 + +gng_m1 <- hBayesDM_model( + task_name = "gng", + model_name = "m1", + data_columns = c("subjID", "cue", "keyPressed", "outcome"), + parameters = list("xi" = c(0, 0.1, 1), + "ep" = c(0, 0.2, 1), + "rho" = c(0, exp(2), Inf)), + regressors = list("Qgo" = 2, + "Qnogo" = 2, + "Wgo" = 2, + "Wnogo" = 2), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + cue <- array( 1, c(n_subj, t_max)) + pressed <- array(-1, c(n_subj, t_max)) + outcome <- array( 0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + cue[i, 1:t] <- DT_subj$cue + pressed[i, 1:t] <- DT_subj$keypressed + outcome[i, 1:t] <- DT_subj$outcome } - } else { - genInitList <- "random" - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. Using all locally available cores.") - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + cue = cue, + pressed = pressed, + outcome = outcome + ) - # Fit the Stan model - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) 
- m = stanmodels$gng_m1_reg - } else { - m = stanmodels$gng_m1 + # Returned data_list will directly be passed to Stan + return(data_list) } - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA - } - - xi <- parVals$xi - ep <- parVals$ep - rho <- parVals$rho - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c(mean(xi[, i]), - mean(ep[, i]), - mean(rho[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c(median(xi[, i]), - median(ep[, i]), - median(rho[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(xi[, i]), - estimate_mode(ep[, i]), - estimate_mode(rho[, i])) - } - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("xi", - "ep", - "rho", - "subjID") - - # model-based regressors? 
- if (modelRegressor) { - if (indPars == "mean") { - Qgo = apply(parVals$Qgo, c(2,3), mean) - Qnogo = apply(parVals$Qnogo, c(2,3), mean) - Wgo = apply(parVals$Wgo, c(2,3), mean) - Wnogo = apply(parVals$Wnogo, c(2,3), mean) - } else if (indPars == "median") { - Qgo = apply(parVals$Qgo, c(2,3), median) - Qnogo = apply(parVals$Qnogo, c(2,3), median) - Wgo = apply(parVals$Wgo, c(2,3), median) - Wnogo = apply(parVals$Wnogo, c(2,3), median) - } else if (indPars == "mode") { - Qgo = apply(parVals$Qgo, c(2,3), estimate_mode) # using mfv function - Qnogo = apply(parVals$Qnogo, c(2,3), estimate_mode) # using mfv function - Wgo = apply(parVals$Wgo, c(2,3), estimate_mode) # using mfv function - Wnogo = apply(parVals$Wnogo, c(2,3), estimate_mode) # using mfv function - } - # initialize modelRegressor and add model-based regressors - modelRegressor = NULL - modelRegressor$Qgo = Qgo - modelRegressor$Qnogo = Qnogo - modelRegressor$Wgo = Wgo - modelRegressor$Wnogo = Wnogo - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata, modelRegressor) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata", "modelRegressor") - } else { - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - } - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. 
If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") +) - return(modelData) -} diff --git a/R/gng_m2.R b/R/gng_m2.R old mode 100755 new mode 100644 index 23b40454..278eb6c5 --- a/R/gng_m2.R +++ b/R/gng_m2.R @@ -1,396 +1,76 @@ -#' Orthogonalized Go/Nogo Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Orthogonolized Go/Nogo Task using the following parameters: "xi" (noise), "ep" (learning rate), "b" (action bias) and "rho" (effective size). -#' -#' \strong{MODEL:} -#' RW + noise + bias (Guitart-Masip et al., 2012, Neuroimage) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "cue", "keyPressed", and "outcome". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. 
-#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors (Q(Go), Q(NoGo))? TRUE or FALSE. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"gng_m2"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples over different model parameters. 
} -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Go/No-Go Task, there should be four columns of data with the labels "subjID", -#' "cue", "keyPressed", and "outcome". It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"cue"}}{A nominal integer that specifies the cue shown within the given trial (e.g. 1, 2, 3, or 4 in the GNG).} -#' \item{\code{"keyPressed"}}{A binary value representing whether or not the participant responded on the given trial (1 == Press; 0 == No press).} -#' \item{\code{"outcome"}}{A 1, 0, or -1 for each given trial (1 == Positive Feedback; 0 == Neutral Feedback; -1 == Negative Feedback).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. 
As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION gng_m2 +#' @templateVar TASK_NAME Orthogonalized Go/Nogo Task +#' @templateVar MODEL_NAME RW + noise + bias +#' @templateVar MODEL_CITE (Guitart-Masip et al., 2012, Neuroimage) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "cue", "keyPressed", "outcome" +#' @templateVar PARAMETERS "xi" (noise), "ep" (learning rate), "b" (action bias), "rho" (effective size) +#' @templateVar REGRESSORS "Qgo", "Qnogo", "Wgo", "Wnogo" +#' @templateVar LENGTH_DATA_COLUMNS 4 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"cue"}{Nominal integer representing the cue shown for that trial: 1, 2, 3, or 4.} +#' @templateVar DETAILS_DATA_3 \item{"keyPressed"}{Binary value representing the subject's response for that trial (where Press == 1; No press == 0).} +#' @templateVar DETAILS_DATA_4 \item{"outcome"}{Ternary value representing the outcome of that trial (where Positive feedback == 1; Neutral feedback == 0; Negative feedback == -1).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Guitart-Masip, M., Huys, Q. J. M., Fuentemilla, L., Dayan, P., Duzel, E., & Dolan, R. J. (2012). Go and no-go learning in -#' reward and punishment: Interactions between affect and effect. Neuroimage, 62(1), 154-166. http://doi.org/10.1016/j.neuroimage.2012.04.024 -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. 
-#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- gng_m2(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -gng_m2 <- function(data = "choose", - niter = 5000, - nwarmup = 2000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - cat("************************************\n") - cat("** Extract model-based regressors **\n") - cat("************************************\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "gng_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... 
**\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 4 - POI <- c("mu_xi", "mu_ep", "mu_b", "mu_rho", - "sigma", - "xi", "ep", "b", "rho", - "log_lik") - - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - POI = c(POI, "Qgo", "Qnogo", "Wgo", "Wnogo") - } - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "gng_m2" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - outcome <- array(0, c(numSubjs, maxTrials)) - pressed <- array(-1, c(numSubjs, maxTrials)) - cue <- array(1, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - outcome[i, 1:useTrials] <- tmp$outcome - pressed[i, 1:useTrials] <- tmp$keyPressed - cue[i, 1:useTrials] <- tmp$cue - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - outcome = outcome, - pressed = pressed, - cue = cue, - numPars = numPars -) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(0.10, 0.20, 0.00, exp(2.0)) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2]), inits_fixed[3], log(inits_fixed[4])), - sigma = c(1.0, 1.0, 1.0, 1.0), - xi_pr = rep(qnorm(inits_fixed[1]), numSubjs), - ep_pr = rep(qnorm(inits_fixed[2]), numSubjs), - b_pr = rep(inits_fixed[3], numSubjs), - rho_pr = rep(log(inits_fixed[4]), numSubjs) -) - } - } else { - genInitList <- "random" - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.") - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - m = stanmodels$gng_m2_reg - } else { - m = stanmodels$gng_m2 - } - - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA - } - - xi <- parVals$xi - ep <- parVals$ep - b <- parVals$b - rho <- parVals$rho - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c(mean(xi[, i]), - mean(ep[, i]), - mean(b[, i]), - mean(rho[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c(median(xi[, i]), - median(ep[, i]), - median(b[, i]), - median(rho[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(xi[, i]), - estimate_mode(ep[, i]), - estimate_mode(b[, i]), - estimate_mode(rho[, i])) +#' Guitart-Masip, M., Huys, Q. J. M., Fuentemilla, L., Dayan, P., Duzel, E., & Dolan, R. J. (2012). +#' Go and no-go learning in reward and punishment: Interactions between affect and effect. +#' Neuroimage, 62(1), 154-166. 
http://doi.org/10.1016/j.neuroimage.2012.04.024 + +gng_m2 <- hBayesDM_model( + task_name = "gng", + model_name = "m2", + data_columns = c("subjID", "cue", "keyPressed", "outcome"), + parameters = list("xi" = c(0, 0.1, 1), + "ep" = c(0, 0.2, 1), + "b" = c(-Inf, 0, Inf), + "rho" = c(0, exp(2), Inf)), + regressors = list("Qgo" = 2, + "Qnogo" = 2, + "Wgo" = 2, + "Wnogo" = 2), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + cue <- array( 1, c(n_subj, t_max)) + pressed <- array(-1, c(n_subj, t_max)) + outcome <- array( 0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + cue[i, 1:t] <- DT_subj$cue + pressed[i, 1:t] <- DT_subj$keypressed + outcome[i, 1:t] <- DT_subj$outcome } - } - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("xi", - "ep", - "b", - "rho", - "subjID") + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + cue = cue, + pressed = pressed, + outcome = outcome + ) - # model-based regressors? 
- if (modelRegressor) { - if (indPars == "mean") { - Qgo = apply(parVals$Qgo, c(2,3), mean) - Qnogo = apply(parVals$Qnogo, c(2,3), mean) - Wgo = apply(parVals$Wgo, c(2,3), mean) - Wnogo = apply(parVals$Wnogo, c(2,3), mean) - } else if (indPars == "median") { - Qgo = apply(parVals$Qgo, c(2,3), median) - Qnogo = apply(parVals$Qnogo, c(2,3), median) - Wgo = apply(parVals$Wgo, c(2,3), median) - Wnogo = apply(parVals$Wnogo, c(2,3), median) - } else if (indPars == "mode") { - Qgo = apply(parVals$Qgo, c(2,3), estimate_mode) # using mfv function - Qnogo = apply(parVals$Qnogo, c(2,3), estimate_mode) # using mfv function - Wgo = apply(parVals$Wgo, c(2,3), estimate_mode) # using mfv function - Wnogo = apply(parVals$Wnogo, c(2,3), estimate_mode) # using mfv function - } - # initialize modelRegressor and add model-based regressors - modelRegressor = NULL - modelRegressor$Qgo = Qgo - modelRegressor$Qnogo = Qnogo - modelRegressor$Wgo = Wgo - modelRegressor$Wnogo = Wnogo - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata, modelRegressor) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata", "modelRegressor") - } else { - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") + # Returned data_list will directly be passed to Stan + return(data_list) } - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. 
If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") +) - return(modelData) -} diff --git a/R/gng_m3.R b/R/gng_m3.R old mode 100755 new mode 100644 index 34092cf3..ba6b1583 --- a/R/gng_m3.R +++ b/R/gng_m3.R @@ -1,405 +1,78 @@ -#' Orthogonalized Go/Nogo Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Orthogonolized Go/Nogo Task using the following parameters: "xi" (noise), "ep" (learning rate), "b" (action bias), "pi" (Pavlovian bias), and "rho" (effective size). -#' -#' \strong{MODEL:} -#' RW + noise + bias + pi (Guitart-Masip et al., 2012, Neuroimage) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "cue", "keyPressed", and "outcome". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. 
Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors (Q(Go), Q(NoGo))? TRUE or FALSE. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"gng_m3"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples over different model parameters. 
} -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Go/No-Go Task, there should be four columns of data with the labels "subjID", -#' "cue", "keyPressed", and "outcome". It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"cue"}}{A nominal integer that specifies the cue shown within the given trial (e.g. 1, 2, 3, or 4 in the GNG).} -#' \item{\code{"keyPressed"}}{A binary value representing whether or not the participant responded on the given trial (1 == Press; 0 == No press).} -#' \item{\code{"outcome"}}{A 1, 0, or -1 for each given trial (1 == Positive Feedback; 0 == Neutral Feedback; -1 == Negative Feedback).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. 
As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION gng_m3 +#' @templateVar TASK_NAME Orthogonalized Go/Nogo Task +#' @templateVar MODEL_NAME RW + noise + bias + pi +#' @templateVar MODEL_CITE (Guitart-Masip et al., 2012, Neuroimage) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "cue", "keyPressed", "outcome" +#' @templateVar PARAMETERS "xi" (noise), "ep" (learning rate), "b" (action bias), "pi" (Pavlovian bias), "rho" (effective size) +#' @templateVar REGRESSORS "Qgo", "Qnogo", "Wgo", "Wnogo", "SV" +#' @templateVar LENGTH_DATA_COLUMNS 4 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"cue"}{Nominal integer representing the cue shown for that trial: 1, 2, 3, or 4.} +#' @templateVar DETAILS_DATA_3 \item{"keyPressed"}{Binary value representing the subject's response for that trial (where Press == 1; No press == 0).} +#' @templateVar DETAILS_DATA_4 \item{"outcome"}{Ternary value representing the outcome of that trial (where Positive feedback == 1; Neutral feedback == 0; Negative feedback == -1).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Guitart-Masip, M., Huys, Q. J. M., Fuentemilla, L., Dayan, P., Duzel, E., & Dolan, R. J. (2012). Go and no-go learning in -#' reward and punishment: Interactions between affect and effect. Neuroimage, 62(1), 154-166. http://doi.org/10.1016/j.neuroimage.2012.04.024 -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. 
-#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- gng_m3(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -gng_m3 <- function(data = "choose", - niter = 5000, - nwarmup = 2000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "fixed", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - cat("************************************\n") - cat("** Extract model-based regressors **\n") - cat("************************************\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "gng_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... 
**\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 5 - POI <- c("mu_xi", "mu_ep", "mu_b", "mu_pi", "mu_rho", - "sigma", - "xi", "ep", "b", "pi", "rho", - "log_lik") - - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - POI = c(POI, "Qgo", "Qnogo", "Wgo", "Wnogo", "SV") - } - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "gng_m3" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - outcome <- array(0, c(numSubjs, maxTrials)) - pressed <- array(-1, c(numSubjs, maxTrials)) - cue <- array(1, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - outcome[i, 1:useTrials] <- tmp$outcome - pressed[i, 1:useTrials] <- tmp$keyPressed - cue[i, 1:useTrials] <- tmp$cue - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - outcome = outcome, - pressed = pressed, - cue = cue, - numPars = numPars -) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(0.1, 0.2, 0.1, 0.1, exp(2.0)) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2]), inits_fixed[3], inits_fixed[4], log(inits_fixed[5])), - sigma = c(1.0, 1.0, 1.0, 1.0, 1.0), - xi_pr = rep(qnorm(inits_fixed[1]), numSubjs), - ep_pr = rep(qnorm(inits_fixed[2]), numSubjs), - b_pr = rep(inits_fixed[3], numSubjs), - pi_pr = rep(inits_fixed[4], numSubjs), - rho_pr = rep(log(inits_fixed[5]), numSubjs) -) - } - } else { - genInitList <- "random" - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.") - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - m = stanmodels$gng_m3_reg - } else { - m = stanmodels$gng_m3 - } - - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA - } - - xi <- parVals$xi - ep <- parVals$ep - b <- parVals$b - pi <- parVals$pi - rho <- parVals$rho - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c(mean(xi[, i]), - mean(ep[, i]), - mean(b[, i]), - mean(pi[, i]), - mean(rho[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c(median(xi[, i]), - median(ep[, i]), - median(b[, i]), - median(pi[, i]), - median(rho[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(xi[, i]), - estimate_mode(ep[, i]), - estimate_mode(b[, i]), - estimate_mode(pi[, i]), - estimate_mode(rho[, i])) +#' Guitart-Masip, M., Huys, Q. J. M., Fuentemilla, L., Dayan, P., Duzel, E., & Dolan, R. J. (2012). +#' Go and no-go learning in reward and punishment: Interactions between affect and effect. +#' Neuroimage, 62(1), 154-166. 
http://doi.org/10.1016/j.neuroimage.2012.04.024 + +gng_m3 <- hBayesDM_model( + task_name = "gng", + model_name = "m3", + data_columns = c("subjID", "cue", "keyPressed", "outcome"), + parameters = list("xi" = c(0, 0.1, 1), + "ep" = c(0, 0.2, 1), + "b" = c(-Inf, 0, Inf), + "pi" = c(-Inf, 0, Inf), + "rho" = c(0, exp(2), Inf)), + regressors = list("Qgo" = 2, + "Qnogo" = 2, + "Wgo" = 2, + "Wnogo" = 2, + "SV" = 2), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + cue <- array( 1, c(n_subj, t_max)) + pressed <- array(-1, c(n_subj, t_max)) + outcome <- array( 0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + cue[i, 1:t] <- DT_subj$cue + pressed[i, 1:t] <- DT_subj$keypressed + outcome[i, 1:t] <- DT_subj$outcome } - } - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("xi", - "ep", - "b", - "pi", - "rho", - "subjID") + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + cue = cue, + pressed = pressed, + outcome = outcome + ) - # model-based regressors? 
- if (modelRegressor) { - if (indPars == "mean") { - Qgo = apply(parVals$Qgo, c(2,3), mean) - Qnogo = apply(parVals$Qnogo, c(2,3), mean) - Wgo = apply(parVals$Wgo, c(2,3), mean) - Wnogo = apply(parVals$Wnogo, c(2,3), mean) - SV = apply(parVals$SV, c(2,3), mean) - } else if (indPars == "median") { - Qgo = apply(parVals$Qgo, c(2,3), median) - Qnogo = apply(parVals$Qnogo, c(2,3), median) - Wgo = apply(parVals$Wgo, c(2,3), median) - Wnogo = apply(parVals$Wnogo, c(2,3), median) - SV = apply(parVals$SV, c(2,3), median) - } else if (indPars == "mode") { - Qgo = apply(parVals$Qgo, c(2,3), estimate_mode) # using mfv function - Qnogo = apply(parVals$Qnogo, c(2,3), estimate_mode) # using mfv function - Wgo = apply(parVals$Wgo, c(2,3), estimate_mode) # using mfv function - Wnogo = apply(parVals$Wnogo, c(2,3), estimate_mode) # using mfv function - SV = apply(parVals$SV, c(2,3), estimate_mode) # using mfv function - } - # initialize modelRegressor and add model-based regressors - modelRegressor = NULL - modelRegressor$Qgo = Qgo - modelRegressor$Qnogo = Qnogo - modelRegressor$Wgo = Wgo - modelRegressor$Wnogo = Wnogo - modelRegressor$SV = SV - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata, modelRegressor) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata", "modelRegressor") - } else { - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") + # Returned data_list will directly be passed to Stan + return(data_list) } - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. 
If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") +) - return(modelData) -} diff --git a/R/gng_m4.R b/R/gng_m4.R old mode 100755 new mode 100644 index c19b05b8..501ad943 --- a/R/gng_m4.R +++ b/R/gng_m4.R @@ -1,407 +1,79 @@ -#' Orthogonalized Go/Nogo Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Orthogonolized Go/Nogo Task using the following parameters: "xi" (noise), "ep" (learning rate), "b" (action bias), "pi" (Pavlovian bias), "rhoRew" (reward sensitivity), and "rhoPun" (punishment sensitivity) -#' -#' \strong{MODEL:} -#' RW (rew/pun) + noise + bias + pi (Cavanagh et al., 2013, J Neuro) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "cue", "keyPressed", and "outcome". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. 
-#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors (Q(Go), Q(NoGo))? TRUE or FALSE. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"gng_m4"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples over different model parameters. 
} -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Go/No-Go Task, there should be four columns of data with the labels "subjID", -#' "cue", "keyPressed", and "outcome". It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"cue"}}{A nominal integer that specifies the cue shown within the given trial (e.g. 1, 2, 3, or 4 in the GNG).} -#' \item{\code{"keyPressed"}}{A binary value representing whether or not the participant responded on the given trial (1 == Press; 0 == No press).} -#' \item{\code{"outcome"}}{A 1, 0, or -1 for each given trial (1 == Positive Feedback; 0 == Neutral Feedback; -1 == Negative Feedback).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. 
As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION gng_m4 +#' @templateVar TASK_NAME Orthogonalized Go/Nogo Task +#' @templateVar MODEL_NAME RW (rew/pun) + noise + bias + pi +#' @templateVar MODEL_CITE (Cavanagh et al., 2013, J Neuro) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "cue", "keyPressed", "outcome" +#' @templateVar PARAMETERS "xi" (noise), "ep" (learning rate), "b" (action bias), "pi" (Pavlovian bias), "rhoRew" (reward sensitivity), "rhoPun" (punishment sensitivity) +#' @templateVar REGRESSORS "Qgo", "Qnogo", "Wgo", "Wnogo", "SV" +#' @templateVar LENGTH_DATA_COLUMNS 4 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"cue"}{Nominal integer representing the cue shown for that trial: 1, 2, 3, or 4.} +#' @templateVar DETAILS_DATA_3 \item{"keyPressed"}{Binary value representing the subject's response for that trial (where Press == 1; No press == 0).} +#' @templateVar DETAILS_DATA_4 \item{"outcome"}{Ternary value representing the outcome of that trial (where Positive feedback == 1; Neutral feedback == 0; Negative feedback == -1).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Cavanagh, J. F., Eisenberg, I., Guitart-Masip, M., Huys, Q., & Frank, M. J. (2013). Frontal Theta Overrides Pavlovian -#' Learning Biases. Journal of Neuroscience, 33(19), 8541-8548. http://doi.org/10.1523/JNEUROSCI.5754-12.2013 -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. 
-#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- gng_m4(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -gng_m4 <- function(data = "choose", - niter = 5000, - nwarmup = 2000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "fixed", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - cat("************************************\n") - cat("** Extract model-based regressors **\n") - cat("************************************\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "gng_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... 
**\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 6 - POI <- c("mu_xi", "mu_ep", "mu_b", "mu_pi", "mu_rhoRew", "mu_rhoPun", - "sigma", - "xi", "ep", "b", "pi", "rhoRew", "rhoPun", - "log_lik") - - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - POI = c(POI, "Qgo", "Qnogo", "Wgo", "Wnogo", "SV") - } - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "gng_m4" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - outcome <- array(0, c(numSubjs, maxTrials)) - pressed <- array(-1, c(numSubjs, maxTrials)) - cue <- array(1, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - outcome[i, 1:useTrials] <- tmp$outcome - pressed[i, 1:useTrials] <- tmp$keyPressed - cue[i, 1:useTrials] <- tmp$cue - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - outcome = outcome, - pressed = pressed, - cue = cue, - numPars = numPars -) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(0.10, 0.20, 0.00, 0.00, exp(2.0), exp(2.0)) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } +#' Cavanagh, J. F., Eisenberg, I., Guitart-Masip, M., Huys, Q., & Frank, M. J. (2013). Frontal Theta +#' Overrides Pavlovian Learning Biases. Journal of Neuroscience, 33(19), 8541-8548. 
+#' http://doi.org/10.1523/JNEUROSCI.5754-12.2013 + +gng_m4 <- hBayesDM_model( + task_name = "gng", + model_name = "m4", + data_columns = c("subjID", "cue", "keyPressed", "outcome"), + parameters = list("xi" = c(0, 0.1, 1), + "ep" = c(0, 0.2, 1), + "b" = c(-Inf, 0, Inf), + "pi" = c(-Inf, 0, Inf), + "rhoRew" = c(0, exp(2), Inf), + "rhoPun" = c(0, exp(2), Inf)), + regressors = list("Qgo" = 2, + "Qnogo" = 2, + "Wgo" = 2, + "Wnogo" = 2, + "SV" = 2), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + cue <- array( 1, c(n_subj, t_max)) + pressed <- array(-1, c(n_subj, t_max)) + outcome <- array( 0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + cue[i, 1:t] <- DT_subj$cue + pressed[i, 1:t] <- DT_subj$keypressed + outcome[i, 1:t] <- DT_subj$outcome } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2]), inits_fixed[3], inits_fixed[4], log(inits_fixed[5]), log(inits_fixed[6])), - sigma = c(1.0, 1.0, 1.0, 1.0, 1.0, 1.0), - xi_pr = rep(qnorm(inits_fixed[1]), numSubjs), - ep_pr = rep(qnorm(inits_fixed[2]), numSubjs), - b_pr = rep(inits_fixed[3], numSubjs), - pi_pr = rep(inits_fixed[4], numSubjs), - rhoRew_pr = rep(log(inits_fixed[5]), numSubjs), - rhoPun_pr = rep(log(inits_fixed[6]), numSubjs) -) - } - } else { - genInitList <- "random" - } - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.") - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + cue = cue, + pressed = pressed, + outcome = outcome + ) - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - m = stanmodels$gng_m4_reg - } else { - m = stanmodels$gng_m4 + # Returned data_list will directly be passed to Stan + return(data_list) } +) - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA - } - - xi <- parVals$xi - ep <- parVals$ep - b <- parVals$b - pi <- parVals$pi - rhoRew <- parVals$rhoRew - rhoPun <- parVals$rhoPun - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c(mean(xi[, i]), - mean(ep[, i]), - mean(b[, i]), - mean(pi[, i]), - mean(rhoRew[, i]), - mean(rhoPun[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c(median(xi[, i]), - median(ep[, i]), - median(b[, i]), - median(pi[, i]), - median(rhoRew[, i]), - median(rhoPun[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(xi[, i]), - estimate_mode(ep[, i]), - estimate_mode(b[, i]), - estimate_mode(pi[, i]), - 
estimate_mode(rhoRew[, i]), - estimate_mode(rhoPun[, i])) - } - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("xi", - "ep", - "b", - "pi", - "rhoRew", - "rhoPun", - "subjID") - - # model-based regressors? - if (modelRegressor) { - if (indPars == "mean") { - Qgo = apply(parVals$Qgo, c(2,3), mean) - Qnogo = apply(parVals$Qnogo, c(2,3), mean) - Wgo = apply(parVals$Wgo, c(2,3), mean) - Wnogo = apply(parVals$Wnogo, c(2,3), mean) - SV = apply(parVals$SV, c(2,3), mean) - } else if (indPars == "median") { - Qgo = apply(parVals$Qgo, c(2,3), median) - Qnogo = apply(parVals$Qnogo, c(2,3), median) - Wgo = apply(parVals$Wgo, c(2,3), median) - Wnogo = apply(parVals$Wnogo, c(2,3), median) - SV = apply(parVals$SV, c(2,3), median) - } else if (indPars == "mode") { - Qgo = apply(parVals$Qgo, c(2,3), estimate_mode) # using mfv function - Qnogo = apply(parVals$Qnogo, c(2,3), estimate_mode) # using mfv function - Wgo = apply(parVals$Wgo, c(2,3), estimate_mode) # using mfv function - Wnogo = apply(parVals$Wnogo, c(2,3), estimate_mode) # using mfv function - SV = apply(parVals$SV, c(2,3), estimate_mode) # using mfv function - } - # initialize modelRegressor and add model-based regressors - modelRegressor = NULL - modelRegressor$Qgo = Qgo - modelRegressor$Qnogo = Qnogo - modelRegressor$Wgo = Wgo - modelRegressor$Wnogo = Wnogo - modelRegressor$SV = SV - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata, modelRegressor) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata", "modelRegressor") - } else { - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - } - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. 
If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/hBayesDM.R b/R/hBayesDM.R old mode 100755 new mode 100644 diff --git a/R/hBayesDM_model.R b/R/hBayesDM_model.R new file mode 100644 index 00000000..577687f2 --- /dev/null +++ b/R/hBayesDM_model.R @@ -0,0 +1,485 @@ +#' hBayesDM Model Base Function +#' +#' @description +#' The base function from which all hBayesDM model functions are created. +#' +#' Contributor: \href{https://ccs-lab.github.io/team/jethro-lee/}{Jethro Lee} +#' +#' @export +#' @keywords internal +#' +#' @include settings.R +#' @include stanmodels.R +#' @importFrom utils head +#' @importFrom stats complete.cases qnorm median +#' @importFrom data.table fread +#' @importFrom parallel detectCores +#' @importFrom rstan stan_model vb sampling extract +#' +#' @param task_name Character value for name of task. E.g. \code{"gng"}. +#' @param model_name Character value for name of model. E.g. \code{"m1"}. +#' @param model_type Character value for modeling type: \code{""} OR \code{"single"} OR +#' \code{"multipleB"}. +#' @param data_columns Character vector of necessary column names for the data. E.g. +#' \code{c("subjID", "cue", "keyPressed", "outcome")}. +#' @param parameters List of parameters, with information about their lower bound, plausible value, +#' upper bound. E.g. 
\code{list("xi" = c(0, 0.1, 1), "ep" = c(0, 0.2, 1), "rho" = c(0, exp(2), +#' Inf))}. +#' @param regressors List of regressors, with information about their extracted dimensions. E.g. +#' \code{list("Qgo" = 2, "Qnogo" = 2, "Wgo" = 2, "Wnogo" = 2)}. OR if model-based regressors are +#' not available for this model, \code{NULL}. +#' @param postpreds Character vector of name(s) for the trial-level posterior predictive +#' simulations. Default is \code{"y_pred"}. OR if posterior predictions are not yet available for +#' this model, \code{NULL}. +#' @param stanmodel_arg Leave as \code{NULL} (default) for completed models. Else should either be a +#' character value (specifying the name of a Stan file) OR a \code{stanmodel} object (returned as +#' a result of running \code{\link[rstan]{stan_model}}). +#' @param preprocess_func Function to preprocess the raw data before it gets passed to Stan. Takes +#' (at least) two arguments: a data.table object \code{raw_data} and a list object +#' \code{general_info}. Possible to include additional argument(s) to use during preprocessing. +#' Should return a list object \code{data_list}, which will then directly be passed to Stan. +#' +#' @details +#' \strong{task_name}: Typically same task models share the same data column requirements. +#' +#' \strong{model_name}: Typically different models are distinguished by their different list of +#' parameters. +#' +#' \strong{model_type} is one of the following three: +#' \describe{ +#' \item{\code{""}}{Modeling of multiple subjects. (Default hierarchical Bayesian analysis.)} +#' \item{\code{"single"}}{Modeling of a single subject.} +#' \item{\code{"multipleB"}}{Modeling of multiple subjects, where multiple blocks exist within +#' each subject.} +#' } +#' +#' \strong{data_columns} must be the entirety of necessary data columns used at some point in the R +#' or Stan code. I.e. \code{"subjID"} must always be included. 
In the case of 'multipleB' type +#' models, \code{"block"} should also be included. +#' +#' \strong{parameters} is a list object, whose keys are the parameters of this model. Each parameter +#' key must be assigned a numeric vector holding 3 elements: the parameter's lower bound, +#' plausible value, and upper bound. +#' +#' \strong{regressors} is a list object, whose keys are the model-based regressors of this model. +#' Each regressor key must be assigned a numeric value indicating the number of dimensions its +#' data will be extracted as. If model-based regressors are not available for this model, this +#' argument should just be \code{NULL}. +#' +#' \strong{postpreds} defaults to \code{"y_pred"}, but any other character vector holding +#' appropriate names is possible (cf. Two-Step Task models). If posterior predictions are not yet +#' available for this model, this argument should just be \code{NULL}. +#' +#' \strong{stanmodel_arg} can be used by developers, during the developmental stage of creating a +#' new model function. If this argument is passed a character value, the Stan file with the +#' corresponding name will be used for model fitting. If this argument is passed a +#' \code{stanmodel} object, that \code{stanmodel} object will be used for model fitting. When +#' creation of the model function is complete, this argument should just be left as \code{NULL}. +#' +#' \strong{preprocess_func} is the part of the code that is specific to the model, and is thus +#' written in the specific model R file.\cr +#' Arguments for this function are: +#' \describe{ +#' \item{\code{raw_data}}{A data.table that holds the raw user data, which was read by using +#' \code{\link[data.table]{fread}}.} +#' \item{\code{general_info}}{A list that holds the general information about the raw data, i.e. 
+#' \code{subjs}, \code{n_subj}, \code{t_subjs}, \code{t_max}, \code{b_subjs}, \code{b_max}.} +#' \item{\code{...}}{Optional additional argument(s) that specific model functions may want to +#' include. Examples of such additional arguments currently being used in hBayesDM models are: +#' \code{RTbound} (choiceRT_ddm models), \code{payscale} (igt models), and \code{trans_prob} (ts +#' models).} +#' } +#' Return value for this function should be: +#' \describe{ +#' \item{\code{data_list}}{A list with appropriately named keys (as required by the model Stan +#' file), holding the fully preprocessed user data.} +#' } +#' NOTE: Syntax for data.table slightly differs from that of data.frame. If you want to use +#' \code{raw_data} as a data.frame when writing the \code{preprocess_func}, simply begin with the +#' line: \code{raw_data <- as.data.frame(raw_data)}.\cr +#' NOTE: Because of allowing case & underscore insensitive column names in user data, +#' \code{raw_data} columns must now be referenced by their lowercase non-underscored versions, +#' e.g. \code{"subjid"}, within the code of the preprocess function.\cr +#' +#' @return A specific hBayesDM model function. + +hBayesDM_model <- function(task_name, + model_name, + model_type = "", + data_columns, + parameters, + regressors = NULL, + postpreds = "y_pred", + stanmodel_arg = NULL, + preprocess_func) { + + # The resulting hBayesDM model function to be returned + function(data = "choose", + niter = 4000, + nwarmup = 1000, + nchain = 4, + ncore = 1, + nthin = 1, + inits = "random", + indPars = "mean", + modelRegressor = FALSE, + vb = FALSE, + inc_postpred = FALSE, + adapt_delta = 0.95, + stepsize = 1, + max_treedepth = 10, + ...) { + + ############### Stop checks ############### + + # Check if regressor available for this model + if (modelRegressor && is.null(regressors)) { + stop("** Model-based regressors are not available for this model. 
**\n") + } + + # Check if postpred available for this model + if (inc_postpred && is.null(postpreds)) { + stop("** Posterior predictions are not yet available for this model. **\n") + } + + # For using "example" or "choose" data + if (data == "example") { + if (model_type == "") { + exampleData <- paste0(task_name, "_", "exampleData.txt") + } else { + exampleData <- paste0(task_name, "_", model_type, "_", "exampleData.txt") + } + data <- system.file("extdata", exampleData, package = "hBayesDM") + } else if (data == "choose") { + data <- file.choose() + } + + # Check if data file exists + if (!file.exists(data)) { + stop("** Data file does not exist. Please check again. **\n", + " e.g. data = \"MySubFolder/myData.txt\"\n") + } + + # Load the data + raw_data <- data.table::fread(file = data, header = TRUE, sep = "\t", data.table = TRUE, + fill = TRUE, stringsAsFactors = TRUE, logical01 = FALSE) + # NOTE: Separator is fixed to "\t" because fread() has trouble reading space delimited files + # that have missing values. + + # Save initial colnames of raw_data for later + colnames_raw_data <- colnames(raw_data) + + # Check if necessary data columns all exist (while ignoring case and underscores) + ..insensitive_data_columns <- NULL # To avoid NOTEs by R CMD check + insensitive_data_columns <- tolower(gsub("_", "", data_columns, fixed = TRUE)) + colnames(raw_data) <- tolower(gsub("_", "", colnames(raw_data), fixed = TRUE)) + if (!all(insensitive_data_columns %in% colnames(raw_data))) { + stop("** Data file is missing one or more necessary data columns. Please check again. 
**\n", + " Necessary data columns are: \"", paste0(data_columns, collapse = "\", \""), "\".\n") + } + + # Remove only the rows containing NAs in necessary columns + complete_rows <- complete.cases(raw_data[, ..insensitive_data_columns]) + sum_incomplete_rows <- sum(!complete_rows) + if (sum_incomplete_rows > 0) { + raw_data <- raw_data[complete_rows, ] + cat("\n") + cat("The following lines of the data file have NAs in necessary columns:\n") + cat(paste0(head(which(!complete_rows), 100) + 1, collapse = ", ")) + if (sum_incomplete_rows > 100) { + cat(", ...") + } + cat(" (total", sum_incomplete_rows, "lines)\n") + cat("These rows are removed prior to modeling the data.\n") + } + + #################################################### + ## Prepare general info about the raw data ##### + #################################################### + + subjs <- NULL # List of unique subjects (1D) + n_subj <- NULL # Total number of subjects (0D) + + b_subjs <- NULL # Number of blocks per each subject (1D) + b_max <- NULL # Maximum number of blocks across all subjects (0D) + + t_subjs <- NULL # Number of trials (per block) per subject (2D or 1D) + t_max <- NULL # Maximum number of trials across all blocks & subjects (0D) + + # To avoid NOTEs by R CMD check + .N <- NULL + subjid <- NULL + + if ((model_type == "") || (model_type == "single")) { + DT_trials <- raw_data[, .N, by = "subjid"] + subjs <- DT_trials$subjid + n_subj <- length(subjs) + t_subjs <- DT_trials$N + t_max <- max(t_subjs) + if ((model_type == "single") && (n_subj != 1)) { + stop("** More than 1 unique subjects exist in data file,", + " while using 'single' type model. 
**\n") + } + } else { # (model_type == "multipleB") + DT_trials <- raw_data[, .N, by = c("subjid", "block")] + DT_blocks <- DT_trials[, .N, by = "subjid"] + subjs <- DT_blocks$subjid + n_subj <- length(subjs) + b_subjs <- DT_blocks$N + b_max <- max(b_subjs) + t_subjs <- array(0, c(n_subj, b_max)) + for (i in 1:n_subj) { + subj <- subjs[i] + b <- b_subjs[i] + t_subjs[i, 1:b] <- DT_trials[subjid == subj]$N + } + t_max <- max(t_subjs) + } + + general_info <- list(subjs, n_subj, b_subjs, b_max, t_subjs, t_max) + names(general_info) <- c("subjs", "n_subj", "b_subjs", "b_max", "t_subjs", "t_max") + + ######################################################### + ## Prepare: data_list ##### + ## pars ##### + ## gen_init for passing to Stan ##### + ######################################################### + + # Preprocess the raw data to pass to Stan + data_list <- preprocess_func(raw_data, general_info, ...) + + # The parameters of interest for Stan + pars <- character() + if (model_type != "single") { + pars <- c(pars, paste0("mu_", names(parameters)), "sigma") + } + pars <- c(pars, names(parameters)) + if ((task_name == "dd") && (model_type == "single")) { + log_parameter1 <- paste0("log", toupper(names(parameters)[1])) + pars <- c(pars, log_parameter1) + } + pars <- c(pars, "log_lik") + if (modelRegressor) { + pars <- c(pars, names(regressors)) + } + if (inc_postpred) { + pars <- c(pars, postpreds) + } + + # Initial values for the parameters + if (inits[1] == "random") { + gen_init <- "random" + } else { + if (inits[1] == "fixed") { + inits <- unlist(lapply(parameters, "[", 2)) # plausible values of each parameter + } else if (length(inits) != length(parameters)) { + stop("** Length of 'inits' must be ", length(parameters), + " (= the number of parameters of this model). Please check again. 
**\n") + } + if (model_type == "single") { + gen_init <- function() { + individual_level <- as.list(inits) + names(individual_level) <- names(parameters) + return(individual_level) + } + } else { + gen_init <- function() { + primes <- numeric(length(parameters)) + for (i in 1:length(parameters)) { + lb <- parameters[[i]][1] # lower bound + ub <- parameters[[i]][3] # upper bound + if (is.infinite(lb)) { + primes[i] <- inits[i] # (-Inf, Inf) + } else if (is.infinite(ub)) { + primes[i] <- log(inits[i] - lb) # ( lb, Inf) + } else { + primes[i] <- qnorm((inits[i] - lb) / (ub - lb)) # ( lb, ub) + } + } + group_level <- list(mu_pr = primes, + sigma = rep(1.0, length(primes))) + individual_level <- lapply(primes, function(x) rep(x, n_subj)) + names(individual_level) <- paste0(names(parameters), "_pr") + return(c(group_level, individual_level)) + } + } + } + + ############### Print for user ############### + + # Full name of model + if (model_type == "") { + model <- paste0(task_name, "_", model_name) + } else { + model <- paste0(task_name, "_", model_name, "_", model_type) + } + + # Set number of cores for parallel computing + if (ncore <= 1) { + ncore <- 1 + } else { + local_cores <- parallel::detectCores() + if (ncore > local_cores) { + ncore <- local_cores + warning("Number of cores specified for parallel computing greater than", + " number of locally available cores. 
Using all locally available cores.\n") + } + } + options(mc.cores = ncore) + + # Print for user + cat("\n") + cat("Model name =", model, "\n") + cat("Data file =", data, "\n") + cat("\n") + cat("Details:\n") + if (vb) { + cat(" Using variational inference\n") + } else { + cat(" # of chains =", nchain, "\n") + cat(" # of cores used =", ncore, "\n") + cat(" # of MCMC samples (per chain) =", niter, "\n") + cat(" # of burn-in samples =", nwarmup, "\n") + } + cat(" # of subjects =", n_subj, "\n") + if (model_type == "multipleB") { + cat(" # of (max) blocks per subject =", b_max, "\n") + } + if (model_type == "") { + cat(" # of (max) trials per subject =", t_max, "\n") + } else if (model_type == "multipleB") { + cat(" # of (max) trials...\n") + cat(" ...per block per subject =", t_max, "\n") + } else { + cat(" # of trials (for this subject) =", t_max, "\n") + } + + # Models with additional arguments + if ((task_name == "choiceRT") && (model_name == "ddm")) { + RTbound <- list(...)$RTbound + cat(" `RTbound` is set to =", ifelse(is.null(RTbound), 0.1, RTbound), "\n") + } + if (task_name == "igt") { + payscale <- list(...)$payscale + cat(" `payscale` is set to =", ifelse(is.null(payscale), 100, payscale), "\n") + } + if (task_name == "ts") { + trans_prob <- list(...)$trans_prob + cat(" `trans_prob` is set to =", ifelse(is.null(trans_prob), 0.7, trans_prob), "\n") + } + + # When extracting model-based regressors + if (modelRegressor) { + cat("\n") + cat("**************************************\n") + cat("** Extract model-based regressors **\n") + cat("**************************************\n") + } + + # An empty newline before Stan begins + if (nchain > 1) { + cat("\n") + } + + ############### Fit & extract ############### + + # Designate the Stan model + if (is.null(stanmodel_arg)) { + if (FLAG_BUILD_ALL) { + stanmodel_arg <- stanmodels[[model]] + } else { + model_path <- system.file("stan_files", paste0(model, ".stan"), + package="hBayesDM") + stanmodel_arg <- 
rstan::stan_model(model_path) + } + } else if (is.character(stanmodel_arg)) { + stanmodel_arg <- rstan::stan_model(stanmodel_arg) + } + + # Fit the Stan model + if (vb) { # if variational Bayesian + fit <- rstan::vb(object = stanmodel_arg, + data = data_list, + pars = pars, + init = gen_init) + } else { + fit <- rstan::sampling(object = stanmodel_arg, + data = data_list, + pars = pars, + init = gen_init, + chains = nchain, + iter = niter, + warmup = nwarmup, + thin = nthin, + control = list(adapt_delta = adapt_delta, + stepsize = stepsize, + max_treedepth = max_treedepth)) + } + + # Extract from the Stan fit object + parVals <- rstan::extract(fit, permuted = TRUE) + + # Trial-level posterior predictive simulations + if (inc_postpred) { + for (pp in postpreds) { + parVals[[pp]][parVals[[pp]] == -1] <- NA + } + } + + # Define measurement of individual parameters + measure_indPars <- switch(indPars, mean = mean, median = median, mode = estimate_mode) + + # Define which individual parameters to measure + which_indPars <- names(parameters) + if ((task_name == "dd") && (model_type == "single")) { + which_indPars <- c(which_indPars, log_parameter1) + } + + # Measure all individual parameters (per subject) + allIndPars <- as.data.frame(array(NA, c(n_subj, length(which_indPars)))) + if (model_type == "single") { + allIndPars[n_subj, ] <- mapply(function(x) measure_indPars(parVals[[x]]), which_indPars) + } else { + for (i in 1:n_subj) { + allIndPars[i, ] <- mapply(function(x) measure_indPars(parVals[[x]][, i]), which_indPars) + } + } + allIndPars <- cbind(subjs, allIndPars) + colnames(allIndPars) <- c("subjID", which_indPars) + + # Model regressors (for model-based neuroimaging, etc.) 
+ if (modelRegressor) { + model_regressor <- list() + for (r in names(regressors)) { + model_regressor[[r]] <- apply(parVals[[r]], c(1:regressors[[r]]) + 1, measure_indPars) + } + } + + # Give back initial colnames and revert data.table to data.frame + colnames(raw_data) <- colnames_raw_data + raw_data <- as.data.frame(raw_data) + + # Wrap up data into a list + modelData <- list() + modelData$model <- model + modelData$allIndPars <- allIndPars + modelData$parVals <- parVals + modelData$fit <- fit + modelData$rawdata <- raw_data + if (modelRegressor) { + modelData$modelRegressor <- model_regressor + } + + # Object class definition + class(modelData) <- "hBayesDM" + + # Inform user of completion + cat("\n") + cat("************************************\n") + cat("**** Model fitting is complete! ****\n") + cat("************************************\n") + + return(modelData) + } +} + diff --git a/R/igt_orl.R b/R/igt_orl.R old mode 100755 new mode 100644 index a8e1d7e2..ec6abd6e --- a/R/igt_orl.R +++ b/R/igt_orl.R @@ -1,377 +1,72 @@ -#' Iowa Gambling Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Iowa Gambling Task using the following parameters: "Arew" (reward learning rate), "Apun" (punishment learning rate), "K" (perseverance decay), "betaF" (outcome frequency weight), and "betaP" (perseverance weight). -#' -#' Contributor: \href{https://ccs-lab.github.io/team/nate-haines/}{Nate Haines} -#' -#' \strong{MODEL:} -#' Outcome-Representation Learning Model (Haines, Vassileva, & Ahn (in press) Cognitive Science)) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "deck", "gain", and "loss". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. 
Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param payscale Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100 -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. 
-#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model ("igt_orl").} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Iowa Gambling Task, there should be four columns of data with the labels -#' "subjID", "deck", "gain", and "loss". It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"deck"}}{A nominal integer representing which deck was chosen within the given trial (e.g. A, B, C, or D == 1, 2, 3, or 4 in the IGT).} -#' \item{\code{"gain"}}{A floating number representing the amount of currency won on the given trial (e.g. 
50, 50, 100).} -#' \item{\code{"loss"}}{A floating number representing the amount of currency lost on the given trial (e.g. 0, -50).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. 
The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION igt_orl +#' @templateVar CONTRIBUTOR \href{https://ccs-lab.github.io/team/nate-haines/}{Nate Haines} +#' @templateVar TASK_NAME Iowa Gambling Task +#' @templateVar MODEL_NAME Outcome-Representation Learning Model +#' @templateVar MODEL_CITE (Haines et al., 2018, Cognitive Science) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "choice", "gain", "loss" +#' @templateVar PARAMETERS "Arew" (reward learning rate), "Apun" (punishment learning rate), "K" (perseverance decay), "betaF" (outcome frequency weight), "betaP" (perseverance weight) +#' @templateVar ADDITIONAL_ARG \code{payscale}: Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100. +#' @templateVar LENGTH_DATA_COLUMNS 4 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"choice"}{Integer indicating which deck was chosen on that trial (where A==1, B==2, C==3, and D==4).} +#' @templateVar DETAILS_DATA_3 \item{"gain"}{Floating point value representing the amount of currency won on that trial (e.g. 50, 100).} +#' @templateVar DETAILS_DATA_4 \item{"loss"}{Floating point value representing the amount of currency lost on that trial (e.g. 0, -50).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. 
The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' Haines, N., Vassileva, J., Ahn, W.-Y. (in press). The Outcome-Representation Learning model: a novel reinforcement learning model of -#' the Iowa Gambling Task. Cognitive Science. -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- igt_orl(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -igt_orl <- function(data = "choose", - niter = 3000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - payscale = 100, - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10 ) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data=="example") { - data <- system.file("extdata", "igt_exampleData.txt", package = "hBayesDM") - } else if (data=="choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table( data, header = T, sep="\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... 
**\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows, ] - cat("The number of rows with NAs=", length(NA_rows), ". They are removed prior to modeling the data. \n", sep="") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 5 - POI <- c("mu_Arew", "mu_Apun", "mu_K", "mu_betaF", "mu_betaP", - "sigma", - "Arew", "Apun", "K", "betaF", "betaP", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "igt_orl" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - cat(" # Payscale = ", payscale, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector( rep( 0, numSubjs ) ) # number of trials for each subject - - for ( sIdx in 1:numSubjs ) { - curSubj <- subjList[ sIdx ] - Tsubj[sIdx] <- sum( rawdata$subjID == curSubj ) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - RLmatrix <- SRLmatrix <- array( 0, c(numSubjs, maxTrials ) ) - Ydata <- array(1, c(numSubjs, maxTrials) ) - - for ( subjIdx in 1:numSubjs ) { - #number of trials for each subj. - useTrials <- Tsubj[subjIdx] - currID <- subjList[ subjIdx ] - rawdata_curSubj <- subset( rawdata, rawdata$subjID == currID ) - RLmatrix[subjIdx, 1:useTrials] <- rawdata_curSubj[, "gain"] -1 * abs( rawdata_curSubj[ , "loss" ]) - - for ( tIdx in 1:useTrials ) { - Y_t <- rawdata_curSubj[ tIdx, "choice" ] # chosen Y on trial "t" - Ydata[ subjIdx , tIdx ] <- Y_t - # For binarizing - if ( RLmatrix[subjIdx, tIdx] > 0 ) { - SRLmatrix[subjIdx, tIdx] <- 1 - } else if ( RLmatrix[subjIdx, tIdx] == 0 ) { - SRLmatrix[subjIdx, tIdx] <- 0 - } else { - SRLmatrix[subjIdx, tIdx] <- -1 - } - } - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj , - sign_out = SRLmatrix, - outcome = RLmatrix / payscale , - choice = Ydata - ) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed = c( 0.1, 0.1, 0.1, 0.1, 1.0) - } else { - if (length(inits)==numPars) { - inits_fixed = inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c( qnorm(inits_fixed[1]), qnorm(inits_fixed[2]), qnorm(inits_fixed[3]/10), inits_fixed[4], inits_fixed[5] ), - sigma = c(1.0, 1.0, 1.0, 1.0, 1.0), - Arew_pr = rep( qnorm(inits_fixed[1]), numSubjs), - Apun_pr = rep( qnorm(inits_fixed[2]), numSubjs), - K_pr = rep( qnorm(inits_fixed[3]/10), numSubjs), - betaF_pr 
= rep( inits_fixed[4], numSubjs), - betaP_pr = rep( inits_fixed[5], numSubjs) - ) +#' Haines, N., Vassileva, J., & Ahn, W.-Y. (2018). The Outcome-Representation Learning Model: A +#' Novel Reinforcement Learning Model of the Iowa Gambling Task. Cognitive Science. +#' https://doi.org/10.1111/cogs.12688 + +igt_orl <- hBayesDM_model( + task_name = "igt", + model_name = "orl", + data_columns = c("subjID", "choice", "gain", "loss"), + parameters = list("Arew" = c(0, 0.1, 1), + "Apun" = c(0, 0.1, 1), + "K" = c(0, 0.1, 5), + "betaF" = c(-Inf, 0.1, Inf), + "betaP" = c(-Inf, 1, Inf)), + preprocess_func = function(raw_data, general_info, payscale = 100) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize data arrays + Ydata <- array(-1, c(n_subj, t_max)) + RLmatrix <- array( 0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + Ydata[i, 1:t] <- DT_subj$choice + RLmatrix[i, 1:t] <- DT_subj$gain - abs(DT_subj$loss) } - } else { - genInitList <- "random" - } - - # For parallel computing if using multi-cores - rstan::rstan_options(auto_write = TRUE) - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.") - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$igt_orl - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize) ) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted=T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred==-1] <- NA - } - Arew <- parVals$Arew - Apun <- parVals$Apun - K <- parVals$K - betaF <- parVals$betaF - betaP <- parVals$betaP - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars=="mean") { - allIndPars[i, ] <- c( mean(Arew[, i]), - mean(Apun[, i]), - mean(K[, i]), - mean(betaF[, i]), - mean(betaP[, i])) - } else if (indPars=="median") { - allIndPars[i, ] <- c( median(Arew[, i]), - median(Apun[, i]), - median(K[, i]), - median(betaF[, i]), - median(betaP[, i]) ) - } else if (indPars=="mode") { - allIndPars[i, ] <- c( estimate_mode(Arew[, i]), - estimate_mode(Apun[, i]), - estimate_mode(K[, i]), - estimate_mode(betaF[, i]), - estimate_mode(betaP[, i]) ) - } - } - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("Arew", - "Apun", - "K", - "betaF", - "betaP", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations 
- endTime <- Sys.time() - timeTook <- endTime - startTime # time took to run the code - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - save(modelData, file=file.path(saveDir, paste0(modelName, "_", timeStamp, ".RData" ) ) ) + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + choice = Ydata, + outcome = RLmatrix / payscale, + sign_out = sign(RLmatrix) + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/igt_pvl_decay.R b/R/igt_pvl_decay.R old mode 100755 new mode 100644 index 13fda2ed..d32e9154 --- a/R/igt_pvl_decay.R +++ b/R/igt_pvl_decay.R @@ -1,361 +1,70 @@ -#' Iowa Gambling Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Iowa Gambling Task using the following parameters: "A" (decay rate), "alpha" (outcome sensitivity), "cons" (response consistency), and "lambda" (loss aversion). -#' -#' \strong{MODEL:} -#' Prospect Valence Learning (PVL) Decay-RI (Ahn et al., 2014, Frontiers in Psychology) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", "gain", and "loss". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. 
Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param payscale Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100 -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. 
-#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model ("igt_pvl_decay").} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Iowa Gambling Task, there should be four columns of data with the labels -#' "subjID", "choice", "gain", and "loss". It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"choice"}}{A nominal integer representing which deck was chosen within the given trial (e.g. 
A, B, C, or D == 1, 2, 3, or 4 in the IGT).} -#' \item{\code{"gain"}}{A floating number representing the amount of currency won on the given trial (e.g. 50, 50, 100).} -#' \item{\code{"loss"}}{A floating number representing the amount of currency lost on the given trial (e.g. 0, -50).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. 
-#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION igt_pvl_decay +#' @templateVar TASK_NAME Iowa Gambling Task +#' @templateVar MODEL_NAME Prospect Valence Learning (PVL) Decay-RI +#' @templateVar MODEL_CITE (Ahn et al., 2014, Frontiers in Psychology) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "choice", "gain", "loss" +#' @templateVar PARAMETERS "A" (decay rate), "alpha" (outcome sensitivity), "cons" (response consistency), "lambda" (loss aversion) +#' @templateVar ADDITIONAL_ARG \code{payscale}: Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100. +#' @templateVar LENGTH_DATA_COLUMNS 4 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"choice"}{Integer indicating which deck was chosen on that trial (where A==1, B==2, C==3, and D==4).} +#' @templateVar DETAILS_DATA_3 \item{"gain"}{Floating point value representing the amount of currency won on that trial (e.g. 50, 100).} +#' @templateVar DETAILS_DATA_4 \item{"loss"}{Floating point value representing the amount of currency lost on that trial (e.g. 0, -50).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Ahn, W.-Y., Vasilev, G., Lee, S.-H., Busemeyer, J. R., Kruschke, J. K., Bechara, A., & Vassileva, J. (2014). 
Decision-making -#' in stimulant and opiate addicts in protracted abstinence: evidence from computational modeling with pure users. Frontiers in -#' Psychology, 5, 1376. http://doi.org/10.3389/fpsyg.2014.00849 -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- igt_pvl_decay(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -igt_pvl_decay <- function(data = "choose", - niter = 3000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - payscale = 100, - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) 
- stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "igt_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 4 - POI <- c("mu_A", "mu_alpha", "mu_cons", "mu_lambda", - "sigma", - "A", "alpha", "cons", "lambda", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "igt_pvl_decay" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - cat(" # Payscale = ", payscale, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (sIdx in 1:numSubjs) { - curSubj <- subjList[sIdx] - Tsubj[sIdx] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - RLmatrix <- array(0, c(numSubjs, maxTrials)) - Ydata <- array(-1, c(numSubjs, maxTrials)) - - for (subjIdx in 1:numSubjs) { - #number of trials for each subj. - useTrials <- Tsubj[subjIdx] - currID <- subjList[subjIdx] - rawdata_curSubj <- subset(rawdata, rawdata$subjID == currID) - RLmatrix[subjIdx, 1:useTrials] <- rawdata_curSubj[, "gain"] - 1 * abs(rawdata_curSubj[, "loss"]) - - for (tIdx in 1:useTrials) { - Y_t <- rawdata_curSubj[tIdx, "choice"] # chosen Y on trial "t" - Ydata[subjIdx , tIdx] <- Y_t +#' Ahn, W.-Y., Vasilev, G., Lee, S.-H., Busemeyer, J. R., Kruschke, J. K., Bechara, A., & Vassileva, +#' J. (2014). Decision-making in stimulant and opiate addicts in protracted abstinence: evidence +#' from computational modeling with pure users. Frontiers in Psychology, 5, 1376. 
+#' http://doi.org/10.3389/fpsyg.2014.00849 + +igt_pvl_decay <- hBayesDM_model( + task_name = "igt", + model_name = "pvl_decay", + data_columns = c("subjID", "choice", "gain", "loss"), + parameters = list("A" = c(0, 0.5, 1), + "alpha" = c(0, 0.5, 2), + "cons" = c(0, 1, 5), + "lambda" = c(0, 1, 10)), + preprocess_func = function(raw_data, general_info, payscale = 100) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize data arrays + Ydata <- array(-1, c(n_subj, t_max)) + RLmatrix <- array( 0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + Ydata[i, 1:t] <- DT_subj$choice + RLmatrix[i, 1:t] <- DT_subj$gain - abs(DT_subj$loss) } - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj , - outcome = RLmatrix / payscale , - choice = Ydata -) - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(0.5, 0.5, 1.0, 1.0) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2] / 2), qnorm(inits_fixed[3] / 5), qnorm(inits_fixed[4] / 10)), - sigma = c(1.0, 1.0, 1.0, 1.0), - A_pr = rep(qnorm(inits_fixed[1]), numSubjs), - alpha_pr = rep(qnorm(inits_fixed[2]/2), numSubjs), - cons_pr = rep(qnorm(inits_fixed[3]/5), numSubjs), - lambda_pr = rep(qnorm(inits_fixed[4]/10), numSubjs) -) - } - } else { - genInitList <- "random" + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + choice = Ydata, + outcome = RLmatrix / payscale + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - # For parallel computing if using 
multi-cores - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. Using all locally available cores.") - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$igt_pvl_decay - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA - } - - A <- parVals$A - alpha <- parVals$alpha - cons <- parVals$cons - lambda <- parVals$lambda - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c(mean(A[, i]), - mean(alpha[, i]), - mean(cons[, i]), - mean(lambda[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c(median(A[, i]), - median(alpha[, i]), - median(cons[, i]), - median(lambda[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(A[, i]), - estimate_mode(alpha[, i]), - estimate_mode(cons[, i]), - estimate_mode(lambda[, i])) - } - } - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("A", - "alpha", - "cons", - "lambda", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, 
rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - on.exit(return(modelData)) -} diff --git a/R/igt_pvl_delta.R b/R/igt_pvl_delta.R old mode 100755 new mode 100644 index 925d268b..d02b49ad --- a/R/igt_pvl_delta.R +++ b/R/igt_pvl_delta.R @@ -1,359 +1,70 @@ -#' Iowa Gambling Task (Ahn et al., 2008) -#' -#' @description -#' Hierarchical Bayesian Modeling of the Iowa Gambling Task using the following parameters: "A" (learning rate), "alpha" (outcome sensitivity), "cons" (response consistency), and "lambda" (loss aversion). -#' -#' \strong{MODEL:} -#' Prospect Valence Learning (PVL) Delta (Ahn et al., 2008, Cognitive Science) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", "gain", and "loss". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. 
-#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param payscale Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100 -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. 
-#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model ("igt_pvl_delta").} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Iowa Gambling Task, there should be four columns of data with the labels -#' "subjID", "choice", "gain", and "loss". It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"choice"}}{A nominal integer representing which deck was chosen within the given trial (e.g. 
A, B, C, or D == 1, 2, 3, or 4 in the IGT).} -#' \item{\code{"gain"}}{A floating number representing the amount of currency won on the given trial (e.g. 50, 50, 100).} -#' \item{\code{"loss"}}{A floating number representing the amount of currency lost on the given trial (e.g. 0, -50).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. 
-#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION igt_pvl_delta +#' @templateVar TASK_NAME Iowa Gambling Task +#' @templateVar TASK_CITE (Ahn et al., 2008) +#' @templateVar MODEL_NAME Prospect Valence Learning (PVL) Delta +#' @templateVar MODEL_CITE (Ahn et al., 2008, Cognitive Science) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "choice", "gain", "loss" +#' @templateVar PARAMETERS "A" (learning rate), "alpha" (outcome sensitivity), "cons" (response consistency), "lambda" (loss aversion) +#' @templateVar ADDITIONAL_ARG \code{payscale}: Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100. +#' @templateVar LENGTH_DATA_COLUMNS 4 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"choice"}{Integer indicating which deck was chosen on that trial (where A==1, B==2, C==3, and D==4).} +#' @templateVar DETAILS_DATA_3 \item{"gain"}{Floating point value representing the amount of currency won on that trial (e.g. 50, 100).} +#' @templateVar DETAILS_DATA_4 \item{"loss"}{Floating point value representing the amount of currency lost on that trial (e.g. 0, -50).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Ahn, W. Y., Busemeyer, J. R., & Wagenmakers, E. J. (2008). 
Comparison of decision learning models using the generalization -#' criterion method. Cognitive Science, 32(8), 1376-1402. http://doi.org/10.1080/03640210802352992 -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- igt_pvl_delta(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -igt_pvl_delta <- function(data = "choose", - niter = 3000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - payscale = 100, - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) 
- stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "igt_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 4 - POI <- c("mu_A", "mu_alpha", "mu_cons", "mu_lambda", - "sigma", - "A", "alpha", "cons", "lambda", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "igt_pvl_delta" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - cat(" # Payscale = ", payscale, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (sIdx in 1:numSubjs) { - curSubj <- subjList[sIdx] - Tsubj[sIdx] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - RLmatrix <- array(0, c(numSubjs, maxTrials)) - Ydata <- array(-1, c(numSubjs, maxTrials)) - - for (subjIdx in 1:numSubjs) { - #number of trials for each subj. - useTrials <- Tsubj[subjIdx] - currID <- subjList[subjIdx] - rawdata_curSubj <- subset(rawdata, rawdata$subjID == currID) - RLmatrix[subjIdx, 1:useTrials] <- rawdata_curSubj[, "gain"] - 1 * abs(rawdata_curSubj[, "loss"]) - - for (tIdx in 1:useTrials) { - Y_t <- rawdata_curSubj[tIdx, "choice"] # chosen Y on trial "t" - Ydata[subjIdx , tIdx] <- Y_t +#' Ahn, W. Y., Busemeyer, J. R., & Wagenmakers, E. J. (2008). Comparison of decision learning models +#' using the generalization criterion method. Cognitive Science, 32(8), 1376-1402. 
+#' http://doi.org/10.1080/03640210802352992 + +igt_pvl_delta <- hBayesDM_model( + task_name = "igt", + model_name = "pvl_delta", + data_columns = c("subjID", "choice", "gain", "loss"), + parameters = list("A" = c(0, 0.5, 1), + "alpha" = c(0, 0.5, 2), + "cons" = c(0, 1, 5), + "lambda" = c(0, 1, 10)), + preprocess_func = function(raw_data, general_info, payscale = 100) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize data arrays + Ydata <- array(-1, c(n_subj, t_max)) + RLmatrix <- array( 0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + Ydata[i, 1:t] <- DT_subj$choice + RLmatrix[i, 1:t] <- DT_subj$gain - abs(DT_subj$loss) } - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj , - outcome = RLmatrix / payscale , - choice = Ydata -) - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(0.5, 0.5, 1.0, 1.0) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2] / 2), qnorm(inits_fixed[3] / 5), qnorm(inits_fixed[4] / 10)), - sigma = c(1.0, 1.0, 1.0, 1.0), - A_pr = rep(qnorm(inits_fixed[1]), numSubjs), - alpha_pr = rep(qnorm(inits_fixed[2]/2), numSubjs), - cons_pr = rep(qnorm(inits_fixed[3]/5), numSubjs), - lambda_pr = rep(qnorm(inits_fixed[4]/10), numSubjs) -) - } - } else { - genInitList <- "random" + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + choice = Ydata, + outcome = RLmatrix / payscale + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - if (ncore > 1) { - numCores <- 
parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. Using all locally available cores.") - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$igt_pvl_delta - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA - } - - A <- parVals$A - alpha <- parVals$alpha - cons <- parVals$cons - lambda <- parVals$lambda - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c(mean(A[, i]), - mean(alpha[, i]), - mean(cons[, i]), - mean(lambda[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c(median(A[, i]), - median(alpha[, i]), - median(cons[, i]), - median(lambda[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(A[, i]), - estimate_mode(alpha[, i]), - estimate_mode(cons[, i]), - estimate_mode(lambda[, i])) - } - } - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("A", - "alpha", - "cons", - "lambda", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", 
"allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/igt_vpp.R b/R/igt_vpp.R old mode 100755 new mode 100644 index bdb2aaac..908f5a5d --- a/R/igt_vpp.R +++ b/R/igt_vpp.R @@ -1,385 +1,73 @@ -#' Iowa Gambling Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Iowa Gambling Task using the following parameters: "A" (learning rate), "alpha" (outcome sensitivity), "cons" (response consistency), "lambda" (loss aversion), "epP" (gain impact), "epN" (loss impact), "K" (decay rate), and "w" (RL weight). -#' -#' \strong{MODEL:} -#' Value-Plus-Perseverance (Worthy et al., 2014, Frontiers in Psychology) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", "gain", and "loss". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. 
-#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param payscale Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100 -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. 
-#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model ("igt_vpp").} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Iowa Gambling Task, there should be four columns of data with the labels -#' "subjID", "choice", "gain", and "loss". It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"choice"}}{A nominal integer representing which deck was chosen within the given trial (e.g. 
A, B, C, or D == 1, 2, 3, or 4 in the IGT).} -#' \item{\code{"gain"}}{A floating number representing the amount of currency won on the given trial (e.g. 50, 50, 100).} -#' \item{\code{"loss"}}{A floating number representing the amount of currency lost on the given trial (e.g. 0, -50).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. 
-#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION igt_vpp +#' @templateVar TASK_NAME Iowa Gambling Task +#' @templateVar MODEL_NAME Value-Plus-Perseverance +#' @templateVar MODEL_CITE (Worthy et al., 2013, Frontiers in Psychology) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "choice", "gain", "loss" +#' @templateVar PARAMETERS "A" (learning rate), "alpha" (outcome sensitivity), "cons" (response consistency), "lambda" (loss aversion), "epP" (gain impact), "epN" (loss impact), "K" (decay rate), "w" (RL weight) +#' @templateVar ADDITIONAL_ARG \code{payscale}: Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100. +#' @templateVar LENGTH_DATA_COLUMNS 4 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"choice"}{Integer indicating which deck was chosen on that trial (where A==1, B==2, C==3, and D==4).} +#' @templateVar DETAILS_DATA_3 \item{"gain"}{Floating point value representing the amount of currency won on that trial (e.g. 50, 100).} +#' @templateVar DETAILS_DATA_4 \item{"loss"}{Floating point value representing the amount of currency lost on that trial (e.g. 0, -50).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Hoffman, M. D., & Gelman, A. (2014). 
The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' Worthy, D. A., & Todd Maddox, W. (2014). A comparison model of reinforcement-learning and win-stay-lose-shift decision-making -#' processes: A tribute to W.K. Estes. Journal of Mathematical Psychology, 59, 41-49. http://doi.org/10.1016/j.jmp.2013.10.001 -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- igt_vpp(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -igt_vpp <- function(data = "choose", - niter = 3000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - payscale = 100, - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) 
- stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "igt_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 8 - POI <- c("mu_A", "mu_alpha", "mu_cons", "mu_lambda", "mu_epP", "mu_epN", "mu_K", "mu_w", - "sigma", - "A", "alpha", "cons", "lambda", "epP", "epN", "K", "w", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "igt_vpp" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - cat(" # Payscale = ", payscale, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (sIdx in 1:numSubjs) { - curSubj <- subjList[sIdx] - Tsubj[sIdx] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - RLmatrix <- array(0, c(numSubjs, maxTrials)) - Ydata <- array(-1, c(numSubjs, maxTrials)) - - for (subjIdx in 1:numSubjs) { - #number of trials for each subj. - useTrials <- Tsubj[subjIdx] - currID <- subjList[subjIdx] - rawdata_curSubj <- subset(rawdata, rawdata$subjID == currID) - RLmatrix[subjIdx, 1:useTrials] <- rawdata_curSubj[, "gain"] - 1 * abs(rawdata_curSubj[, "loss"]) - - for (tIdx in 1:useTrials) { - Y_t <- rawdata_curSubj[tIdx, "choice"] # chosen Y on trial "t" - Ydata[subjIdx , tIdx] <- Y_t +#' Worthy, D. A., & Todd Maddox, W. (2013). A comparison model of reinforcement-learning and +#' win-stay-lose-shift decision-making processes: A tribute to W.K. Estes. Journal of Mathematical +#' Psychology, 59, 41-49. 
http://doi.org/10.1016/j.jmp.2013.10.001 + +igt_vpp <- hBayesDM_model( + task_name = "igt", + model_name = "vpp", + data_columns = c("subjID", "choice", "gain", "loss"), + parameters = list("A" = c(0, 0.5, 1), + "alpha" = c(0, 0.5, 2), + "cons" = c(0, 1, 5), + "lambda" = c(0, 1, 10), + "epP" = c(-Inf, 0, Inf), + "epN" = c(-Inf, 0, Inf), + "K" = c(0, 0.5, 1), + "w" = c(0, 0.5, 1)), + preprocess_func = function(raw_data, general_info, payscale = 100) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize data arrays + Ydata <- array(-1, c(n_subj, t_max)) + RLmatrix <- array( 0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + Ydata[i, 1:t] <- DT_subj$choice + RLmatrix[i, 1:t] <- DT_subj$gain - abs(DT_subj$loss) } - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - outcome = RLmatrix / payscale, - choice = Ydata -) - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed = c(0.5, 0.5, 1.0, 1.0, 0, 0, 0.5, 0.5) - } else { - if (length(inits) == numPars) { - inits_fixed = inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2]), qnorm(inits_fixed[3] / 5), qnorm(inits_fixed[4] / 10), - inits_fixed[5], inits_fixed[6], qnorm(inits_fixed[7]), qnorm(inits_fixed[8])), - sigma = c(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0), - A_pr = rep(qnorm(inits_fixed[1]), numSubjs), - alpha_pr = rep(qnorm(inits_fixed[2]), numSubjs), - cons_pr = rep(qnorm(inits_fixed[3]/5), numSubjs), - lambda_pr = rep(qnorm(inits_fixed[4]/10), numSubjs), - epP_pr = rep(inits_fixed[5], numSubjs), - epN_pr = rep(inits_fixed[6], numSubjs), - K_pr = rep(qnorm(inits_fixed[7]), 
numSubjs), - w_pr = rep(qnorm(inits_fixed[8]), numSubjs) -) - } - } else { - genInitList <- "random" + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + choice = Ydata, + outcome = RLmatrix / payscale + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - # For parallel computing if using multi-cores - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. Using all locally available cores.") - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$igt_vpp - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA - } - - A <- parVals$A - alpha <- parVals$alpha - cons <- parVals$cons - lambda <- parVals$lambda - epP <- parVals$epP - epN <- parVals$epN - K <- parVals$K - w <- parVals$w - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c(mean(A[, i]), - mean(alpha[, i]), - mean(cons[, i]), - mean(lambda[, i]), - mean(epP[, i]), - mean(epN[, i]), - mean(K[, i]), - mean(w[, i])) - } else if 
(indPars == "median") { - allIndPars[i,] <- c(median(A[, i]), - median(alpha[, i]), - median(cons[, i]), - median(lambda[, i]), - median(epP[, i]), - median(epN[, i]), - median(K[, i]), - median(w[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(A[, i]), - estimate_mode(alpha[, i]), - estimate_mode(cons[, i]), - estimate_mode(lambda[, i]), - estimate_mode(epP[, i]), - estimate_mode(epN[, i]), - estimate_mode(K[, i]), - estimate_mode(w[, i])) - } - } - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("A", - "alpha", - "cons", - "lambda", - "epP", - "epN", - "K", - "w", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! 
****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/multiplot.R b/R/multiplot.R old mode 100755 new mode 100644 diff --git a/R/peer_ocu.R b/R/peer_ocu.R old mode 100755 new mode 100644 index 093fff26..dfe6547f --- a/R/peer_ocu.R +++ b/R/peer_ocu.R @@ -1,368 +1,87 @@ -#' Peer influence task (Chung et al., 2015 Nature Neuroscience) -#' -#' @description -#' Hierarchical Bayesian Modeling of the Peer Influence Task with the following parameters: "rho" (risk preference), "tau" (inverse temperature), and "ocu" (other-conferred utility).\cr\cr -#' -#' Contributor: \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -#' -#' \strong{MODEL:} -#' Peer influence task - OCU (other-conferred utility) model -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "gain", "loss", "cert", and "gamble". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. 
-#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"ra_prospect"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. 
-#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Risk Aversion Task, there should be four columns of data with the labels -#' "subjID", "condition", "p_gamble", "safe_Hpayoff", "safe_Lpayoff", "risky_Hpayoff", "risky_Lpayoff", "choice". It is not necessary for the columns to be in this -#' particular order, however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"condition"}}{0: solo, 1: info (safe/safe), 2: info (mix), 3: info (risky/risky)} -#' \item{\code{"p_gamble"}}{Probability of receiving a high payoff (same for both options)} -#' \item{\code{"safe_Hpayoff"}}{High payoff of the safe option} -#' \item{\code{"safe_Lpayoff"}}{Low payoff of the safe option} -#' \item{\code{"risky_Hpayoff"}}{High payoff of the risky option} -#' \item{\code{"risky_Lpayoff"}}{Low payoff of the risky option} -#' \item{\code{"choice"}}{Which option was chosen? 0: safe 1: risky} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. 
The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. 
+#' @templateVar MODEL_FUNCTION peer_ocu +#' @templateVar CONTRIBUTOR \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +#' @templateVar TASK_NAME Peer Influence Task +#' @templateVar TASK_CITE (Chung et al., 2015, Nature Neuroscience) +#' @templateVar MODEL_NAME Other-Conferred Utility (OCU) Model +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "condition", "p_gamble", "safe_Hpayoff", "safe_Lpayoff", "risky_Hpayoff", "risky_Lpayoff", "choice" +#' @templateVar PARAMETERS "rho" (risk preference), "tau" (inverse temperature), "ocu" (other-conferred utility) +#' @templateVar LENGTH_DATA_COLUMNS 8 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"condition"}{0: solo, 1: info (safe/safe), 2: info (mix), 3: info (risky/risky).} +#' @templateVar DETAILS_DATA_3 \item{"p_gamble"}{Probability of receiving a high payoff (same for both options).} +#' @templateVar DETAILS_DATA_4 \item{"safe_Hpayoff"}{High payoff of the safe option.} +#' @templateVar DETAILS_DATA_5 \item{"safe_Lpayoff"}{Low payoff of the safe option.} +#' @templateVar DETAILS_DATA_6 \item{"risky_Hpayoff"}{High payoff of the risky option.} +#' @templateVar DETAILS_DATA_7 \item{"risky_Lpayoff"}{Low payoff of the risky option.} +#' @templateVar DETAILS_DATA_8 \item{"choice"}{Which option was chosen? 0: safe, 1: risky.} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Chung, D., Christopoulos, G. I., King-Casas, B., Ball, S. B., & Chiu, P. H. (2015). Social signals of safety and risk confer utility and have asymmetric effects on observers' choices. -#' Nature neuroscience, 18(6), 912-916. 
-#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- peer_ocu(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' -#' -#' } - -peer_ocu <- function(data = "choose", - niter = 4000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "fixed", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "peer_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". 
They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 3 - POI <- c("mu_rho", "mu_tau", "mu_ocu", - "sigma", - "rho" , "tau", "ocu", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "peer_ocu" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. ################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (sIdx in 1:numSubjs) { - curSubj <- subjList[sIdx] - Tsubj[sIdx] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - # for multiple subjects - safe_Hpayoff <- array(0, c(numSubjs, maxTrials)) - safe_Lpayoff <- array(0, c(numSubjs, maxTrials)) - risky_Hpayoff <- array(0, c(numSubjs, maxTrials)) - risky_Lpayoff <- array(0, c(numSubjs, maxTrials)) - condition <- array(0, c(numSubjs, maxTrials)) - p_gamble <- array(0, c(numSubjs, maxTrials)) - choice <- array(-1, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - safe_Hpayoff[i, 1:useTrials] 
<- tmp[1:useTrials, "safe_Hpayoff"] - safe_Lpayoff[i, 1:useTrials] <- tmp[1:useTrials, "safe_Lpayoff"] - risky_Hpayoff[i, 1:useTrials] <- tmp[1:useTrials, "risky_Hpayoff"] - risky_Lpayoff[i, 1:useTrials] <- tmp[1:useTrials, "risky_Lpayoff"] - condition[i, 1:useTrials] <- tmp[1:useTrials, "condition"] - p_gamble[i, 1:useTrials] <- tmp[1:useTrials, "p_gamble"] - choice[i, 1:useTrials] <- tmp[1:useTrials, "choice"] - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - numPars = numPars, - safe_Hpayoff = safe_Hpayoff, - safe_Lpayoff = safe_Lpayoff, - risky_Hpayoff = risky_Hpayoff, - risky_Lpayoff = risky_Lpayoff, - condition = condition, - p_gamble = p_gamble, - choice = choice -) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(1.0, 1.0, 0.0) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]/2), log(inits_fixed[2]), inits_fixed[3]), - sigma = c(1.0, 1.0, 1.0), - rho_p = rep(qnorm(inits_fixed[1]/2), numSubjs), - tau_p = rep(log(inits_fixed[2]), numSubjs), - ocu_p = rep(inits_fixed[3], numSubjs) - ) +#' Chung, D., Christopoulos, G. I., King-Casas, B., Ball, S. B., & Chiu, P. H. (2015). Social +#' signals of safety and risk confer utility and have asymmetric effects on observers' choices. +#' Nature Neuroscience, 18(6), 912-916. 
+ +peer_ocu <- hBayesDM_model( + task_name = "peer", + model_name = "ocu", + data_columns = c("subjID", "condition", "p_gamble", "safe_Hpayoff", "safe_Lpayoff", "risky_Hpayoff", "risky_Lpayoff", "choice"), + parameters = list("rho" = c(0, 1, 2), + "tau" = c(0, 1, Inf), + "ocu" = c(-Inf, 0, Inf)), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + condition <- array( 0, c(n_subj, t_max)) + p_gamble <- array( 0, c(n_subj, t_max)) + safe_Hpayoff <- array( 0, c(n_subj, t_max)) + safe_Lpayoff <- array( 0, c(n_subj, t_max)) + risky_Hpayoff <- array( 0, c(n_subj, t_max)) + risky_Lpayoff <- array( 0, c(n_subj, t_max)) + choice <- array(-1, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + condition[i, 1:t] <- DT_subj$condition + p_gamble[i, 1:t] <- DT_subj$pgamble + safe_Hpayoff[i, 1:t] <- DT_subj$safehpayoff + safe_Lpayoff[i, 1:t] <- DT_subj$safelpayoff + risky_Hpayoff[i, 1:t] <- DT_subj$riskyhpayoff + risky_Lpayoff[i, 1:t] <- DT_subj$riskylpayoff + choice[i, 1:t] <- DT_subj$choice } - } else { - genInitList <- "random" - } - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.") - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + condition = condition, + p_gamble = p_gamble, + safe_Hpayoff = safe_Hpayoff, + safe_Lpayoff = safe_Lpayoff, + risky_Hpayoff = risky_Hpayoff, + risky_Lpayoff = risky_Lpayoff, + choice = choice + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$peer_ocu - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - # Extract the Stan fit object - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA - } - - rho <- parVals$rho - tau <- parVals$tau - ocu <- parVals$ocu - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c(mean(rho[, i]), - mean(tau[, i]), - mean(ocu[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c(median(rho[, i]), - median(tau[, i]), - median(ocu[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(rho[, i]), - estimate_mode(tau[, i]), - estimate_mode(ocu[, i])) - } - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("rho", - "tau", - "ocu", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, 
allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/plot.hBayesDM.R b/R/plot.hBayesDM.R old mode 100755 new mode 100644 diff --git a/R/plotDist.R b/R/plotDist.R old mode 100755 new mode 100644 diff --git a/R/plotHDI.R b/R/plotHDI.R old mode 100755 new mode 100644 diff --git a/R/plotInd.R b/R/plotInd.R old mode 100755 new mode 100644 diff --git a/R/printFit.R b/R/printFit.R old mode 100755 new mode 100644 diff --git a/R/prl_ewa.R b/R/prl_ewa.R old mode 100755 new mode 100644 index 2b55e2a1..c83e8e68 --- a/R/prl_ewa.R +++ b/R/prl_ewa.R @@ -1,372 +1,72 @@ -#' Probabilistic Reversal Learning Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning (PRL) Task using the following parameters: "phi" (1 - learning rate), "rho" (experience decay factor), and "beta" (inverse temperature). 
-#' -#' Contributor (for model-based regressors): \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -#' -#' \strong{MODEL:} -#' Experience-Weighted Attraction Model (Ouden et al., 2013, Neuron) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", and "outcome". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. 
-#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"prl_ewa"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Probabilistic Reversal Learning Task, there should be three columns of data -#' with the labels "subjID", "choice", and "outcome". 
It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"choice"}}{An integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in PRL).} -#' \item{\code{"outcome"}}{A 1 or -1 for outcome within each given trial (1 = reward, -1 = loss).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". 
-#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION prl_ewa +#' @templateVar CONTRIBUTOR (for model-based regressors) \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +#' @templateVar TASK_NAME Probabilistic Reversal Learning Task +#' @templateVar MODEL_NAME Experience-Weighted Attraction Model +#' @templateVar MODEL_CITE (Ouden et al., 2013, Neuron) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "choice", "outcome" +#' @templateVar PARAMETERS "phi" (1 - learning rate), "rho" (experience decay factor), "beta" (inverse temperature) +#' @templateVar REGRESSORS "ev_c", "ev_nc", "ew_c", "ew_nc" +#' @templateVar LENGTH_DATA_COLUMNS 3 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"choice"}{Integer value representing the option chosen on that trial: 1 or 2.} +#' @templateVar DETAILS_DATA_3 \item{"outcome"}{Integer value representing the outcome of that trial (where reward == 1, and loss == -1).} +#' +#' @template 
model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. (2013). Dissociable -#' Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), 1090-1100. http://doi.org/10.1016/j.neuron.2013.08.030 -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- prl_ewa(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -prl_ewa <- function(data = "choice", - niter = 3000, - nwarmup = 1000, - nchain = 1, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) 
- cat("************************************\n") - cat("** Extract model-based regressors **\n") - cat("************************************\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "prl_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 3 - POI <- c("mu_phi", "mu_rho", "mu_beta", - "sigma", - "phi", "rho", "beta", - "log_lik") - - if (modelRegressor) - POI <- c(POI, "mr_ev_c", "mr_ev_nc", "mr_ew_c", "mr_ew_nc") - - if (inc_postpred) - POI <- c(POI, "y_pred") - - modelName <- "prl_ewa" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - # Setting maxTrials - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - choice <- array(-1, c(numSubjs, maxTrials)) - outcome <- array(0, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - choice[i, 1:useTrials] <- tmp$choice - outcome[i, 1:useTrials] <- sign(tmp$outcome) # use sign - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - choice = choice, - outcome = outcome, - numPars = numPars - ) - - # inits - if (inits[1] == "random") { - genInitList <- "random" - } else { - if (inits[1] == "fixed") { - inits_fixed <- c(0.5, 0.1, 1.0) - } else { - if (length(inits) == numPars) - inits_fixed <- inits - else - stop("Check your inital values!") +#' Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. +#' (2013). Dissociable Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), +#' 1090-1100. 
http://doi.org/10.1016/j.neuron.2013.08.030 + +prl_ewa <- hBayesDM_model( + task_name = "prl", + model_name = "ewa", + data_columns = c("subjID", "choice", "outcome"), + parameters = list("phi" = c(0, 0.5, 1), + "rho" = c(0, 0.1, 1), + "beta" = c(0, 1, 10)), + regressors = list("ev_c" = 2, + "ev_nc" = 2, + "ew_c" = 2, + "ew_nc" = 2), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + choice <- array(-1, c(n_subj, t_max)) + outcome <- array( 0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + choice[i, 1:t] <- DT_subj$choice + outcome[i, 1:t] <- sign(DT_subj$outcome) # use sign } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2]), qnorm(inits_fixed[3] / 10)), - sigma = c(1.0, 1.0, 1.0), - phi_pr = rep(qnorm(inits_fixed[1]), numSubjs), - rho_pr = rep(qnorm(inits_fixed[2]), numSubjs), - beta_pr = rep(qnorm(inits_fixed[3]/10), numSubjs) - ) - } + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + choice = choice, + outcome = outcome + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning('Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.') - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m <- stanmodels$prl_ewa - - if (vb) { # if variational Bayesian - fit <- rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit <- rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) - parVals$y_pred[parVals$y_pred == -1] <- NA - - phi <- parVals$phi - rho <- parVals$rho - beta <- parVals$beta - - # Individual parameters (e.g., individual posterior means) - measureIndPars <- switch(indPars, mean=mean, median=median, mode=estimate_mode) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - # TODO: Use *apply function instead of for loop - for (i in 1:numSubjs) { - allIndPars[i, ] <- c(measureIndPars(phi[, i]), - measureIndPars(rho[, i]), - measureIndPars(beta[, i])) - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("phi", - "rho", - "beta", - "subjID") - - # Wrap up data into a list - modelData <- list() - modelData$model <- modelName - modelData$allIndPars <- allIndPars - modelData$parVals <- parVals - modelData$fit <- fit - modelData$rawdata <- rawdata - modelData$modelRegressor <- NA - - if (modelRegressor) { - ev_c <- apply(parVals$mr_ev_c, c(2, 3), measureIndPars) - ev_nc <- apply(parVals$mr_ev_nc, c(2, 3), measureIndPars) - ew_c <- apply(parVals$mr_ew_c, c(2, 3), measureIndPars) - ew_nc <- apply(parVals$mr_ew_nc, c(2, 3), measureIndPars) - - # Initialize modelRegressor and add model-based 
regressors - regressors <- NULL - regressors$ev_c <- ev_c - regressors$ev_nc <- ev_nc - regressors$ew_c <- ew_c - regressors$ew_nc <- ew_nc - - - modelData$modelRegressor <- regressors - } - - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} - +) diff --git a/R/prl_fictitious.R b/R/prl_fictitious.R old mode 100755 new mode 100644 index 46bd6e21..39ed96bd --- a/R/prl_fictitious.R +++ b/R/prl_fictitious.R @@ -1,370 +1,73 @@ -#' Probabilistic Reversal Learning Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning (PRL) Task using the following parameters: "eta" (learning rate), "alpha" (indecision point), "beta" (inverse temperature). -#' -#' Contributor (for model-based regressors): \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -#' -#' \strong{MODEL:} -#' Fictitious Update Model (Glascher et al., 2008, Cerebral Cortex) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", and "outcome". See \bold{Details} below for more information. 
-#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. 
-#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"prl_fictitious"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Probabilistic Reversal Learning Task, there should be three columns of data -#' with the labels "subjID", "choice", and "outcome". 
It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"choice"}}{An integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in PRL).} -#' \item{\code{"outcome"}}{A 1 or -1 for outcome within each given trial (1 = reward, -1 = loss).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". 
-#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION prl_fictitious +#' @templateVar CONTRIBUTOR (for model-based regressors) \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +#' @templateVar TASK_NAME Probabilistic Reversal Learning Task +#' @templateVar MODEL_NAME Fictitious Update Model +#' @templateVar MODEL_CITE (Glascher et al., 2009, Cerebral Cortex) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "choice", "outcome" +#' @templateVar PARAMETERS "eta" (learning rate), "alpha" (indecision point), "beta" (inverse temperature) +#' @templateVar REGRESSORS "ev_c", "ev_nc", "pe_c", "pe_nc", "dv" +#' @templateVar LENGTH_DATA_COLUMNS 3 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"choice"}{Integer value representing the option chosen on that trial: 1 or 2.} +#' @templateVar DETAILS_DATA_3 \item{"outcome"}{Integer value representing the outcome of that trial (where reward == 1, and loss == -1).} +#' +#' @template 
model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). Determining a Role for Ventromedial Prefrontal Cortex in Encoding -#' Action-Based Value Signals During Reward-Related Decision Making. Cerebral Cortex, 19(2), 483-495. http://doi.org/10.1093/cercor/bhn098 -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- prl_fictitious(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -prl_fictitious <- function(data = "choice", - niter = 3000, - nwarmup = 1000, - nchain = 1, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) 
- cat("************************************\n") - cat("** Extract model-based regressors **\n") - cat("************************************\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "prl_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 3 - POI <- c("mu_eta", "mu_alpha", "mu_beta", - "sigma", - "eta", "alpha", "beta", - "log_lik") - - if (modelRegressor) - POI <- c(POI, "mr_ev_c", "mr_ev_nc", "mr_pe_c", "mr_pe_nc", "mr_dv") - - if (inc_postpred) - POI <- c(POI, "y_pred") - - modelName <- "prl_fictitious" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - # Setting maxTrials - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - choice <- array(-1, c(numSubjs, maxTrials)) - outcome <- array(0, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - choice[i, 1:useTrials] <- tmp$choice - outcome[i, 1:useTrials] <- sign(tmp$outcome) # use sign - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - choice = choice, - outcome = outcome, - numPars = numPars -) - - # inits - if (inits[1] == "random") { - genInitList <- "random" - } else { - if (inits[1] == "fixed") { - inits_fixed <- c(0.5, 0.0, 1.0) - } else { - if (length(inits) == numPars) - inits_fixed <- inits - else - stop("Check your inital values!") +#' Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). Determining a Role for Ventromedial +#' Prefrontal Cortex in Encoding Action-Based Value Signals During Reward-Related Decision Making. +#' Cerebral Cortex, 19(2), 483-495. 
http://doi.org/10.1093/cercor/bhn098 + +prl_fictitious <- hBayesDM_model( + task_name = "prl", + model_name = "fictitious", + data_columns = c("subjID", "choice", "outcome"), + parameters = list("eta" = c(0, 0.5, 1), + "alpha" = c(-Inf, 0, Inf), + "beta" = c(0, 1, 10)), + regressors = list("ev_c" = 2, + "ev_nc" = 2, + "pe_c" = 2, + "pe_nc" = 2, + "dv" = 2), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + choice <- array(-1, c(n_subj, t_max)) + outcome <- array( 0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + choice[i, 1:t] <- DT_subj$choice + outcome[i, 1:t] <- sign(DT_subj$outcome) # use sign } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), inits_fixed[2], qnorm(inits_fixed[3] / 5)), - sigma = c(1.0, 1.0, 1.0), - eta_pr = rep(qnorm(inits_fixed[1]), numSubjs), - alpha_pr = rep(inits_fixed[2], numSubjs), - beta_pr = rep(qnorm(inits_fixed[3]/5), numSubjs) - ) - } - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning('Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.') - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m <- stanmodels$prl_fictitious - - if (vb) { # if variational Bayesian - fit <- rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) - parVals$y_pred[parVals$y_pred == -1] <- NA - - eta <- parVals$eta - alpha <- parVals$alpha - beta <- parVals$beta - - # Individual parameters (e.g., individual posterior means) - measureIndPars <- switch(indPars, mean=mean, median=median, mode=estimate_mode) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - # TODO: Use *apply function instead of for loop - for (i in 1:numSubjs) { - allIndPars[i, ] <- c(measureIndPars(eta[, i]), - measureIndPars(alpha[, i]), - measureIndPars(beta[, i])) - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("eta", - "alpha", - "beta", - "subjID") - - # Wrap up data into a list - modelData <- list() - modelData$model <- modelName - modelData$allIndPars <- allIndPars - modelData$parVals <- parVals - modelData$fit <- fit - modelData$rawdata <- rawdata - modelData$modelRegressor <- NA - - if (modelRegressor) { - ev_c <- apply(parVals$mr_ev_c, c(2, 3), measureIndPars) - ev_nc <- apply(parVals$mr_ev_nc, c(2, 3), measureIndPars) - pe_c <- apply(parVals$mr_pe_c, c(2, 3), measureIndPars) - pe_nc <- apply(parVals$mr_pe_nc, c(2, 3), measureIndPars) - dv <- apply(parVals$mr_dv, c(2, 3), 
measureIndPars) - - # Initialize modelRegressor and add model-based regressors - regressors <- NULL - regressors$ev_c <- ev_c - regressors$ev_nc <- ev_nc - regressors$pe_c <- pe_c - regressors$pe_nc <- pe_nc - regressors$dv <- dv - - modelData$modelRegressor <- regressors + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + choice = choice, + outcome = outcome + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/prl_fictitious_multipleB.R b/R/prl_fictitious_multipleB.R old mode 100755 new mode 100644 index cdbf104f..d6f71822 --- a/R/prl_fictitious_multipleB.R +++ b/R/prl_fictitious_multipleB.R @@ -1,373 +1,85 @@ -#' Probabilistic Reversal Learning Task (Glascher et al, 2008), multiple blocks per subject -#' -#' @description -#' Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning (PRL) Task using the following parameters: "eta" (learning rate), "alpha" (indecision point), "beta" (inverse temperature). 
-#' -#' Contributor (for model-based regressors): \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -#' -#' \strong{MODEL:} -#' Fictitious Update Model (Glascher et al., 2008, Cerebral Cortex) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", "outcome", adn "block". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. 
-#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{'hBayesDM'} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model ("prl_fictitious_multipleB").} -#' \item{\code{allIndPars}}{\code{'data.frame'} containing the summarized parameter -#' values (as specified by \code{'indPars'}) for each subject.} -#' \item{\code{parVals}}{A \code{'list'} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{'stanfit'} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Probabilistic Reversal Learning Task, there should be three columns of data -#' with the labels "subjID", "choice", and "rewlos". 
It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"choice"}}{An integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in PRL).} -#' \item{\code{"outcome"}}{A 1 or -1 for outcome within each given trial (1 = reward, -1 = loss).} -#' \item{\code{"block"}}{An integer value representing the block number of the current trial (e.g., 1 1 1 2 2 2).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \strong{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. 
The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \strong{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. +#' @templateVar MODEL_FUNCTION prl_fictitious_multipleB +#' @templateVar CONTRIBUTOR (for model-based regressors) \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +#' @templateVar TASK_NAME Probabilistic Reversal Learning Task +#' @templateVar MODEL_NAME Fictitious Update Model +#' @templateVar MODEL_CITE (Glascher et al., 2009, Cerebral Cortex) +#' @templateVar MODEL_TYPE Multiple-Block Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "block", "choice", "outcome" +#' @templateVar PARAMETERS "eta" (learning rate), "alpha" (indecision point), "beta" (inverse temperature) +#' @templateVar REGRESSORS "ev_c", "ev_nc", "pe_c", "pe_nc", "dv" +#' @templateVar LENGTH_DATA_COLUMNS 4 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"block"}{A unique identifier for each of the multiple blocks within each subject.} +#' @templateVar DETAILS_DATA_3 \item{"choice"}{Integer value representing the option chosen on that trial: 1 or 2.} +#' @templateVar DETAILS_DATA_4 \item{"outcome"}{Integer value representing the outcome of that trial (where reward == 1, and loss == -1).} +#' +#' @template model-documentation #' #' @export -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- prl_fictitious_multipleB(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be 
less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -prl_fictitious_multipleB <- function(data = "choice", - niter = 3000, - nwarmup = 1000, - nchain = 1, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - cat("************************************\n") - cat("** Extract model-based regressors **\n") - cat("************************************\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "prl_multipleB_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T) - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. 
\n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 3 - POI <- c("mu_eta", "mu_alpha", "mu_beta", - "sigma", - "eta", "alpha", "beta", - "log_lik") - - if (modelRegressor) - POI <- c(POI, "mr_ev_c", "mr_ev_nc", "mr_pe_c", "mr_pe_nc", "mr_dv") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "prl_fictitious_multipleB" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. ################################################################### - ################################################################################ - - maxB = length(unique(rawdata$block)) # maximum number of block - B = NULL # number of blocks for each subject - - Tsubj <- array(0, c(numSubjs, maxB)) # number of trials for each subject, in each block - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - tmpDat = subset(rawdata, rawdata$subjID == curSubj) - tmpAllBlocks = unique(tmpDat$block) # temp. subject's all blocks - B[i] = length(tmpAllBlocks) - for (bIdx in 1:B[i]) { - Tsubj[i, bIdx] = sum(tmpDat$block == tmpAllBlocks[bIdx]) +#' @include hBayesDM_model.R +#' +#' @references +#' Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). Determining a Role for Ventromedial +#' Prefrontal Cortex in Encoding Action-Based Value Signals During Reward-Related Decision Making. +#' Cerebral Cortex, 19(2), 483-495. 
http://doi.org/10.1093/cercor/bhn098 + +prl_fictitious_multipleB <- hBayesDM_model( + task_name = "prl", + model_name = "fictitious", + model_type = "multipleB", + data_columns = c("subjID", "block", "choice", "outcome"), + parameters = list("eta" = c(0, 0.5, 1), + "alpha" = c(-Inf, 0, Inf), + "beta" = c(0, 1, 10)), + regressors = list("ev_c" = 3, + "ev_nc" = 3, + "pe_c" = 3, + "pe_nc" = 3, + "dv" = 3), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + b_subjs <- general_info$b_subjs + b_max <- general_info$b_max + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + choice <- array(-1, c(n_subj, b_max, t_max)) + outcome <- array( 0, c(n_subj, b_max, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + DT_subj <- raw_data[subjid == subj] + blocks_of_subj <- unique(DT_subj$block) + + for (b in 1:b_subjs[i]) { + curr_block <- blocks_of_subj[b] + DT_curr_block <- DT_subj[block == curr_block] + t <- t_subjs[i, b] + + choice[i, b, 1:t] <- DT_curr_block$choice + outcome[i, b, 1:t] <- sign(DT_curr_block$outcome) # use sign + } } - } - - # Setting maxTrials - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) blocks per subject = ", maxB, "\n\n") - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - choice <- array(-1, c(numSubjs, maxB, maxTrials)) - outcome <- array(0, c(numSubjs, maxB, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - tmpDat = subset(rawdata, rawdata$subjID == curSubj) - tmpAllBlocks = unique(tmpDat$block) # temp. 
subject's all blocks - for (bIdx in 1:B[i]) { - tmp = subset(tmpDat, tmpDat$block == tmpAllBlocks[bIdx]) - useTrials <- Tsubj[i, bIdx] - choice[i, bIdx, 1:useTrials] <- tmp$choice - outcome[i, bIdx, 1:useTrials] <- sign(tmp$outcome) # use sign - } + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + B = b_max, + Bsubj = b_subjs, + T = t_max, + Tsubj = t_subjs, + choice = choice, + outcome = outcome + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } - - dataList <- list( - N = numSubjs, - T = maxTrials, - maxB = maxB, - B = B, - Tsubj = Tsubj, - choice = choice, - outcome = outcome, - numPars = numPars ) - # inits - if (inits[1] == "random") { - genInitList <- "random" - } else { - if (inits[1] == "fixed") { - inits_fixed <- c(0.5, 0.0, 1.0) - } else { - if (length(inits) == numPars) - inits_fixed <- inits - else - stop("Check your inital values!") - } - - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), inits_fixed[2], qnorm(inits_fixed[3] / 10)), - sigma = c(1.0, 1.0, 1.0), - eta_pr = rep(qnorm(inits_fixed[1]), numSubjs), - alpha_pr = rep(inits_fixed[2], numSubjs), - beta_pr = rep(qnorm(inits_fixed[3]/10), numSubjs) - ) - } - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning('Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.') - } - else{ - options(mc.cores = ncore) - } - } - else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$prl_fictitious_multipleB - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) - parVals$y_pred[parVals$y_pred == -1] <- NA - - eta <- parVals$eta - alpha <- parVals$alpha - beta <- parVals$beta - - # Individual parameters (e.g., individual posterior means) - measureIndPars <- switch(indPars, mean=mean, median=median, mode=estimate_mode) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - # TODO: Use *apply function instead of for loop - for (i in 1:numSubjs) { - allIndPars[i, ] <- c(measureIndPars(eta[, i]), - measureIndPars(alpha[, i]), - measureIndPars(beta[, i])) - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("eta", - "alpha", - "beta", - "subjID") - - # Wrap up data into a list - modelData <- list() - modelData$model <- modelName - modelData$allIndPars <- allIndPars - modelData$parVals <- parVals - modelData$fit <- fit - modelData$rawdata <- rawdata - modelData$modelRegressor <- NA - - if (modelRegressor) { - ev_c <- apply(parVals$mr_ev_c, c(2, 3, 4), measureIndPars) - ev_nc <- apply(parVals$mr_ev_nc, c(2, 3, 4), measureIndPars) - pe_c <- apply(parVals$mr_pe_c, c(2, 3, 4), measureIndPars) - pe_nc <- apply(parVals$mr_pe_nc, c(2, 3, 4), measureIndPars) - dv <- apply(parVals$mr_dv, c(2, 3, 4), 
measureIndPars) - - # Initialize modelRegressor and add model-based regressors - regressors <- NULL - regressors$ev_c <- ev_c - regressors$ev_nc <- ev_nc - regressors$pe_c <- pe_c - regressors$pe_nc <- pe_nc - regressors$dv <- dv - - modelData$modelRegressor <- regressors - } - - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} - - diff --git a/R/prl_fictitious_rp.R b/R/prl_fictitious_rp.R old mode 100755 new mode 100644 index 6bce19ae..a8ec6d6c --- a/R/prl_fictitious_rp.R +++ b/R/prl_fictitious_rp.R @@ -1,376 +1,77 @@ -#' Probabilistic Reversal Learning Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning (PRL) Task using the following parameters: "eta_pos" (learning rate, +PE), "eta_neg" (learning rate, -PE), "alpha" (indecision point), "beta" (inverse temperature). 
-#' -#' Contributor (for model-based regressors): \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -#' -#' \strong{MODEL:} -#' Fictitious Update Model (Glascher et al., 2008, Cerebral Cortex) + separate learning rates for + and - prediction error (PE) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", and "outcome". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. 
-#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"prl_fictitious"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Probabilistic Reversal Learning Task, there should be three columns of data -#' with the labels "subjID", "choice", and "outcome". 
It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"choice"}}{An integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in PRL).} -#' \item{\code{"outcome"}}{A 1 or -1 for outcome within each given trial (1 = reward, -1 = loss).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". 
-#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION prl_fictitious_rp +#' @templateVar CONTRIBUTOR (for model-based regressors) \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +#' @templateVar TASK_NAME Probabilistic Reversal Learning Task +#' @templateVar MODEL_NAME Fictitious Update Model (Glascher et al., 2009, Cerebral Cortex), with separate learning rates for positive and negative prediction error (PE) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "choice", "outcome" +#' @templateVar PARAMETERS "eta_pos" (learning rate, +PE), "eta_neg" (learning rate, -PE), "alpha" (indecision point), "beta" (inverse temperature) +#' @templateVar REGRESSORS "ev_c", "ev_nc", "pe_c", "pe_nc", "dv" +#' @templateVar LENGTH_DATA_COLUMNS 3 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"choice"}{Integer value representing the option chosen on that trial: 1 or 2.} +#' @templateVar DETAILS_DATA_3 \item{"outcome"}{Integer value 
representing the outcome of that trial (where reward == 1, and loss == -1).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). Determining a Role for Ventromedial Prefrontal Cortex in Encoding -#' Action-Based Value Signals During Reward-Related Decision Making. Cerebral Cortex, 19(2), 483-495. http://doi.org/10.1093/cercor/bhn098 -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. (2013). Dissociable -#' Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), 1090-1100. http://doi.org/10.1016/j.neuron.2013.08.030 - -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- prl_fictitious_rp(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -prl_fictitious_rp <- function(data = "choice", - niter = 3000, - nwarmup = 1000, - nchain = 1, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if 
(modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - cat("************************************\n") - cat("** Extract model-based regressors **\n") - cat("************************************\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "prl_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. 
\n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 4 - POI <- c("mu_eta_pos", "mu_eta_neg", "mu_alpha", "mu_beta", - "sigma", - "eta_pos", "eta_neg", "alpha", "beta", - "log_lik") - - if (modelRegressor) - POI <- c(POI, "mr_ev_c", "mr_ev_nc", "mr_pe_c", "mr_pe_nc", "mr_dv") - - if (inc_postpred) - POI <- c(POI, "y_pred") - - modelName <- "prl_fictitious_rp" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - # Setting maxTrials - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - choice <- array(-1, c(numSubjs, maxTrials)) - outcome <- array(0, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - choice[i, 1:useTrials] <- tmp$choice - outcome[i, 1:useTrials] <- sign(tmp$outcome) # use sign - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - choice = choice, - outcome = outcome, - numPars = numPars -) - - # inits - if (inits[1] == "random") { - genInitList <- "random" - } else { - if (inits[1] == "fixed") { - inits_fixed <- c(0.5, 0.5, 0.0, 1.0) - } else { - if (length(inits) == numPars) - inits_fixed <- inits - else - stop("Check your inital values!") - } - - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2]), inits_fixed[3], qnorm(inits_fixed[4] / 5)), - sigma = c(1.0, 1.0, 1.0, 1.0), - eta_pos_pr = rep(qnorm(inits_fixed[1]), numSubjs), - eta_neg_pr = rep(qnorm(inits_fixed[2]), numSubjs), - alpha_pr = rep(inits_fixed[3], numSubjs), - beta_pr = rep(qnorm(inits_fixed[4]/5), numSubjs) - ) +#' Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). Determining a Role for Ventromedial +#' Prefrontal Cortex in Encoding Action-Based Value Signals During Reward-Related Decision Making. +#' Cerebral Cortex, 19(2), 483-495. http://doi.org/10.1093/cercor/bhn098 +#' +#' Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. +#' (2013). 
Dissociable Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), +#' 1090-1100. http://doi.org/10.1016/j.neuron.2013.08.030 + +prl_fictitious_rp <- hBayesDM_model( + task_name = "prl", + model_name = "fictitious_rp", + data_columns = c("subjID", "choice", "outcome"), + parameters = list("eta_pos" = c(0, 0.5, 1), + "eta_neg" = c(0, 0.5, 1), + "alpha" = c(-Inf, 0, Inf), + "beta" = c(0, 1, 10)), + regressors = list("ev_c" = 2, + "ev_nc" = 2, + "pe_c" = 2, + "pe_nc" = 2, + "dv" = 2), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + choice <- array(-1, c(n_subj, t_max)) + outcome <- array( 0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + choice[i, 1:t] <- DT_subj$choice + outcome[i, 1:t] <- sign(DT_subj$outcome) # use sign } - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning('Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.') - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$prl_fictitious_rp - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA - } - - eta_pos <- parVals$eta_pos - eta_neg <- parVals$eta_neg - alpha <- parVals$alpha - beta <- parVals$beta - - # Individual parameters (e.g., individual posterior means) - measureIndPars <- switch(indPars, mean=mean, median=median, mode=estimate_mode) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - # TODO: Use *apply function instead of for loop - for (i in 1:numSubjs) { - allIndPars[i, ] <- c(measureIndPars(eta_pos[, i]), - measureIndPars(eta_neg[, i]), - measureIndPars(alpha[, i]), - measureIndPars(beta[, i])) - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("eta_pos", - "eta_neg", - "alpha", - "beta", - "subjID") - - # Wrap up data into a list - modelData <- list() - modelData$model <- modelName - modelData$allIndPars <- allIndPars - modelData$parVals <- parVals - modelData$fit <- fit - modelData$rawdata <- rawdata - modelData$modelRegressor <- NA - - if (modelRegressor) { - ev_c <- apply(parVals$mr_ev_c, c(2, 3), measureIndPars) - ev_nc <- apply(parVals$mr_ev_nc, c(2, 3), measureIndPars) - pe_c <- apply(parVals$mr_pe_c, c(2, 3), measureIndPars) - pe_nc <- 
apply(parVals$mr_pe_nc, c(2, 3), measureIndPars) - dv <- apply(parVals$mr_dv, c(2, 3), measureIndPars) - - # Initialize modelRegressor and add model-based regressors - regressors <- NULL - regressors$ev_c <- ev_c - regressors$ev_nc <- ev_nc - regressors$pe_c <- pe_c - regressors$pe_nc <- pe_nc - regressors$dv <- dv - - modelData$modelRegressor <- regressors + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + choice = choice, + outcome = outcome + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/prl_fictitious_rp_woa.R b/R/prl_fictitious_rp_woa.R old mode 100755 new mode 100644 index db2c9841..496e7e44 --- a/R/prl_fictitious_rp_woa.R +++ b/R/prl_fictitious_rp_woa.R @@ -1,372 +1,76 @@ -#' Probabilistic Reversal Learning Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning (PRL) Task using the following parameters: "eta_pos" (learning rate, +PE), "eta_neg" (learning rate, -PE), "alpha" (indecision point), "beta" (inverse temperature). 
-#' -#' Contributor (for model-based regressors): \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -#' -#' \strong{MODEL:} -#' Fictitious Update Model (Glascher et al., 2008, Cerebral Cortex) + separate learning rates for + and - prediction error (PE) without alpha (indecision point) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", and "outcome". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"prl_fictitious"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Probabilistic Reversal Learning Task, there should be three columns of data -#' with the labels "subjID", "choice", and "outcome". 
It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"choice"}}{An integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in PRL).} -#' \item{\code{"outcome"}}{A 1 or -1 for outcome within each given trial (1 = reward, -1 = loss).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". 
-#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION prl_fictitious_rp_woa +#' @templateVar CONTRIBUTOR (for model-based regressors) \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +#' @templateVar TASK_NAME Probabilistic Reversal Learning Task +#' @templateVar MODEL_NAME Fictitious Update Model (Glascher et al., 2009, Cerebral Cortex), with separate learning rates for positive and negative prediction error (PE), without alpha (indecision point) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "choice", "outcome" +#' @templateVar PARAMETERS "eta_pos" (learning rate, +PE), "eta_neg" (learning rate, -PE), "beta" (inverse temperature) +#' @templateVar REGRESSORS "ev_c", "ev_nc", "pe_c", "pe_nc", "dv" +#' @templateVar LENGTH_DATA_COLUMNS 3 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"choice"}{Integer value representing the option chosen on that trial: 1 or 2.} +#' @templateVar DETAILS_DATA_3 \item{"outcome"}{Integer 
value representing the outcome of that trial (where reward == 1, and loss == -1).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). Determining a Role for Ventromedial Prefrontal Cortex in Encoding -#' Action-Based Value Signals During Reward-Related Decision Making. Cerebral Cortex, 19(2), 483-495. http://doi.org/10.1093/cercor/bhn098 -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. (2013). Dissociable -#' Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), 1090-1100. http://doi.org/10.1016/j.neuron.2013.08.030 - -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- prl_fictitious_rp_woa(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -prl_fictitious_rp_woa <- function(data = "choice", - niter = 3000, - nwarmup = 1000, - nchain = 1, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if 
(modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - cat("************************************\n") - cat("** Extract model-based regressors **\n") - cat("************************************\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "prl_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. 
\n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 3 - POI <- c("mu_eta_pos", "mu_eta_neg", "mu_beta", - "sigma", - "eta_pos", "eta_neg", "beta", - "log_lik") - - if (modelRegressor) - POI <- c(POI, "mr_ev_c", "mr_ev_nc", "mr_pe_c", "mr_pe_nc", "mr_dv") - - if (inc_postpred) - POI <- c(POI, "y_pred") - - modelName <- "prl_fictitious_rp_woa" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - # Setting maxTrials - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - choice <- array(-1, c(numSubjs, maxTrials)) - outcome <- array(0, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - choice[i, 1:useTrials] <- tmp$choice - outcome[i, 1:useTrials] <- sign(tmp$outcome) # use sign - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - choice = choice, - outcome = outcome, - numPars = numPars -) - - # inits - if (inits[1] == "random") { - genInitList <- "random" - } else { - if (inits[1] == "fixed") { - inits_fixed <- c(0.5, 0.5, 1.0) - } else { - if (length(inits) == numPars) - inits_fixed <- inits - else - stop("Check your inital values!") - } - - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2]), qnorm(inits_fixed[3] / 5)), - sigma = c(1.0, 1.0, 1.0), - eta_pos_pr = rep(qnorm(inits_fixed[1]), numSubjs), - eta_neg_pr = rep(qnorm(inits_fixed[2]), numSubjs), - beta_pr = rep(qnorm(inits_fixed[3]/5), numSubjs) - ) +#' Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). Determining a Role for Ventromedial +#' Prefrontal Cortex in Encoding Action-Based Value Signals During Reward-Related Decision Making. +#' Cerebral Cortex, 19(2), 483-495. http://doi.org/10.1093/cercor/bhn098 +#' +#' Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. +#' (2013). Dissociable Effects of Dopamine and Serotonin on Reversal Learning. 
Neuron, 80(4), +#' 1090-1100. http://doi.org/10.1016/j.neuron.2013.08.030 + +prl_fictitious_rp_woa <- hBayesDM_model( + task_name = "prl", + model_name = "fictitious_rp_woa", + data_columns = c("subjID", "choice", "outcome"), + parameters = list("eta_pos" = c(0, 0.5, 1), + "eta_neg" = c(0, 0.5, 1), + "beta" = c(0, 1, 10)), + regressors = list("ev_c" = 2, + "ev_nc" = 2, + "pe_c" = 2, + "pe_nc" = 2, + "dv" = 2), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + choice <- array(-1, c(n_subj, t_max)) + outcome <- array( 0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + choice[i, 1:t] <- DT_subj$choice + outcome[i, 1:t] <- sign(DT_subj$outcome) # use sign } - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning('Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.') - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$prl_fictitious_rp_woa - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA - } - - eta_pos <- parVals$eta_pos - eta_neg <- parVals$eta_neg - beta <- parVals$beta - - # Individual parameters (e.g., individual posterior means) - measureIndPars <- switch(indPars, mean=mean, median=median, mode=estimate_mode) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - # TODO: Use *apply function instead of for loop - for (i in 1:numSubjs) { - allIndPars[i, ] <- c(measureIndPars(eta_pos[, i]), - measureIndPars(eta_neg[, i]), - measureIndPars(beta[, i])) - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("eta_pos", - "eta_neg", - "beta", - "subjID") - - # Wrap up data into a list - modelData <- list() - modelData$model <- modelName - modelData$allIndPars <- allIndPars - modelData$parVals <- parVals - modelData$fit <- fit - modelData$rawdata <- rawdata - modelData$modelRegressor <- NA - - if (modelRegressor) { - ev_c <- apply(parVals$mr_ev_c, c(2, 3), measureIndPars) - ev_nc <- apply(parVals$mr_ev_nc, c(2, 3), measureIndPars) - pe_c <- apply(parVals$mr_pe_c, c(2, 3), measureIndPars) - pe_nc <- apply(parVals$mr_pe_nc, c(2, 3), measureIndPars) - dv <- 
apply(parVals$mr_dv, c(2, 3), measureIndPars) - - # Initialize modelRegressor and add model-based regressors - regressors <- NULL - regressors$ev_c <- ev_c - regressors$ev_nc <- ev_nc - regressors$pe_c <- pe_c - regressors$pe_nc <- pe_nc - regressors$dv <- dv - - modelData$modelRegressor <- regressors + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + choice = choice, + outcome = outcome + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/prl_fictitious_woa.R b/R/prl_fictitious_woa.R old mode 100755 new mode 100644 index f773704f..4d1d9250 --- a/R/prl_fictitious_woa.R +++ b/R/prl_fictitious_woa.R @@ -1,366 +1,71 @@ -#' Probabilistic Reversal Learning Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning (PRL) Task using the following parameters: "eta" (learning rate), "alpha" (indecision point), "beta" (inverse temperature). 
-#' -#' Contributor (for model-based regressors): \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -#' -#' \strong{MODEL:} -#' Fictitious Update Model (Glascher et al., 2008, Cerebral Cortex) without alpha (indecision point) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", and "outcome". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. 
-#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"prl_fictitious"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Probabilistic Reversal Learning Task, there should be three columns of data -#' with the labels "subjID", "choice", and "outcome". 
It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"choice"}}{An integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in PRL).} -#' \item{\code{"outcome"}}{A 1 or -1 for outcome within each given trial (1 = reward, -1 = loss).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". 
-#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION prl_fictitious_woa +#' @templateVar CONTRIBUTOR (for model-based regressors) \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +#' @templateVar TASK_NAME Probabilistic Reversal Learning Task +#' @templateVar MODEL_NAME Fictitious Update Model (Glascher et al., 2009, Cerebral Cortex), without alpha (indecision point) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "choice", "outcome" +#' @templateVar PARAMETERS "eta" (learning rate), "beta" (inverse temperature) +#' @templateVar REGRESSORS "ev_c", "ev_nc", "pe_c", "pe_nc", "dv" +#' @templateVar LENGTH_DATA_COLUMNS 3 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"choice"}{Integer value representing the option chosen on that trial: 1 or 2.} +#' @templateVar DETAILS_DATA_3 \item{"outcome"}{Integer value representing the outcome of that trial (where reward == 1, and loss == -1).} +#' +#' @template model-documentation #' 
#' @export +#' @include hBayesDM_model.R #' #' @references -#' Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). Determining a Role for Ventromedial Prefrontal Cortex in Encoding -#' Action-Based Value Signals During Reward-Related Decision Making. Cerebral Cortex, 19(2), 483-495. http://doi.org/10.1093/cercor/bhn098 -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- prl_fictitious_woa(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -prl_fictitious_woa <- function(data = "choice", - niter = 3000, - nwarmup = 1000, - nchain = 1, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) 
- cat("************************************\n") - cat("** Extract model-based regressors **\n") - cat("************************************\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "prl_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 2 - POI <- c("mu_eta", "mu_beta", - "sigma", - "eta", "beta", - "log_lik") - - if (modelRegressor) - POI <- c(POI, "mr_ev_c", "mr_ev_nc", "mr_pe_c", "mr_pe_nc", "mr_dv") - - if (inc_postpred) - POI <- c(POI, "y_pred") - - modelName <- "prl_fictitious_woa" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - # Setting maxTrials - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - choice <- array(-1, c(numSubjs, maxTrials)) - outcome <- array(0, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - choice[i, 1:useTrials] <- tmp$choice - outcome[i, 1:useTrials] <- sign(tmp$outcome) # use sign - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - choice = choice, - outcome = outcome, - numPars = numPars -) - - # inits - if (inits[1] == "random") { - genInitList <- "random" - } else { - if (inits[1] == "fixed") { - inits_fixed <- c(0.5, 1.0) - } else { - if (length(inits) == numPars) - inits_fixed <- inits - else - stop("Check your inital values!") +#' Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). Determining a Role for Ventromedial +#' Prefrontal Cortex in Encoding Action-Based Value Signals During Reward-Related Decision Making. +#' Cerebral Cortex, 19(2), 483-495. 
http://doi.org/10.1093/cercor/bhn098 + +prl_fictitious_woa <- hBayesDM_model( + task_name = "prl", + model_name = "fictitious_woa", + data_columns = c("subjID", "choice", "outcome"), + parameters = list("eta" = c(0, 0.5, 1), + "beta" = c(0, 1, 10)), + regressors = list("ev_c" = 2, + "ev_nc" = 2, + "pe_c" = 2, + "pe_nc" = 2, + "dv" = 2), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + choice <- array(-1, c(n_subj, t_max)) + outcome <- array( 0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + choice[i, 1:t] <- DT_subj$choice + outcome[i, 1:t] <- sign(DT_subj$outcome) # use sign } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2] / 5)), - sigma = c(1.0, 1.0), - eta_pr = rep(qnorm(inits_fixed[1]), numSubjs), - beta_pr = rep(qnorm(inits_fixed[2]/5), numSubjs) - ) - } - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning('Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.') - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m <- stanmodels$prl_fictitious_woa - - if (vb) { # if variational Bayesian - fit <- rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) - parVals$y_pred[parVals$y_pred == -1] <- NA - - eta <- parVals$eta - beta <- parVals$beta - - # Individual parameters (e.g., individual posterior means) - measureIndPars <- switch(indPars, mean=mean, median=median, mode=estimate_mode) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - # TODO: Use *apply function instead of for loop - for (i in 1:numSubjs) { - allIndPars[i, ] <- c(measureIndPars(eta[, i]), - measureIndPars(beta[, i])) - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("eta", - "beta", - "subjID") - - # Wrap up data into a list - modelData <- list() - modelData$model <- modelName - modelData$allIndPars <- allIndPars - modelData$parVals <- parVals - modelData$fit <- fit - modelData$rawdata <- rawdata - modelData$modelRegressor <- NA - - if (modelRegressor) { - ev_c <- apply(parVals$mr_ev_c, c(2, 3), measureIndPars) - ev_nc <- apply(parVals$mr_ev_nc, c(2, 3), measureIndPars) - pe_c <- apply(parVals$mr_pe_c, c(2, 3), measureIndPars) - pe_nc <- apply(parVals$mr_pe_nc, c(2, 3), measureIndPars) - dv <- apply(parVals$mr_dv, c(2, 3), measureIndPars) - - # Initialize modelRegressor and add model-based 
regressors - regressors <- NULL - regressors$ev_c <- ev_c - regressors$ev_nc <- ev_nc - regressors$pe_c <- pe_c - regressors$pe_nc <- pe_nc - regressors$dv <- dv - - modelData$modelRegressor <- regressors + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + choice = choice, + outcome = outcome + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/prl_rp.R b/R/prl_rp.R old mode 100755 new mode 100644 index 7e4a773b..44e24341 --- a/R/prl_rp.R +++ b/R/prl_rp.R @@ -1,366 +1,71 @@ -#' Probabilistic Reversal Learning Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning (PRL) Task using the following parameters: "Apun" (punishment learning rate), "Arew" (reward learning rate), and "beta" (inverse temperature). 
-#' -#' Contributor (for model-based regressors): \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -#' -#' \strong{MODEL:} -#' Reward-Punishment Model (Ouden et al., 2013, Neuron) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", and "outcome". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. 
-#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"prl_rp"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Probabilistic Reversal Learning Task, there should be three columns of data -#' with the labels "subjID", "choice", and "outcome". 
It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"choice"}}{An integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in PRL).} -#' \item{\code{"outcome"}}{A 1 or -1 for outcome within each given trial (1 = reward, -1 = loss).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". 
-#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION prl_rp +#' @templateVar CONTRIBUTOR (for model-based regressors) \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +#' @templateVar TASK_NAME Probabilistic Reversal Learning Task +#' @templateVar MODEL_NAME Reward-Punishment Model +#' @templateVar MODEL_CITE (Ouden et al., 2013, Neuron) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "choice", "outcome" +#' @templateVar PARAMETERS "Apun" (punishment learning rate), "Arew" (reward learning rate), "beta" (inverse temperature) +#' @templateVar REGRESSORS "ev_c", "ev_nc", "pe" +#' @templateVar LENGTH_DATA_COLUMNS 3 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"choice"}{Integer value representing the option chosen on that trial: 1 or 2.} +#' @templateVar DETAILS_DATA_3 \item{"outcome"}{Integer value representing the outcome of that trial (where reward == 1, and loss == -1).} +#' +#' @template model-documentation #' #' 
@export +#' @include hBayesDM_model.R #' #' @references -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. (2013). Dissociable -#' Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), 1090-1100. http://doi.org/10.1016/j.neuron.2013.08.030 -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- prl_rp(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -prl_rp <- function(data = "choice", - niter = 3000, - nwarmup = 1000, - nchain = 1, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) 
- cat("************************************\n") - cat("** Extract model-based regressors **\n") - cat("************************************\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "prl_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 3 - POI <- c("mu_Apun", "mu_Arew", "mu_beta", - "sigma", - "Apun", "Arew", "beta", - "log_lik") - - if (modelRegressor) - POI <- c(POI, "mr_ev_c", "mr_ev_nc", "mr_pe") - - if (inc_postpred) - POI <- c(POI, "y_pred") - - modelName <- "prl_rp" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - # Setting maxTrials - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - choice <- array(-1, c(numSubjs, maxTrials)) - outcome <- array(0, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - choice[i, 1:useTrials] <- tmp$choice - outcome[i, 1:useTrials] <- sign(tmp$outcome) # use sign - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - choice = choice, - outcome = outcome, - numPars = numPars -) - - # inits - if (inits[1] == "random") { - genInitList <- "random" - } else { - if (inits[1] == "fixed") { - inits_fixed <- c(0.1, 0.1, 1.0) - } else { - if (length(inits) == numPars) - inits_fixed <- inits - else - stop("Check your inital values!") +#' Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. +#' (2013). Dissociable Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), +#' 1090-1100. 
http://doi.org/10.1016/j.neuron.2013.08.030 + +prl_rp <- hBayesDM_model( + task_name = "prl", + model_name = "rp", + data_columns = c("subjID", "choice", "outcome"), + parameters = list("Apun" = c(0, 0.1, 1), + "Arew" = c(0, 0.1, 1), + "beta" = c(0, 1, 10)), + regressors = list("ev_c" = 2, + "ev_nc" = 2, + "pe" = 2), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + choice <- array(-1, c(n_subj, t_max)) + outcome <- array( 0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + choice[i, 1:t] <- DT_subj$choice + outcome[i, 1:t] <- sign(DT_subj$outcome) # use sign } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2]), qnorm(inits_fixed[3] / 10)), - sigma = c(1.0, 1.0, 1.0), - Apun_pr = rep(qnorm(inits_fixed[1]), numSubjs), - Arew_pr = rep(qnorm(inits_fixed[2]), numSubjs), - beta_pr = rep(qnorm(inits_fixed[3] / 10), numSubjs) - ) - } - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - - if (numCores < ncore) { - options(mc.cores = numCores) - warning('Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.') - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$prl_rp - if (vb) { # if variational Bayesian - fit <- rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit <- rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) - parVals$y_pred[parVals$y_pred == -1] <- NA - - Apun <- parVals$Apun - Arew <- parVals$Arew - beta <- parVals$beta - - # Individual parameters (e.g., individual posterior means) - measureIndPars <- switch(indPars, mean=mean, median=median, mode=estimate_mode) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - allIndPars[i,] <- c(measureIndPars(Apun[, i]), - measureIndPars(Arew[, i]), - measureIndPars(beta[, i])) - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("Apun", - "Arew", - "beta", - "subjID") - - # Wrap up data into a list - modelData <- list() - modelData$model <- modelName - modelData$allIndPars <- allIndPars - modelData$parVals <- parVals - modelData$fit <- fit - modelData$rawdata <- rawdata - modelData$modelRegressor <- NA - - if (modelRegressor) { - ev_c <- apply(parVals$mr_ev_c, c(2, 3), measureIndPars) - ev_nc <- apply(parVals$mr_ev_nc, c(2, 3), measureIndPars) - pe <- apply(parVals$mr_pe, c(2, 3), measureIndPars) - - # Initialize modelRegressor and add model-based regressors - regressors <- NULL - regressors$ev_c <- ev_c - regressors$ev_nc <- ev_nc - regressors$pe <- pe - - 
modelData$modelRegressor <- regressors + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + choice = choice, + outcome = outcome + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/prl_rp_multipleB.R b/R/prl_rp_multipleB.R index 72ba9fb8..07541231 100644 --- a/R/prl_rp_multipleB.R +++ b/R/prl_rp_multipleB.R @@ -1,383 +1,83 @@ -#' Probabilistic Reversal Learning Task, multiple blocks per subject -#' -#' @description -#' Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning (PRL) Task using the following parameters: "Apun" (punishment learning rate), "Arew" (reward learning rate), and "beta" (inverse temperature). -#' -#' Contributor (for model-based regressors): \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -#' -#' \strong{MODEL:} -#' Reward-Punishment Model (Ouden et al., 2013, Neuron) -#' -#' @param data A .txt file containing the data to be modeled. 
Data columns should be labelled as follows: "subjID", "choice", "outcome", and "block". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. 
-#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"prl_rp_multipleB"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Probabilistic Reversal Learning Task, there should be three columns of data -#' with the labels "subjID", "choice", and "outcome". 
It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"choice"}}{An integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in PRL).} -#' \item{\code{"outcome"}}{A 1 or -1 for outcome within each given trial (1 = reward, -1 = loss).} -#' \item{\code{"block"}}{An integer value representing the block number of the current trial (e.g., 1 1 1 2 2 2).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. 
The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION prl_rp_multipleB +#' @templateVar CONTRIBUTOR (for model-based regressors) \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +#' @templateVar TASK_NAME Probabilistic Reversal Learning Task +#' @templateVar MODEL_NAME Reward-Punishment Model +#' @templateVar MODEL_CITE (Ouden et al., 2013, Neuron) +#' @templateVar MODEL_TYPE Multiple-Block Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "block", "choice", "outcome" +#' @templateVar PARAMETERS "Apun" (punishment learning rate), "Arew" (reward learning rate), "beta" (inverse temperature) +#' @templateVar REGRESSORS "ev_c", "ev_nc", "pe" +#' @templateVar LENGTH_DATA_COLUMNS 4 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"block"}{A unique identifier for each of the multiple blocks within each subject.} +#' @templateVar DETAILS_DATA_3 \item{"choice"}{Integer value representing the option chosen on 
that trial: 1 or 2.} +#' @templateVar DETAILS_DATA_4 \item{"outcome"}{Integer value representing the outcome of that trial (where reward == 1, and loss == -1).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. (2013). Dissociable -#' Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), 1090-1100. http://doi.org/10.1016/j.neuron.2013.08.030 -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- prl_rp_multipleB(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -prl_rp_multipleB <- function(data = "choice", - niter = 3000, - nwarmup = 1000, - nchain = 1, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) 
- cat("************************************\n") - cat("** Extract model-based regressors **\n") - cat("************************************\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "prl_multipleB_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T) - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 3 - POI <- c("mu_Apun", "mu_Arew", "mu_beta", - "sigma", - "Apun", "Arew", "beta", - "log_lik") - - if (modelRegressor) - POI <- c(POI, "mr_ev_c", "mr_ev_nc", "mr_pe") - - if (inc_postpred) - POI <- c(POI, "y_pred") - - modelName <- "prl_rp_multipleB" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - maxB <- length(unique(rawdata$block)) # maximum number of block - B <- NULL # number of blocks for each subject - Tsubj <- array(0, c(numSubjs, maxB)) # number of trials for each subject, in each block - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - tmpDat <- subset(rawdata, rawdata$subjID == curSubj) - tmpAllBlocks <- unique(tmpDat$block) # temp. subject's all blocks - B[i] <- length(tmpAllBlocks) - - for (bIdx in 1:B[i]) - Tsubj[i, bIdx] = sum(tmpDat$block == tmpAllBlocks[bIdx]) - } - - # Setting maxTrials - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) blocks per subject = ", maxB, "\n\n") - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - choice <- array(-1, c(numSubjs, maxB, maxTrials)) - outcome <- array(0, c(numSubjs, maxB, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - tmpDat <- subset(rawdata, rawdata$subjID == curSubj) - tmpAllBlocks <- unique(tmpDat$block) # temp. 
subject's all blocks - - for (bIdx in 1:B[i]) { - tmp <- subset(tmpDat, tmpDat$block == tmpAllBlocks[bIdx]) - useTrials <- Tsubj[i, bIdx] - - choice[i, bIdx, 1:useTrials] <- tmp$choice - outcome[i, bIdx, 1:useTrials] <- sign(tmp$outcome) # use sign - } - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - maxB = maxB, - B = B, - Tsubj = Tsubj, - choice = choice, - outcome = outcome, - numPars = numPars -) - - # inits - if (inits[1] == "random") { - genInitList <- "random" - } else { - if (inits[1] == "fixed") { - inits_fixed <- c(0.1, 0.1, 1.0) - } else { - if (length(inits) == numPars) - inits_fixed <- inits - else - stop("Check your inital values!") - } - - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2]), qnorm(inits_fixed[3] / 10)), - sigma = c(1.0, 1.0, 1.0), - Apun_pr = rep(qnorm(inits_fixed[1]), numSubjs), - Arew_pr = rep(qnorm(inits_fixed[2]), numSubjs), - beta_pr = rep(qnorm(inits_fixed[3] / 10), numSubjs) - ) - } - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - - if (numCores < ncore) { - options(mc.cores = numCores) - warning('Number of cores specified for parallel computing greater than number of locally available cores. Using all locally available cores.') - } else { - options(mc.cores = ncore) +#' Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. +#' (2013). Dissociable Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), +#' 1090-1100. 
http://doi.org/10.1016/j.neuron.2013.08.030 + +prl_rp_multipleB <- hBayesDM_model( + task_name = "prl", + model_name = "rp", + model_type = "multipleB", + data_columns = c("subjID", "block", "choice", "outcome"), + parameters = list("Apun" = c(0, 0.1, 1), + "Arew" = c(0, 0.1, 1), + "beta" = c(0, 1, 10)), + regressors = list("ev_c" = 3, + "ev_nc" = 3, + "pe" = 3), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + b_subjs <- general_info$b_subjs + b_max <- general_info$b_max + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + choice <- array(-1, c(n_subj, b_max, t_max)) + outcome <- array( 0, c(n_subj, b_max, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + DT_subj <- raw_data[subjid == subj] + blocks_of_subj <- unique(DT_subj$block) + + for (b in 1:b_subjs[i]) { + curr_block <- blocks_of_subj[b] + DT_curr_block <- DT_subj[block == curr_block] + t <- t_subjs[i, b] + + choice[i, b, 1:t] <- DT_curr_block$choice + outcome[i, b, 1:t] <- sign(DT_curr_block$outcome) # use sign + } } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m <- stanmodels$prl_rp_multipleB - - if (vb) { # if variational Bayesian - fit <- rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit <- rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) - parVals$y_pred[parVals$y_pred 
== -1] <- NA - Apun <- parVals$Apun - Arew <- parVals$Arew - beta <- parVals$beta - - # Individual parameters (e.g., individual posterior means) - measureIndPars <- switch(indPars, mean=mean, median=median, mode=estimate_mode) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - allIndPars[i,] <- c(measureIndPars(Apun[, i]), - measureIndPars(Arew[, i]), - measureIndPars(beta[, i])) + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + B = b_max, + Bsubj = b_subjs, + T = t_max, + Tsubj = t_subjs, + choice = choice, + outcome = outcome + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("Apun", - "Arew", - "beta", - "subjID") - - # Wrap up data into a list - modelData <- list() - modelData$model <- modelName - modelData$allIndPars <- allIndPars - modelData$parVals <- parVals - modelData$fit <- fit - modelData$rawdata <- rawdata - modelData$modelRegressor <- NA - - if (modelRegressor) { - ev_c <- apply(parVals$mr_ev_c, c(2, 3, 4), measureIndPars) - ev_nc <- apply(parVals$mr_ev_nc, c(2, 3, 4), measureIndPars) - pe <- apply(parVals$mr_pe, c(2, 3, 4), measureIndPars) - - # Initialize modelRegressor and add model-based regressors - regressors <- NULL - regressors$ev_c <- ev_c - regressors$ev_nc <- ev_nc - regressors$pe <- pe - - modelData$modelRegressor <- regressors - } - - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. 
If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/pst_gainloss_Q.R b/R/pst_gainloss_Q.R index fe4bc251..57f76944 100644 --- a/R/pst_gainloss_Q.R +++ b/R/pst_gainloss_Q.R @@ -1,385 +1,74 @@ -#' Probabilistic Selection Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Probabilistic Selection Task -#' with the following parameters: -#' "alpha_pos" (Learning rate for positive feedbacks), -#' "alpha_neg" (Learning rate for negative feedbacks), and -#' "beta" (inverse temperature). -#' -#' Contributor: \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} -#' -#' \strong{MODEL:} -#' Gain-loss Q learning model (Frank et al., 2007) -#' -#' @param data A .txt file containing the data to be modeled. -#' Data columns should be labelled as follows: -#' "subjID", "type", "choice", and "reward". -#' See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. 
A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"pst_gainloss_Q"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. 
} -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Probabilistic Selection Task, there should be four columns of data with the labels -#' "subjID", "type", "choice", and "reward". It is not necessary for the columns to be in this -#' particular order, however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"type"}}{The type of stimuli in the trial. For given 6 stimuli, \code{"type"} should be given -#' in a form as \code{"option1""option2"}, e.g., \code{12}, \code{34}, \code{56}. -#' -#' The code for each option should be defined as below: -#' \tabular{ccl}{ -#' Code \tab Stimulus \tab Probability to win \cr -#' \code{1} \tab A \tab 80\% \cr -#' \code{2} \tab B \tab 20\% \cr -#' \code{3} \tab C \tab 70\% \cr -#' \code{4} \tab D \tab 30\% \cr -#' \code{5} \tab E \tab 60\% \cr -#' \code{6} \tab F \tab 40\% -#' } -#' The function will work even if you use different probabilities for stimuli, -#' but the total number of stimuli should be less than or equal to 6. 
-#' } -#' \item{\code{"choice"}}{Whether a subject choose the left option between given two options.} -#' \item{\code{"reward"}}{Amount of reward as a result of the choice.} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. 
-#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION pst_gainloss_Q +#' @templateVar CONTRIBUTOR \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} +#' @templateVar TASK_NAME Probabilistic Selection Task +#' @templateVar MODEL_NAME Gain-Loss Q Learning Model +#' @templateVar MODEL_CITE (Frank et al., 2007, PNAS) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "type", "choice", "reward" +#' @templateVar PARAMETERS "alpha_pos" (learning rate for positive feedbacks), "alpha_neg" (learning rate for negative feedbacks), "beta" (inverse temperature) +#' @templateVar LENGTH_DATA_COLUMNS 4 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"type"}{Two-digit number indicating which pair of stimuli were presented for that trial, e.g. \code{12}, \code{34}, or \code{56}. 
The digit on the left (tens-digit) indicates the presented stimulus for option1, while the digit on the right (ones-digit) indicates that for option2.\cr Code for each stimulus type (1~6) is defined as below: \tabular{ccl}{Code \tab Stimulus \tab Probability to win \cr \code{1} \tab A \tab 80\% \cr \code{2} \tab B \tab 20\% \cr \code{3} \tab C \tab 70\% \cr \code{4} \tab D \tab 30\% \cr \code{5} \tab E \tab 60\% \cr \code{6} \tab F \tab 40\%} The modeling will still work even if different probabilities are used for the stimuli; however, the total number of stimuli should be less than or equal to 6.} +#' @templateVar DETAILS_DATA_3 \item{"choice"}{Whether the subject chose the left option (option1) out of the given two options (i.e. if option1 was chosen, 1; if option2 was chosen, 0).} +#' @templateVar DETAILS_DATA_4 \item{"reward"}{Amount of reward earned as a result of the trial.} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Frank, M. J., Moustafa, A. A., Haughey, H. M., Curran, T., & Hutchison, K. E. (2007). -#' Genetic triple dissociation reveals multiple roles for dopamine in reinforcement learning. -#' Proceedings of the National Academy of Sciences, 104(41), 16311-16316. 
-#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- pst_gainloss_Q(data = "example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -pst_gainloss_Q <- function(data = "choose", - niter = 2000, - nwarmup = 1000, - nchain = 1, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "pst_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... 
**\n") - } - - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[, "subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 3 - POI <- c("mu_alpha_pos", "mu_alpha_neg", "mu_beta", - "alpha_pos" , "alpha_neg", "beta", - "log_lik") - - # TODO: Check which indices are needed for regressors - if (modelRegressor) - POI <- c(POI) - - if (inc_postpred) - POI <- c(POI, "y_pred") - - modelName <- "pst_gainloss_Q" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - for (sIdx in 1:numSubjs) { - curSubj <- subjList[sIdx] - Tsubj[sIdx] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - # for multiple subjects - option1 <- array(-1, c(numSubjs, maxTrials)) - option2 <- array(-1, c(numSubjs, maxTrials)) - choice <- array(-1, c(numSubjs, maxTrials)) - reward <- array(-1, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - subj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == subj) - - option1[i, 1:useTrials] <- tmp$type %/% 10 - option2[i, 1:useTrials] <- tmp$type %% 10 - choice[i, 1:useTrials] <- tmp$choice - reward[i, 1:useTrials] <- tmp$reward - } - - dataList <- list( - 'N' = numSubjs, - 'T' = maxTrials, - 'Tsubj' = Tsubj, - 'option1' = option1, - 'option2' = option2, - 'choice' = choice, - 'reward' = reward - ) - - # inits - if (inits[1] == "random") { - genInitList <- "random" - } else { - if (inits[1] == "fixed") { - inits_fixed <- c(1.0, 1.0, 1.0) - } else { - if (length(inits) == numPars) - inits_fixed <- inits - else - stop("Check your inital values!") - } - - # TODO: Change expressions of randomly generated values in genInitList - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2]), qnorm(inits_fixed[3] / 10)), - sigma = c(1.0, 1.0, 1.0), - alpha_pos_pr = rep(qnorm(inits_fixed[1]), numSubjs), - alpha_pos_pr = rep(qnorm(inits_fixed[2]), numSubjs), - beta_pr = rep(qnorm(inits_fixed[3] / 10), numSubjs) - ) - } - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater 
than number of locally available cores. Using all locally available cores.") - } else { - options(mc.cores = ncore) +#' Frank, M. J., Moustafa, A. A., Haughey, H. M., Curran, T., & Hutchison, K. E. (2007). Genetic +#' triple dissociation reveals multiple roles for dopamine in reinforcement learning. Proceedings +#' of the National Academy of Sciences, 104(41), 16311-16316. + +pst_gainloss_Q <- hBayesDM_model( + task_name = "pst", + model_name = "gainloss_Q", + data_columns = c("subjID", "type", "choice", "reward"), + parameters = list("alpha_pos" = c(0, 0.5, 1), + "alpha_neg" = c(0, 0.5, 1), + "beta" = c(0, 1, 10)), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + option1 <- array(-1, c(n_subj, t_max)) + option2 <- array(-1, c(n_subj, t_max)) + choice <- array(-1, c(n_subj, t_max)) + reward <- array(-1, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + option1[i, 1:t] <- DT_subj$type %/% 10 + option2[i, 1:t] <- DT_subj$type %% 10 + choice[i, 1:t] <- DT_subj$choice + reward[i, 1:t] <- DT_subj$reward } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m <- stanmodels$pst_gainloss_Q - if (vb) { # if variational Bayesian - fit <- rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit <- rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize 
= stepsize)) + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + option1 = option1, + option2 = option2, + choice = choice, + reward = reward + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - # Extract the Stan fit object - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) - parVals$y_pred[parVals$y_pred == -1] <- NA - - alpha_pos <- parVals$alpha_pos - alpha_neg <- parVals$alpha_neg - beta <- parVals$beta - - # Individual parameters (e.g., individual posterior means) - measureIndPars <- switch(indPars, mean=mean, median=median, mode=estimate_mode) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - # TODO: Use *apply function instead of for loop - for (i in 1:numSubjs) { - allIndPars[i, ] <- c(measureIndPars(alpha_pos[, i]), - measureIndPars(alpha_neg[, i]), - measureIndPars(beta[, i])) - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("alpha_pos", - "alpha_neg", - "beta", - "subjID") - - # Wrap up data into a list - modelData <- list() - modelData$model <- modelName - modelData$allIndPars <- allIndPars - modelData$parVals <- parVals - modelData$fit <- fit - modelData$rawdata <- rawdata - modelData$modelRegressor <- NA - - # TODO: Change this block after re-choosing the proper regressors - if (modelRegressor) { - # Initialize modelRegressor and add model-based regressors - modelRegressor <- NULL - modelData$modelRegressor <- modelRegressor - } - - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. 
If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/ra_noLA.R b/R/ra_noLA.R old mode 100755 new mode 100644 index 67d9153c..73cbb967 --- a/R/ra_noLA.R +++ b/R/ra_noLA.R @@ -1,351 +1,81 @@ -#' Risk Aversion Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Risk Aversion Task with the following parameters: "rho" (risk aversion) and "tau" (inverse temp). -#' -#' \strong{MODEL:} -#' Prospect Theory without a loss aversion (LA) parameter -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "gain", "loss", "cert", and "gamble". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. 
-#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"ra_prospect"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. 
} -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Risk Aversion Task, there should be four columns of data with the labels -#' "subjID", "riskyGain", "riskyLoss", and "safeOption". It is not necessary for the columns to be in this -#' particular order, however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"gain"}}{Possible (50\%) gain outcome of a risky option (e.g. 9).} -#' \item{\code{"loss"}}{Possible (50\%) loss outcome of a risky option (e.g. 5, or -5).} -#' \item{\code{"cert"}}{Guaranteed amount of a safe option. "cert" is assumed to be zero or greater than zero.} -#' \item{\code{"gamble"}}{If gamble was taken, gamble == 1, else gamble == 0.} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. 
As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION ra_noLA +#' @templateVar TASK_NAME Risk Aversion Task +#' @templateVar MODEL_NAME Prospect Theory (Sokol-Hessner et al., 2009, PNAS), without loss aversion (LA) parameter +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "gain", "loss", "cert", "gamble" +#' @templateVar PARAMETERS "rho" (risk aversion), "tau" (inverse temperature) +#' @templateVar LENGTH_DATA_COLUMNS 5 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"gain"}{Possible (50\%) gain outcome of a risky option (e.g. 9).} +#' @templateVar DETAILS_DATA_3 \item{"loss"}{Possible (50\%) loss outcome of a risky option (e.g. 5, or -5).} +#' @templateVar DETAILS_DATA_4 \item{"cert"}{Guaranteed amount of a safe option. "cert" is assumed to be zero or greater than zero.} +#' @templateVar DETAILS_DATA_5 \item{"gamble"}{If gamble was taken, gamble == 1; else gamble == 0.} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +#' Sokol-Hessner, P., Hsu, M., Curley, N. G., Delgado, M. R., Camerer, C. F., Phelps, E. A., & +#' Smith, E. E. (2009). Thinking like a Trader Selectively Reduces Individuals' Loss Aversion. +#' Proceedings of the National Academy of Sciences of the United States of America, 106(13), +#' 5035-5040. 
http://www.pnas.org/content/106/13/5035 #' #' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- ra_noLA(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' #' +#' \dontrun{ #' # Paths to data published in Sokol-Hessner et al. (2009) -#' path_to_attend_data = system.file("extdata/ra_data_attend.txt", package = "hBayesDM") -#' -#' path_to_regulate_data = system.file("extdata/ra_data_reappraisal.txt", package = "hBayesDM") +#' path_to_attend_data <- system.file("extdata", "ra_data_attend.txt", package = "hBayesDM") +#' path_to_regulate_data <- system.file("extdata", "ra_data_reappraisal.txt", package = "hBayesDM") #' } -ra_noLA <- function(data = "choose", - niter = 4000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) 
- stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "ra_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 2 - POI <- c("mu_rho", "mu_tau", - "sigma", - "rho" , "tau", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "ra_noLA" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (sIdx in 1:numSubjs) { - curSubj <- subjList[sIdx] - Tsubj[sIdx] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - # for multiple subjects - gain <- array(0, c(numSubjs, maxTrials)) - loss <- array(0, c(numSubjs, maxTrials)) - cert <- array(0, c(numSubjs, maxTrials)) - gamble <- array(-1, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - gain[i, 1:useTrials] <- tmp[1:useTrials, "gain"] - loss[i, 1:useTrials] <- abs(tmp[1:useTrials, "loss"]) # absolute loss amount - cert[i, 1:useTrials] <- tmp[1:useTrials, "cert"] - gamble[i, 1:useTrials] <- tmp[1:useTrials, "gamble"] - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - numPars = numPars, - gain = gain, - loss = loss, - cert = cert, - gamble = gamble) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(1.0, 1.0) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]/2), qnorm(inits_fixed[2]/5)), - sigma = c(1.0, 1.0), - rho_p = rep(qnorm(inits_fixed[1]/2), numSubjs), - tau_p = rep(qnorm(inits_fixed[2]/5), numSubjs) -) - } - } else { - genInitList <- "random" - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.") - } else { - options(mc.cores = ncore) +ra_noLA <- hBayesDM_model( + task_name = "ra", + model_name = "noLA", + data_columns = c("subjID", "gain", "loss", "cert", "gamble"), + parameters = list("rho" = c(0, 1, 2), + "tau" = c(0, 1, 5)), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + gain <- array( 0, c(n_subj, t_max)) + loss <- array( 0, c(n_subj, t_max)) + cert <- array( 0, c(n_subj, t_max)) + gamble <- array(-1, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + gain[i, 1:t] <- DT_subj$gain + loss[i, 1:t] <- abs(DT_subj$loss) # absolute loss amount + cert[i, 1:t] <- DT_subj$cert + gamble[i, 1:t] <- DT_subj$gamble } - } else { - options(mc.cores = 1) - } - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$ra_noLA - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - # Extract the Stan fit object - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + gain = gain, + loss = loss, + cert = cert, + gamble = gamble + ) + + # Returned data_list will directly 
be passed to Stan + return(data_list) } +) - rho <- parVals$rho - tau <- parVals$tau - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c(mean(rho[, i]), - mean(tau[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c(median(rho[, i]), - median(tau[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(rho[, i]), - estimate_mode(tau[, i])) - } - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("rho", - "tau", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! 
****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/ra_noRA.R b/R/ra_noRA.R old mode 100755 new mode 100644 index fb4e8e73..1fabcf63 --- a/R/ra_noRA.R +++ b/R/ra_noRA.R @@ -1,351 +1,81 @@ -#' Risk Aversion Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Risk Aversion Task with the following parameters: "lambda" (loss aversion) and "tau" (inverse temp). -#' -#' \strong{MODEL:} -#' Prospect Theory without a risk aversion (RA) parameter -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "gain", "loss", "cert", and "gamble". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. 
-#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"ra_prospect"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Risk Aversion Task, there should be four columns of data with the labels -#' "subjID", "riskyGain", "riskyLoss", and "safeOption". 
It is not necessary for the columns to be in this -#' particular order, however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"gain"}}{Possible (50\%) gain outcome of a risky option (e.g. 9).} -#' \item{\code{"loss"}}{Possible (50\%) loss outcome of a risky option (e.g. 5, or -5).} -#' \item{\code{"cert"}}{Guaranteed amount of a safe option. "cert" is assumed to be zero or greater than zero.} -#' \item{\code{"gamble"}}{If gamble was taken, gamble == 1, else gamble == 0.} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. 
When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION ra_noRA +#' @templateVar TASK_NAME Risk Aversion Task +#' @templateVar MODEL_NAME Prospect Theory (Sokol-Hessner et al., 2009, PNAS), without risk aversion (RA) parameter +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "gain", "loss", "cert", "gamble" +#' @templateVar PARAMETERS "lambda" (loss aversion), "tau" (inverse temperature) +#' @templateVar LENGTH_DATA_COLUMNS 5 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"gain"}{Possible (50\%) gain outcome of a risky option (e.g. 9).} +#' @templateVar DETAILS_DATA_3 \item{"loss"}{Possible (50\%) loss outcome of a risky option (e.g. 5, or -5).} +#' @templateVar DETAILS_DATA_4 \item{"cert"}{Guaranteed amount of a safe option. 
"cert" is assumed to be zero or greater than zero.} +#' @templateVar DETAILS_DATA_5 \item{"gamble"}{If gamble was taken, gamble == 1; else gamble == 0.} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +#' Sokol-Hessner, P., Hsu, M., Curley, N. G., Delgado, M. R., Camerer, C. F., Phelps, E. A., & +#' Smith, E. E. (2009). Thinking like a Trader Selectively Reduces Individuals' Loss Aversion. +#' Proceedings of the National Academy of Sciences of the United States of America, 106(13), +#' 5035-5040. http://www.pnas.org/content/106/13/5035 #' #' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- ra_noRA(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' #' +#' \dontrun{ #' # Paths to data published in Sokol-Hessner et al. 
(2009) -#' path_to_attend_data = system.file("extdata/ra_data_attend.txt", package = "hBayesDM") -#' -#' path_to_regulate_data = system.file("extdata/ra_data_reappraisal.txt", package = "hBayesDM") +#' path_to_attend_data <- system.file("extdata", "ra_data_attend.txt", package = "hBayesDM") +#' path_to_regulate_data <- system.file("extdata", "ra_data_reappraisal.txt", package = "hBayesDM") #' } -ra_noRA <- function(data = "choose", - niter = 4000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "ra_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. 
\n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 2 - POI <- c("mu_lambda", "mu_tau", - "sigma", - "lambda" , "tau", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "ra_noRA" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. ################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (sIdx in 1:numSubjs) { - curSubj <- subjList[sIdx] - Tsubj[sIdx] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - # for multiple subjects - gain <- array(0, c(numSubjs, maxTrials)) - loss <- array(0, c(numSubjs, maxTrials)) - cert <- array(0, c(numSubjs, maxTrials)) - gamble <- array(-1, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - gain[i, 1:useTrials] <- tmp[1:useTrials, "gain"] - loss[i, 1:useTrials] <- abs(tmp[1:useTrials, "loss"]) # absolute loss amount - cert[i, 1:useTrials] <- tmp[1:useTrials, "cert"] - gamble[i, 1:useTrials] <- tmp[1:useTrials, "gamble"] - } - - dataList <- 
list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - numPars = numPars, - gain = gain, - loss = loss, - cert = cert, - gamble = gamble) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(1.0, 1.0) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]/5), qnorm(inits_fixed[2]/5)), - sigma = c(1.0, 1.0), - lambda_p = rep(qnorm(inits_fixed[1]/5), numSubjs), - tau_p = rep(qnorm(inits_fixed[2]/5), numSubjs) -) - } - } else { - genInitList <- "random" - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. Using all locally available cores.") - } else { - options(mc.cores = ncore) +ra_noRA <- hBayesDM_model( + task_name = "ra", + model_name = "noRA", + data_columns = c("subjID", "gain", "loss", "cert", "gamble"), + parameters = list("lambda" = c(0, 1, 5), + "tau" = c(0, 1, 5)), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + gain <- array( 0, c(n_subj, t_max)) + loss <- array( 0, c(n_subj, t_max)) + cert <- array( 0, c(n_subj, t_max)) + gamble <- array(-1, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + gain[i, 1:t] <- DT_subj$gain + loss[i, 1:t] <- abs(DT_subj$loss) # absolute loss amount + cert[i, 1:t] <- DT_subj$cert + gamble[i, 1:t] <- DT_subj$gamble } - } else { - options(mc.cores = 1) - } - cat("***********************************\n") - cat("** 
Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$ra_noRA - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - # Extract the Stan fit object - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + gain = gain, + loss = loss, + cert = cert, + gamble = gamble + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - lambda <- parVals$lambda - tau <- parVals$tau - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c(mean(lambda[, i]), - mean(tau[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c(median(lambda[, i]), - median(tau[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(lambda[, i]), - estimate_mode(tau[, i])) - } - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("lambda", - "tau", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. 
If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/ra_prospect.R b/R/ra_prospect.R old mode 100755 new mode 100644 index 4fb353df..30b85f89 --- a/R/ra_prospect.R +++ b/R/ra_prospect.R @@ -1,361 +1,83 @@ -#' Risk Aversion Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Risk Aversion Task with the following parameters: "rho" (risk aversion), "lambda" (loss aversion), and "tau" (inverse temp). -#' -#' \strong{MODEL:} -#' Prospect Theory (Sokol-Hessner et al., 2009, PNAS) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "gain", "loss", "cert", and "gamble". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. 
-#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"ra_prospect"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. 
} -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Risk Aversion Task, there should be four columns of data with the labels -#' "subjID", "riskyGain", "riskyLoss", and "safeOption". It is not necessary for the columns to be in this -#' particular order, however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"gain"}}{Possible (50\%) gain outcome of a risky option (e.g. 9).} -#' \item{\code{"loss"}}{Possible (50\%) loss outcome of a risky option (e.g. 5, or -5).} -#' \item{\code{"cert"}}{Guaranteed amount of a safe option. "cert" is assumed to be zero or greater than zero.} -#' \item{\code{"gamble"}}{If gamble was taken, gamble == 1, else gamble == 0.} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. 
As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION ra_prospect +#' @templateVar TASK_NAME Risk Aversion Task +#' @templateVar MODEL_NAME Prospect Theory +#' @templateVar MODEL_CITE (Sokol-Hessner et al., 2009, PNAS) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "gain", "loss", "cert", "gamble" +#' @templateVar PARAMETERS "rho" (risk aversion), "lambda" (loss aversion), "tau" (inverse temperature) +#' @templateVar LENGTH_DATA_COLUMNS 5 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"gain"}{Possible (50\%) gain outcome of a risky option (e.g. 9).} +#' @templateVar DETAILS_DATA_3 \item{"loss"}{Possible (50\%) loss outcome of a risky option (e.g. 5, or -5).} +#' @templateVar DETAILS_DATA_4 \item{"cert"}{Guaranteed amount of a safe option. "cert" is assumed to be zero or greater than zero.} +#' @templateVar DETAILS_DATA_5 \item{"gamble"}{If gamble was taken, gamble == 1; else gamble == 0.} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' Sokol-Hessner, P., Hsu, M., Curley, N. G., Delgado, M. R., Camerer, C. F., Phelps, E. A., & Smith, E. E. (2009). Thinking like -#' a Trader Selectively Reduces Individuals' Loss Aversion. Proceedings of the National Academy of Sciences of the United States -#' of America, 106(13), 5035-5040. 
http://doi.org/10.2307/40455144?ref = search-gateway:1f452c8925000031ef87ca756455c9e3 -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +#' Sokol-Hessner, P., Hsu, M., Curley, N. G., Delgado, M. R., Camerer, C. F., Phelps, E. A., & +#' Smith, E. E. (2009). Thinking like a Trader Selectively Reduces Individuals' Loss Aversion. +#' Proceedings of the National Academy of Sciences of the United States of America, 106(13), +#' 5035-5040. http://www.pnas.org/content/106/13/5035 #' #' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- ra_prospect(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' #' +#' \dontrun{ #' # Paths to data published in Sokol-Hessner et al. 
(2009) -#' path_to_attend_data = system.file("extdata/ra_data_attend.txt", package = "hBayesDM") -#' -#' path_to_regulate_data = system.file("extdata/ra_data_reappraisal.txt", package = "hBayesDM") +#' path_to_attend_data <- system.file("extdata", "ra_data_attend.txt", package = "hBayesDM") +#' path_to_regulate_data <- system.file("extdata", "ra_data_reappraisal.txt", package = "hBayesDM") #' } -ra_prospect <- function(data = "choose", - niter = 4000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "ra_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table( data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. 
\n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 3 - POI <- c("mu_rho", "mu_lambda", "mu_tau", - "sigma", - "rho" , "lambda", "tau", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "ra_prospect" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. ################################################################### - ################################################################################ - - Tsubj <- as.vector( rep( 0, numSubjs)) # number of trials for each subject - - for (sIdx in 1:numSubjs) { - curSubj <- subjList[ sIdx] - Tsubj[sIdx] <- sum( rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - # for multiple subjects - gain <- array(0, c(numSubjs, maxTrials)) - loss <- array(0, c(numSubjs, maxTrials)) - cert <- array(0, c(numSubjs, maxTrials)) - gamble <- array(-1, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - gain[i, 1:useTrials] <- tmp[1:useTrials, "gain"] - loss[i, 1:useTrials] <- abs(tmp[1:useTrials, "loss"]) # absolute loss amount - cert[i, 1:useTrials] <- tmp[1:useTrials, "cert"] - gamble[i, 1:useTrials] <- tmp[1:useTrials, 
"gamble"] - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - numPars = numPars, - gain = gain, - loss = loss, - cert = cert, - gamble = gamble) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(1.0, 1.0, 1.0) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c( qnorm( inits_fixed[1]/2), qnorm( inits_fixed[2]/5), qnorm( inits_fixed[3]/5)), - sigma = c(1.0, 1.0, 1.0), - rho_p = rep(qnorm( inits_fixed[1]/2), numSubjs), - lambda_p = rep(qnorm( inits_fixed[2]/5), numSubjs), - tau_p = rep(qnorm( inits_fixed[3]/5), numSubjs) -) - } - } else { - genInitList <- "random" - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. Using all locally available cores.") - } else { - options(mc.cores = ncore) +ra_prospect <- hBayesDM_model( + task_name = "ra", + model_name = "prospect", + data_columns = c("subjID", "gain", "loss", "cert", "gamble"), + parameters = list("rho" = c(0, 1, 2), + "lambda" = c(0, 1, 5), + "tau" = c(0, 1, 5)), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + gain <- array( 0, c(n_subj, t_max)) + loss <- array( 0, c(n_subj, t_max)) + cert <- array( 0, c(n_subj, t_max)) + gamble <- array(-1, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + gain[i, 1:t] <- DT_subj$gain + loss[i, 1:t] <- abs(DT_subj$loss) # absolute loss amount + cert[i, 
1:t] <- DT_subj$cert + gamble[i, 1:t] <- DT_subj$gamble } - } else { - options(mc.cores = 1) - } - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$ra_prospect - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - # Extract the Stan fit object - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + gain = gain, + loss = loss, + cert = cert, + gamble = gamble + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - rho <- parVals$rho - lambda <- parVals$lambda - tau <- parVals$tau - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c( mean(rho[, i]), - mean(lambda[, i]), - mean(tau[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c( median(rho[, i]), - median(lambda[, i]), - median(tau[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c( estimate_mode(rho[, i]), - estimate_mode(lambda[, i]), - estimate_mode(tau[, i])) - } - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("rho", - "lambda", - "tau", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- 
"hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/rdt_happiness.R b/R/rdt_happiness.R old mode 100755 new mode 100644 index 9857c15f..dee65a7d --- a/R/rdt_happiness.R +++ b/R/rdt_happiness.R @@ -1,391 +1,94 @@ -#' Risky Decision Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Risky Decision Task (Rutledge et al., 2014, PNAS) with the following parameters: "w0" (baseline), "w1" (weight of certain rewards), "w2" (weight of expected values), "w3" (weight of reward prediction errors), "gamma" (forgetting factor),and "sig" (standard deviation of error). -#' -#' Contributor: \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -#' -#' \strong{MODEL:} -#' Happiness Computational Model (Rutledge et al., 2014, PNAS) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "gain", "loss", "cert", "type", "gamble", "outcome", "happy", and "RT_happy". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. 
-#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. 
-#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"rdt_happiness"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Risky Decision Task, there should be nine columns of data with the labels -#' "subjID", "gain", "loss", "cert", "type", "gamble", "outcome", "happy", and "RT_happy". It is not necessary for the columns to be in this -#' particular order, however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"gain"}}{Possible (50\%) gain outcome of a risky option (e.g. 9).} -#' \item{\code{"loss"}}{Possible (50\%) loss outcome of a risky option (e.g. 
5, or -5).} -#' \item{\code{"cert"}}{Guaranteed amount of a safe option.} -#' \item{\code{"type"}}{loss == -1, mixed == 0, gain == 1} -#' \item{\code{"gamble"}}{If gamble was taken, gamble == 1, else gamble == 0.} -#' \item{\code{"outcome"}}{The result of the chosen option.} -#' \item{\code{"happy"}}{The happiness score.} -#' \item{\code{"RT_happy"}}{The reaction time of the happiness trial.} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. 
By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION rdt_happiness +#' @templateVar CONTRIBUTOR \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +#' @templateVar TASK_NAME Risky Decision Task +#' @templateVar MODEL_NAME Happiness Computational Model +#' @templateVar MODEL_CITE (Rutledge et al., 2014, PNAS) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "gain", "loss", "cert", "type", "gamble", "outcome", "happy", "RT_happy" +#' @templateVar PARAMETERS "w0" (baseline), "w1" (weight of certain rewards), "w2" (weight of expected values), "w3" (weight of reward prediction errors), "gam" (forgetting factor), "sig" (standard deviation of error) +#' @templateVar LENGTH_DATA_COLUMNS 9 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"gain"}{Possible (50\%) gain outcome of a risky option (e.g. 9).} +#' @templateVar DETAILS_DATA_3 \item{"loss"}{Possible (50\%) loss outcome of a risky option (e.g. 
5, or -5).} +#' @templateVar DETAILS_DATA_4 \item{"cert"}{Guaranteed amount of a safe option.} +#' @templateVar DETAILS_DATA_5 \item{"type"}{loss == -1, mixed == 0, gain == 1} +#' @templateVar DETAILS_DATA_6 \item{"gamble"}{If gamble was taken, gamble == 1; else gamble == 0.} +#' @templateVar DETAILS_DATA_7 \item{"outcome"}{Result of the trial.} +#' @templateVar DETAILS_DATA_8 \item{"happy"}{Happiness score.} +#' @templateVar DETAILS_DATA_9 \item{"RT_happy"}{Reaction time for answering the happiness score.} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Rutledge, R. B., Skandali, N., Dayan, P., & Dolan, R. J. (2014). A computational and neural model of momentary subjective well-being. -#' Proceedings of the National Academy of Sciences, 111(33), 12252-12257. -#' -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- rdt_happiness(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' -#' -#' -#' } - -rdt_happiness <- function(data = "choose", - niter = 4000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, 
etc.) - stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "rdt_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table( data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 6 - POI <- c("mu_w0", "mu_w1", "mu_w2", "mu_w3", "mu_gam", "mu_sig", - "sigma", - "w0" , "w1", "w2", "w3", "gam", "sig", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "rdt_happiness" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector( rep( 0, numSubjs)) # number of trials for each subject - - for (sIdx in 1:numSubjs) { - curSubj <- subjList[ sIdx] - Tsubj[sIdx] <- sum( rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - # for multiple subjects - gain <- array(0, c(numSubjs, maxTrials)) - loss <- array(0, c(numSubjs, maxTrials)) - cert <- array(0, c(numSubjs, maxTrials)) - gamble <- array(-1, c(numSubjs, maxTrials)) - type <- array(-1, c(numSubjs, maxTrials)) - outcome <- array(0, c(numSubjs, maxTrials)) - happy <- array(0, c(numSubjs, maxTrials)) - RT_happy <- array(0, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - gain[i, 1:useTrials] <- tmp[1:useTrials, "gain"] - loss[i, 1:useTrials] <- abs(tmp[1:useTrials, "loss"]) # absolute loss amount - cert[i, 1:useTrials] <- tmp[1:useTrials, "cert"] - gamble[i, 1:useTrials] <- tmp[1:useTrials, "gamble"] - type[i, 1:useTrials] <- tmp[1:useTrials, "type"] - outcome[i, 1:useTrials] <- tmp[1:useTrials, "outcome"] - happy[i, 1:useTrials] <- tmp[1:useTrials, "happy"] - RT_happy[i, 1:useTrials] <- tmp[1:useTrials, "RT_happy"] - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - numPars = numPars, - gain = gain, - loss = loss, - cert = cert, - gamble = gamble, - type = type, - outcome = outcome, - happy = happy, - RT_happy = RT_happy) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(1.0, 1.0, 1.0, 1.0, 1.0, 1.0) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c( inits_fixed[1], 
inits_fixed[2], inits_fixed[3], inits_fixed[4], qnorm( inits_fixed[5]), log( inits_fixed[6])), - sigma = c(1.0, 1.0, 1.0, 1.0, 1.0, 1.0), - w0_p = rep(inits_fixed[1], numSubjs), - w1_p = rep(inits_fixed[2], numSubjs), - w2_p = rep(inits_fixed[3], numSubjs), - w3_p = rep(inits_fixed[4], numSubjs), - gam_p = rep(qnorm( inits_fixed[5]), numSubjs), - sig_p = rep(log( inits_fixed[6]), numSubjs) -) - } - } else { - genInitList <- "random" - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. Using all locally available cores.") - } else { - options(mc.cores = ncore) +#' Rutledge, R. B., Skandali, N., Dayan, P., & Dolan, R. J. (2014). A computational and neural model +#' of momentary subjective well-being. Proceedings of the National Academy of Sciences, 111(33), +#' 12252-12257. + +rdt_happiness <- hBayesDM_model( + task_name = "rdt", + model_name = "happiness", + data_columns = c("subjID", "gain", "loss", "cert", "type", "gamble", "outcome", "happy", "RT_happy"), + parameters = list("w0" = c(-Inf, 1, Inf), + "w1" = c(-Inf, 1, Inf), + "w2" = c(-Inf, 1, Inf), + "w3" = c(-Inf, 1, Inf), + "gam" = c(0, 0.5, 1), + "sig" = c(0, 1, Inf)), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + gain <- array( 0, c(n_subj, t_max)) + loss <- array( 0, c(n_subj, t_max)) + cert <- array( 0, c(n_subj, t_max)) + type <- array(-1, c(n_subj, t_max)) + gamble <- array(-1, c(n_subj, t_max)) + outcome <- array( 0, c(n_subj, t_max)) + happy <- array( 0, c(n_subj, t_max)) + RT_happy <- array( 0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 
1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + gain[i, 1:t] <- DT_subj$gain + loss[i, 1:t] <- abs(DT_subj$loss) # absolute loss amount + cert[i, 1:t] <- DT_subj$cert + type[i, 1:t] <- DT_subj$type + gamble[i, 1:t] <- DT_subj$gamble + outcome[i, 1:t] <- DT_subj$outcome + happy[i, 1:t] <- DT_subj$happy + RT_happy[i, 1:t] <- DT_subj$rthappy } - } else { - options(mc.cores = 1) - } - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$rdt_happiness - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - # Extract the Stan fit object - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + gain = gain, + loss = loss, + cert = cert, + type = type, + gamble = gamble, + outcome = outcome, + happy = happy, + RT_happy = RT_happy + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - w0 <- parVals$w0 - w1 <- parVals$w1 - w2 <- parVals$w2 - w3 <- parVals$w3 - gam <- parVals$gam - sig <- parVals$sig - - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c( mean(w0[, i]), - mean(w1[, i]), - mean(w2[, i]), - mean(w3[, i]), - mean(gam[, i]), - mean(sig[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c( median(w0[, i]), - 
median(w1[, i]), - median(w2[, i]), - median(w3[, i]), - median(gam[, i]), - median(sig[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c( estimate_mode(w0[, i]), - estimate_mode(w1[, i]), - estimate_mode(w2[, i]), - estimate_mode(w3[, i]), - estimate_mode(gam[, i]), - estimate_mode(sig[, i])) - } - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("w0", - "w1", - "w2", - "w3", - "gam", - "sig", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! 
****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/rhat.R b/R/rhat.R old mode 100755 new mode 100644 diff --git a/R/settings.R b/R/settings.R new file mode 100644 index 00000000..36bb25f6 --- /dev/null +++ b/R/settings.R @@ -0,0 +1,6 @@ +#' @noRd +if (Sys.getenv('BUILD_ALL') == "true") { + FLAG_BUILD_ALL <- TRUE +} else { + FLAG_BUILD_ALL <- FALSE +} diff --git a/R/stanmodels.R b/R/stanmodels.R old mode 100755 new mode 100644 index 4cd77695..3201c11f --- a/R/stanmodels.R +++ b/R/stanmodels.R @@ -1,40 +1,38 @@ # Part of the rstanarm package for estimating model parameters -# Copyright (C) 2015, 2016 Trustees of Columbia University -# +# Copyright (C) 2015, 2016, 2017 Trustees of Columbia University +# # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 3 # of the License, or (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
# This file is only intended to be used during the installation process # nocov start -MODELS_HOME <- "exec" -if (!file.exists(MODELS_HOME)) MODELS_HOME <- sub("R$", "exec", getwd()) +MODELS_HOME <- "inst" +if (!file.exists(MODELS_HOME)) MODELS_HOME <- sub("R$", "src", getwd()) -stan_files <- dir(MODELS_HOME, pattern = "stan$", full.names = TRUE) -stanmodels <- sapply(stan_files, function(f) { +stan_files <- dir(file.path(MODELS_HOME, "stan_files"), + pattern = "stan$", full.names = TRUE) +stanmodels <- lapply(stan_files, function(f) { model_cppname <- sub("\\.stan$", "", basename(f)) - isystem <- system.file("chunks", package = methods::getPackageName(environment(), FALSE)) - if (!file.exists(file.path(isystem, "common_functions.stan"))) - isystem <- file.path("inst", "chunks") - if (!file.exists(file.path(isystem, "common_functions.stan"))) - isystem <- file.path("..", "inst", "chunks") - stanfit <- rstan::stanc_builder(f, isystem) - stanfit$model_cpp <- list(model_cppname = stanfit$model_name, + stanfit <- rstan::stanc(f, allow_undefined = TRUE, + obfuscate_model_name = FALSE) + stanfit$model_cpp <- list(model_cppname = stanfit$model_name, model_cppcode = stanfit$cppcode) - return(do.call(methods::new, args = c(stanfit[-(1:3)], Class = "stanmodel", + return(do.call(methods::new, args = c(stanfit[-(1:3)], Class = "stanmodel", mk_cppmodule = function(x) get(paste0("model_", model_cppname))))) } ) -names(stanmodels) <- sub("\\.stan$", "", basename(names(stanmodels))) +names(stanmodels) <- sub("\\.stan$", "", basename(stan_files)) rm(MODELS_HOME) # nocov end + diff --git a/R/ts_par4.R b/R/ts_par4.R index a3a1b636..0eed33e9 100644 --- a/R/ts_par4.R +++ b/R/ts_par4.R @@ -1,372 +1,78 @@ -#' Two-Step Task (Daw et al., 2011, Neuron) -#' -#' @description -#' Hierarchical Bayesian Modeling of the Two-Step Task using the following 4 parameters: with the following parameters: "a" (learnign rate for both stages 1 and 2), "beta" (inverse temperature for both stages 1 and 2), 
"pi" (perseverance), and "w" (model-based weight).\cr\cr -#' -#' Contributor: \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -#' -#' \strong{MODEL:} -#' Hybrid model (Daw et al., 2011; Wunderlich et al, 2012) with four parameters -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "level1_choice", "level2_choice", "reward". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. 
-#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param trans_prob Common state transition probability from Stage (Level) 1 to Stage 2. Defaults to 0.7. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"ts_par4"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Two-Step Task, there should be four columns of data with the labels -#' "subjID", "level1_choice", "level2_choice", "reward". 
It is not necessary for the columns to be in this -#' particular order, however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"level1_choice"}}{Choice of the level 1. 1: stimulus 1, 2: stimulus 2} -#' \item{\code{"level2_choice"}}{Choice of the level 2. 1: stimulus 3, 2: stimulus 4, 3: stimulus 5, 4: stimulus 6.} -#' \item{\code{"reward"}}{Reward of the level 2 (0 or 1)} -#' } -#' \strong{*} Note: In our notation, choosing stimulus 1 in Level 1 leads to stimulus 3 & 4 in Level 2 with a common (0.7 by default) transition. -#' Choosing stimulus 3 in Level 1 leads to stimulus 5 & 6 in Level 2 with a common (0.7 by default) transition. -#' The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. 
Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION ts_par4 +#' @templateVar CONTRIBUTOR \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +#' @templateVar TASK_NAME Two-Step Task +#' @templateVar TASK_CITE (Daw et al., 2011, Neuron) +#' @templateVar MODEL_NAME Hybrid Model (Daw et al., 2011; Wunderlich et al., 2012), with 4 parameters +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "level1_choice", "level2_choice", "reward" +#' @templateVar PARAMETERS "a" (learning rate for both stages 1 & 2), "beta" (inverse temperature for both stages 1 & 2), "pi" (perseverance), "w" (model-based weight) +#' @templateVar ADDITIONAL_ARG \code{trans_prob}: Common state transition probability from Stage (Level) 1 to Stage (Level) 2. Defaults to 0.7. 
+#' @templateVar LENGTH_DATA_COLUMNS 4 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"level1_choice"}{Choice made for Level (Stage) 1 (1: stimulus 1, 2: stimulus 2).} +#' @templateVar DETAILS_DATA_3 \item{"level2_choice"}{Choice made for Level (Stage) 2 (1: stimulus 3, 2: stimulus 4, 3: stimulus 5, 4: stimulus 6).\cr *Note that, in our notation, choosing stimulus 1 in Level 1 leads to stimulus 3 & 4 in Level 2 with a common (0.7 by default) transition. Similarly, choosing stimulus 2 in Level 1 leads to stimulus 5 & 6 in Level 2 with a common (0.7 by default) transition. To change this default transition probability, set the function argument \code{trans_prob} to your preferred value.} +#' @templateVar DETAILS_DATA_4 \item{"reward"}{Reward after Level 2 (0 or 1).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Daw, N. D., Gershman, S. J., Seymour, B., Ben Seymour, Dayan, P., & Dolan, R. J. (2011). Model-Based Influences on Humans' -#' Choices and Striatal Prediction Errors. Neuron, 69(6), 1204-1215. http://doi.org/10.1016/j.neuron.2011.02.027 -#' -#' Wunderlich, K., Smittenaar, P., & Dolan, R. J. (2012). Dopamine enhances model-based over model-free choice behavior. -#' Neuron, 75(3), 418-424. -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. 
-#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- ts_par4(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' -#' -#' } - -ts_par4 <- function(data = "choose", - niter = 4000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10, - trans_prob = 0.7) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - stop("** Model-based regressors are not available for this model **\n") - } else { - modelPath <- system.file("stan", "ts_par4.stan", package="hBayesDM") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data=="example") { - data <- system.file("extdata", "ts_exampleData.txt", package = "hBayesDM") - } else if (data=="choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table( data, header = T, sep="\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... 
**\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows, ] - cat("The number of rows with NAs=", length(NA_rows), ". They are removed prior to modeling the data. \n", sep="") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 4 - POI <- c("mu_a", "mu_beta", "mu_pi", "mu_w", - "sigma", - "a", "beta", "pi", "w", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred_step1", "y_pred_step2") - } - - modelName <- "ts_par4" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector( rep( 0, numSubjs ) ) # number of trials for each subject - - for ( sIdx in 1:numSubjs ) { - curSubj <- subjList[ sIdx ] - Tsubj[sIdx] <- sum( rawdata$subjID == curSubj ) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - # for multiple subjects - level1_choice <- array(1, c(numSubjs, maxTrials) ) - level2_choice <- array(1, c(numSubjs, maxTrials) ) - reward <- array(0, c(numSubjs, maxTrials) ) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - level1_choice[i, 1:useTrials] <- tmp[1:useTrials, "level1_choice"] - level2_choice[i, 1:useTrials] <- tmp[1:useTrials, "level2_choice"] - reward[i, 1:useTrials] <- tmp[1:useTrials, "reward"] - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - numPars = numPars, - level1_choice = level1_choice, - level2_choice = level2_choice, - reward = reward, - trans_prob = trans_prob - ) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(0.5, 1.0, 1.0, 0.5) # "a", "beta", "pi", "w" - } else { - if (length(inits)==numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), log(inits_fixed[2]), qnorm(inits_fixed[3]/5), qnorm(inits_fixed[4])), - sigma = c(1.0, 1.0, 1.0, 1.0), - a_pr = rep(qnorm(inits_fixed[1]), numSubjs), - beta_pr = rep(log(inits_fixed[2]), numSubjs), - pi_pr = rep(qnorm(inits_fixed[3]/5), numSubjs), - w_pr = rep(qnorm(inits_fixed[4]), numSubjs) - ) +#' Daw, N. D., Gershman, S. J., Seymour, B., Ben Seymour, Dayan, P., & Dolan, R. J. (2011). +#' Model-Based Influences on Humans' Choices and Striatal Prediction Errors. 
Neuron, 69(6), +#' 1204-1215. http://doi.org/10.1016/j.neuron.2011.02.027 +#' +#' Wunderlich, K., Smittenaar, P., & Dolan, R. J. (2012). Dopamine enhances model-based over +#' model-free choice behavior. Neuron, 75(3), 418-424. + +ts_par4 <- hBayesDM_model( + task_name = "ts", + model_name = "par4", + data_columns = c("subjID", "level1_choice", "level2_choice", "reward"), + parameters = list("a" = c(0, 0.5, 1), + "beta" = c(0, 1, Inf), + "pi" = c(0, 1, 5), + "w" = c(0, 0.5, 1)), + postpreds = c("y_pred_step1", "y_pred_step2"), + preprocess_func = function(raw_data, general_info, trans_prob = 0.7) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + level1_choice <- array(1, c(n_subj, t_max)) + level2_choice <- array(1, c(n_subj, t_max)) + reward <- array(0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + level1_choice[i, 1:t] <- DT_subj$level1choice + level2_choice[i, 1:t] <- DT_subj$level2choice + reward[i, 1:t] <- DT_subj$reward } - } else { - genInitList <- "random" - } - rstan::rstan_options(auto_write = TRUE) - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.") - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$ts_par4 - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize) ) - } - # Extract the Stan fit object - parVals <- rstan::extract(fit, permuted=T) - if (inc_postpred) { - parVals$y_pred_step1[parVals$y_pred_step1==-1] <- NA - parVals$y_pred_step2[parVals$y_pred_step2==-1] <- NA - } - - a <- parVals$a - beta <- parVals$beta - pi <- parVals$pi - w <- parVals$w - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars=="mean") { - allIndPars[i, ] <- c( mean(a[, i]), - mean(beta[, i]), - mean(pi[, i]), - mean(w[, i]) ) - } else if (indPars=="median") { - allIndPars[i, ] <- c( median(a[, i]), - median(beta[, i]), - median(pi[, i]), - median(w[, i]) ) - } else if (indPars=="mode") { - allIndPars[i, ] <- c( estimate_mode(a[, i]), - estimate_mode(beta[, i]), - estimate_mode(pi[, i]), - estimate_mode(w[, i]) ) - } - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("a", - "beta", - "pi", - "w", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # 
If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file=file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData" ) ) ) + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + level1_choice = level1_choice, + level2_choice = level2_choice, + reward = reward, + trans_prob = trans_prob + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/ts_par6.R b/R/ts_par6.R index acd93d82..c05e5cfd 100644 --- a/R/ts_par6.R +++ b/R/ts_par6.R @@ -1,381 +1,77 @@ -#' Two-Step Task (Daw et al., 2011, Neuron) -#' -#' @description -#' Hierarchical Bayesian Modeling of the Two-Step Task with the following 6 parameters: "a1" (learnign rate in stage 1), "a2" (learnign rate in stage 2), "beta1" (inverse temperature in stage 1), "beta2" (inverse temperature in stage 2), "pi" (perseverance), and "w" (model-based weight).\cr\cr -#' -#' Contributor: \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -#' -#' \strong{MODEL:} -#' Hybrid model (Daw et al., 2011, Neuron) with 6 parameters -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "level1_choice", "level2_choice", "reward". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. 
-#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param trans_prob Common state transition probability from Stage (Level) 1 to Stage 2. Defaults to 0.7. 
-#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"ts_par6"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Two-Step Task, there should be four columns of data with the labels -#' "subjID", "level1_choice", "level2_choice", "reward". It is not necessary for the columns to be in this -#' particular order, however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"level1_choice"}}{Choice of the level 1. 1: stimulus 1, 2: stimulus 2} -#' \item{\code{"level2_choice"}}{Choice of the level 2. 
1: stimulus 3, 2: stimulus 4, 3: stimulus 5, 4: stimulus 6.} -#' \item{\code{"reward"}}{Reward of the level 2 (0 or 1)} -#' } -#' \strong{*} Note: In our notation, choosing stimulus 1 in Level 1 leads to stimulus 3 & 4 in Level 2 with a common (0.7 by default) transition. -#' Choosing stimulus 3 in Level 1 leads to stimulus 5 & 6 in Level 2 with a common (0.7 by default) transition. -#' The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. 
By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION ts_par6 +#' @templateVar CONTRIBUTOR \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +#' @templateVar TASK_NAME Two-Step Task +#' @templateVar TASK_CITE (Daw et al., 2011, Neuron) +#' @templateVar MODEL_NAME Hybrid Model (Daw et al., 2011, Neuron), with 6 parameters +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "level1_choice", "level2_choice", "reward" +#' @templateVar PARAMETERS "a1" (learning rate in stage 1), "beta1" (inverse temperature in stage 1), "a2" (learning rate in stage 2), "beta2" (inverse temperature in stage 2), "pi" (perseverance), "w" (model-based weight) +#' @templateVar ADDITIONAL_ARG \code{trans_prob}: Common state transition probability from Stage (Level) 1 to Stage (Level) 2. Defaults to 0.7. 
+#' @templateVar LENGTH_DATA_COLUMNS 4 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"level1_choice"}{Choice made for Level (Stage) 1 (1: stimulus 1, 2: stimulus 2).} +#' @templateVar DETAILS_DATA_3 \item{"level2_choice"}{Choice made for Level (Stage) 2 (1: stimulus 3, 2: stimulus 4, 3: stimulus 5, 4: stimulus 6).\cr *Note that, in our notation, choosing stimulus 1 in Level 1 leads to stimulus 3 & 4 in Level 2 with a common (0.7 by default) transition. Similarly, choosing stimulus 2 in Level 1 leads to stimulus 5 & 6 in Level 2 with a common (0.7 by default) transition. To change this default transition probability, set the function argument \code{trans_prob} to your preferred value.} +#' @templateVar DETAILS_DATA_4 \item{"reward"}{Reward after Level 2 (0 or 1).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Daw, N. D., Gershman, S. J., Seymour, B., Ben Seymour, Dayan, P., & Dolan, R. J. (2011). Model-Based Influences on Humans' -#' Choices and Striatal Prediction Errors. Neuron, 69(6), 1204-1215. http://doi.org/10.1016/j.neuron.2011.02.027 -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. 
-#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- ts_par6(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' -#' -#' } - -ts_par6 <- function(data = "choose", - niter = 4000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "fixed", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10, - trans_prob = 0.7) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - stop("** Model-based regressors are not available for this model **\n") - } else { - modelPath <- system.file("stan", "ts_par6.stan", package="hBayesDM") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data=="example") { - data <- system.file("extdata", "ts_exampleData.txt", package = "hBayesDM") - } else if (data=="choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table( data, header = T, sep="\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... 
**\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows, ] - cat("The number of rows with NAs=", length(NA_rows), ". They are removed prior to modeling the data. \n", sep="") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 6 - POI <- c("mu_a1", "mu_beta1", "mu_a2", "mu_beta2", "mu_pi", "mu_w", - "sigma", - "a1", "beta1", "a2", "beta2", "pi", "w", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred_step1", "y_pred_step2") - } - - modelName <- "ts_par6" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector( rep( 0, numSubjs ) ) # number of trials for each subject - - for ( sIdx in 1:numSubjs ) { - curSubj <- subjList[ sIdx ] - Tsubj[sIdx] <- sum( rawdata$subjID == curSubj ) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - # for multiple subjects - level1_choice <- array(1, c(numSubjs, maxTrials) ) - level2_choice <- array(1, c(numSubjs, maxTrials) ) - reward <- array(0, c(numSubjs, maxTrials) ) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - level1_choice[i, 1:useTrials] <- tmp[1:useTrials, "level1_choice"] - level2_choice[i, 1:useTrials] <- tmp[1:useTrials, "level2_choice"] - reward[i, 1:useTrials] <- tmp[1:useTrials, "reward"] - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - numPars = numPars, - level1_choice = level1_choice, - level2_choice = level2_choice, - reward = reward, - trans_prob = trans_prob - ) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(0.5, 1.0, 0.5, 1.0, 1.0, 0.5) # "a1", "beta1", "a2", "beta2", "pi", "w" - } else { - if (length(inits)==numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), log(inits_fixed[2]), qnorm(inits_fixed[3]), log(inits_fixed[4]) , qnorm(inits_fixed[5]/5), qnorm(inits_fixed[6])), - sigma = c(1.0, 1.0, 1.0, 1.0, 1.0, 1.0), - a1_pr = rep(qnorm(inits_fixed[1]), numSubjs), - beta1_pr = rep(log(inits_fixed[2]), numSubjs), - a2_pr = rep(qnorm(inits_fixed[3]), numSubjs), - beta2_pr = rep(log(inits_fixed[4]), numSubjs), - pi_pr = rep(qnorm(inits_fixed[5]/5), numSubjs), - w_pr = rep(qnorm(inits_fixed[6]), 
numSubjs) - ) +#' Daw, N. D., Gershman, S. J., Seymour, B., Ben Seymour, Dayan, P., & Dolan, R. J. (2011). +#' Model-Based Influences on Humans' Choices and Striatal Prediction Errors. Neuron, 69(6), +#' 1204-1215. http://doi.org/10.1016/j.neuron.2011.02.027 + +ts_par6 <- hBayesDM_model( + task_name = "ts", + model_name = "par6", + data_columns = c("subjID", "level1_choice", "level2_choice", "reward"), + parameters = list("a1" = c(0, 0.5, 1), + "beta1" = c(0, 1, Inf), + "a2" = c(0, 0.5, 1), + "beta2" = c(0, 1, Inf), + "pi" = c(0, 1, 5), + "w" = c(0, 0.5, 1)), + postpreds = c("y_pred_step1", "y_pred_step2"), + preprocess_func = function(raw_data, general_info, trans_prob = 0.7) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + level1_choice <- array(1, c(n_subj, t_max)) + level2_choice <- array(1, c(n_subj, t_max)) + reward <- array(0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + level1_choice[i, 1:t] <- DT_subj$level1choice + level2_choice[i, 1:t] <- DT_subj$level2choice + reward[i, 1:t] <- DT_subj$reward } - } else { - genInitList <- "random" - } - rstan::rstan_options(auto_write = TRUE) - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.") - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$ts_par6 - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize) ) - } - # Extract the Stan fit object - parVals <- rstan::extract(fit, permuted=T) - if (inc_postpred) { - parVals$y_pred_step1[parVals$y_pred_step1==-1] <- NA - parVals$y_pred_step2[parVals$y_pred_step2==-1] <- NA - } - - a1 <- parVals$a1 - beta1 <- parVals$beta1 - a2 <- parVals$a2 - beta2 <- parVals$beta2 - pi <- parVals$pi - w <- parVals$w - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars=="mean") { - allIndPars[i, ] <- c( mean(a1[, i]), - mean(beta1[, i]), - mean(a2[, i]), - mean(beta2[, i]), - mean(pi[, i]), - mean(w[, i])) - } else if (indPars=="median") { - allIndPars[i, ] <- c( median(a1[, i]), - median(beta1[, i]), - median(a2[, i]), - median(beta2[, i]), - median(pi[, i]), - median(w[, i])) - } else if (indPars=="mode") { - allIndPars[i, ] <- c( estimate_mode(a1[, i]), - estimate_mode(beta1[, i]), - estimate_mode(a2[, i]), - estimate_mode(beta2[, i]), - estimate_mode(pi[, i]), - estimate_mode(w[, i])) - } - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("a1", - "beta1", - "a2", - "beta2", - "pi", - "w", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - 
names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file=file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData" ) ) ) + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + level1_choice = level1_choice, + level2_choice = level2_choice, + reward = reward, + trans_prob = trans_prob + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! 
****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/ts_par7.R b/R/ts_par7.R index 20046882..cf0bf3c8 100644 --- a/R/ts_par7.R +++ b/R/ts_par7.R @@ -1,387 +1,78 @@ -#' Two-Step Task (Daw et al., 2011, Neuron) -#' -#' @description -#' Hierarchical Bayesian Modeling of the Two-Step Task with the following 7 parameters: "a1" (learnign rate in stage 1), "a2" (learnign rate in stage 2), "beta1" (inverse temperature in stage 1), "beta2" (inverse temperature in stage 2), "pi" (perseverance), "lambda" (eligibility trace), and "w" (model-based weight).\cr\cr -#' -#' Contributor: \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -#' -#' \strong{MODEL:} -#' Hybrid model (Daw et al., 2011, Neuron) with seven parameters (original model) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "level1_choice", "level2_choice", "reward". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. 
Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param trans_prob Common state transition probability from Stage (Level) 1 to Stage 2. Defaults to 0.7. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"ts_par7"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. 
-#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Two-Step Task, there should be four columns of data with the labels -#' "subjID", "level1_choice", "level2_choice", "reward". It is not necessary for the columns to be in this -#' particular order, however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"level1_choice"}}{Choice of the level 1. 1: stimulus 1, 2: stimulus 2} -#' \item{\code{"level2_choice"}}{Choice of the level 2. 1: stimulus 3, 2: stimulus 4, 3: stimulus 5, 4: stimulus 6.} -#' \item{\code{"reward"}}{Reward of the level 2 (0 or 1)} -#' } -#' \strong{*} Note: In our notation, choosing stimulus 1 in Level 1 leads to stimulus 3 & 4 in Level 2 with a common (0.7 by default) transition. -#' Choosing stimulus 3 in Level 1 leads to stimulus 5 & 6 in Level 2 with a common (0.7 by default) transition. -#' The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. 
-#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. 
+#' @templateVar MODEL_FUNCTION ts_par7 +#' @templateVar CONTRIBUTOR \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +#' @templateVar TASK_NAME Two-Step Task +#' @templateVar TASK_CITE (Daw et al., 2011, Neuron) +#' @templateVar MODEL_NAME Hybrid Model (Daw et al., 2011, Neuron), with 7 parameters (original model) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "level1_choice", "level2_choice", "reward" +#' @templateVar PARAMETERS "a1" (learning rate in stage 1), "beta1" (inverse temperature in stage 1), "a2" (learning rate in stage 2), "beta2" (inverse temperature in stage 2), "pi" (perseverance), "w" (model-based weight), "lambda" (eligibility trace) +#' @templateVar ADDITIONAL_ARG \code{trans_prob}: Common state transition probability from Stage (Level) 1 to Stage (Level) 2. Defaults to 0.7. +#' @templateVar LENGTH_DATA_COLUMNS 4 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"level1_choice"}{Choice made for Level (Stage) 1 (1: stimulus 1, 2: stimulus 2).} +#' @templateVar DETAILS_DATA_3 \item{"level2_choice"}{Choice made for Level (Stage) 2 (1: stimulus 3, 2: stimulus 4, 3: stimulus 5, 4: stimulus 6).\cr *Note that, in our notation, choosing stimulus 1 in Level 1 leads to stimulus 3 & 4 in Level 2 with a common (0.7 by default) transition. Similarly, choosing stimulus 2 in Level 1 leads to stimulus 5 & 6 in Level 2 with a common (0.7 by default) transition. To change this default transition probability, set the function argument \code{trans_prob} to your preferred value.} +#' @templateVar DETAILS_DATA_4 \item{"reward"}{Reward after Level 2 (0 or 1).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Daw, N. D., Gershman, S. J., Seymour, B., Ben Seymour, Dayan, P., & Dolan, R. J. (2011). Model-Based Influences on Humans' -#' Choices and Striatal Prediction Errors. 
Neuron, 69(6), 1204-1215. http://doi.org/10.1016/j.neuron.2011.02.027 -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- ts_par7(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' -#' -#' } - -ts_par7 <- function(data = "choose", - niter = 4000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "fixed", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10, - trans_prob = 0.7) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) 
- stop("** Model-based regressors are not available for this model **\n") - } else { - modelPath <- system.file("stan", "ts_par7.stan", package="hBayesDM") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data=="example") { - data <- system.file("extdata", "ts_exampleData.txt", package = "hBayesDM") - } else if (data=="choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table( data, header = T, sep="\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows, ] - cat("The number of rows with NAs=", length(NA_rows), ". They are removed prior to modeling the data. \n", sep="") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 7 - POI <- c("mu_a1", "mu_beta1", "mu_a2", "mu_beta2", "mu_pi", "mu_w", "mu_lambda", - "sigma", - "a1", "beta1", "a2", "beta2", "pi", "w", "lambda", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred_step1", "y_pred_step2") - } - - modelName <- "ts_par7" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector( rep( 0, numSubjs ) ) # number of trials for each subject - - for ( sIdx in 1:numSubjs ) { - curSubj <- subjList[ sIdx ] - Tsubj[sIdx] <- sum( rawdata$subjID == curSubj ) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - # for multiple subjects - level1_choice <- array(1, c(numSubjs, maxTrials) ) - level2_choice <- array(1, c(numSubjs, maxTrials) ) - reward <- array(0, c(numSubjs, maxTrials) ) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - level1_choice[i, 1:useTrials] <- tmp[1:useTrials, "level1_choice"] - level2_choice[i, 1:useTrials] <- tmp[1:useTrials, "level2_choice"] - reward[i, 1:useTrials] <- tmp[1:useTrials, "reward"] - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - numPars = numPars, - level1_choice = level1_choice, - level2_choice = level2_choice, - reward = reward, - trans_prob = trans_prob - ) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(0.5, 1.0, 0.5, 1.0, 1.0, 0.5, 0.5) # "a1", "beta1", "a2", "beta2", "pi", "w", "lambda" - } else { - if (length(inits)==numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), log(inits_fixed[2]), qnorm(inits_fixed[3]), log(inits_fixed[4]) , qnorm(inits_fixed[5]/5), qnorm(inits_fixed[6]), qnorm(inits_fixed[7])), - sigma = c(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0), - a1_pr = rep(qnorm(inits_fixed[1]), numSubjs), - beta1_pr = rep(log(inits_fixed[2]), numSubjs), - a2_pr = rep(qnorm(inits_fixed[3]), numSubjs), - beta2_pr = rep(log(inits_fixed[4]), numSubjs), - pi_pr = rep(qnorm(inits_fixed[5]/5), 
numSubjs), - w_pr = rep(qnorm(inits_fixed[6]), numSubjs), - lambda_pr = rep(qnorm(inits_fixed[7]), numSubjs) - ) +#' Daw, N. D., Gershman, S. J., Seymour, B., Ben Seymour, Dayan, P., & Dolan, R. J. (2011). +#' Model-Based Influences on Humans' Choices and Striatal Prediction Errors. Neuron, 69(6), +#' 1204-1215. http://doi.org/10.1016/j.neuron.2011.02.027 + +ts_par7 <- hBayesDM_model( + task_name = "ts", + model_name = "par7", + data_columns = c("subjID", "level1_choice", "level2_choice", "reward"), + parameters = list("a1" = c(0, 0.5, 1), + "beta1" = c(0, 1, Inf), + "a2" = c(0, 0.5, 1), + "beta2" = c(0, 1, Inf), + "pi" = c(0, 1, 5), + "w" = c(0, 0.5, 1), + "lambda" = c(0, 0.5, 1)), + postpreds = c("y_pred_step1", "y_pred_step2"), + preprocess_func = function(raw_data, general_info, trans_prob = 0.7) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + level1_choice <- array(1, c(n_subj, t_max)) + level2_choice <- array(1, c(n_subj, t_max)) + reward <- array(0, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + level1_choice[i, 1:t] <- DT_subj$level1choice + level2_choice[i, 1:t] <- DT_subj$level2choice + reward[i, 1:t] <- DT_subj$reward } - } else { - genInitList <- "random" - } - rstan::rstan_options(auto_write = TRUE) - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning("Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.") - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$ts_par7 - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize) ) - } - # Extract the Stan fit object - parVals <- rstan::extract(fit, permuted=T) - if (inc_postpred) { - parVals$y_pred_step1[parVals$y_pred_step1==-1] <- NA - parVals$y_pred_step2[parVals$y_pred_step2==-1] <- NA - } - - a1 <- parVals$a1 - beta1 <- parVals$beta1 - a2 <- parVals$a2 - beta2 <- parVals$beta2 - pi <- parVals$pi - w <- parVals$w - lambda <- parVals$lambda - # Individual parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars=="mean") { - allIndPars[i, ] <- c( mean(a1[, i]), - mean(beta1[, i]), - mean(a2[, i]), - mean(beta2[, i]), - mean(pi[, i]), - mean(w[, i]), - mean(lambda[, i]) ) - } else if (indPars=="median") { - allIndPars[i, ] <- c( median(a1[, i]), - median(beta1[, i]), - median(a2[, i]), - median(beta2[, i]), - median(pi[, i]), - median(w[, i]), - median(lambda[, i]) ) - } else if (indPars=="mode") { - allIndPars[i, ] <- c( estimate_mode(a1[, i]), - estimate_mode(beta1[, i]), - estimate_mode(a2[, i]), - estimate_mode(beta2[, i]), - estimate_mode(pi[, i]), - estimate_mode(w[, i]), - estimate_mode(lambda[, i]) ) - } - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("a1", - "beta1", - "a2", - "beta2", - "pi", - "w", - 
"lambda", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file=file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData" ) ) ) + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + level1_choice = level1_choice, + level2_choice = level2_choice, + reward = reward, + trans_prob = trans_prob + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/ug_bayes.R b/R/ug_bayes.R old mode 100755 new mode 100644 index ac866e3e..eb356df3 --- a/R/ug_bayes.R +++ b/R/ug_bayes.R @@ -1,347 +1,66 @@ -#' Norm-Training Ultimatum Game -#' -#' @description -#' Hierarchical Bayesian Modeling of the Norm-Training Ultimatum Game using the following parameters: "alpha" (envy), "Beta" (guilt), "tau" (inverse temperature). -#' -#' \strong{MODEL:} -#' Ideal Observer Model (Xiang et al., 2013, J Neuro) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "offer", and "accept". 
See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. 
-#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"ug_bayes"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Norm-Training Ultimatum Game, there should be three columns of data -#' with the labels "subjID", "offer", and "accept". 
It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"offer"}}{An real value representing the offer made within the given trial (e.g., 10, 11, 4, etc..).} -#' \item{\code{"accept"}}{A 1 or 0 indicating an offer was accepted or not (1 = accepted, 0 = rejected).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". 
-#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION ug_bayes +#' @templateVar TASK_NAME Norm-Training Ultimatum Game +#' @templateVar MODEL_NAME Ideal Observer Model +#' @templateVar MODEL_CITE (Xiang et al., 2013, J Neuro) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "offer", "accept" +#' @templateVar PARAMETERS "alpha" (envy), "beta" (guilt), "tau" (inverse temperature) +#' @templateVar LENGTH_DATA_COLUMNS 3 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"offer"}{Floating point value representing the offer made in that trial (e.g. 4, 10, 11).} +#' @templateVar DETAILS_DATA_3 \item{"accept"}{1 or 0, indicating whether the offer was accepted in that trial (where accepted == 1, rejected == 0).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. 
-#' -#' Xiang, T., Lohrenz, T., & Montague, P. R. (2013). Computational Substrates of Norms and Their Violations during Social Exchange. -#' Journal of Neuroscience, 33(3), 1099-1108. http://doi.org/10.1523/JNEUROSCI.1642-12.2013 -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- ug_bayes(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -ug_bayes <- function(data = "choose", - niter = 3000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) - stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "ug_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... 
**\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 3 - POI <- c("mu_alpha", "mu_Beta", "mu_tau", - "sigma", - "alpha", "Beta", "tau", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "ug_bayes" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - offer <- array(0, c(numSubjs, maxTrials)) - accept <- array(-1, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - offer[i, 1:useTrials] <- tmp$offer - accept[i, 1:useTrials] <- tmp$accept - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - offer = offer, - accept = accept -) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(1.0, 0.5, 1.0) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1] / 20), qnorm(inits_fixed[2] / 10), qnorm(inits_fixed[3] / 10)), - sigma = c(1.0, 1.0, 1.0), - alpha_pr = rep(qnorm(inits_fixed[1]/20), numSubjs), - Beta_pr = rep(qnorm(inits_fixed[2]/10), numSubjs), - tau_pr = rep(qnorm(inits_fixed[3]/10), numSubjs) -) - } - } else { - genInitList <- "random" - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning('Number of cores specified for parallel computing greater than number of locally available cores. Using all locally available cores.') +#' Xiang, T., Lohrenz, T., & Montague, P. R. (2013). Computational Substrates of Norms and Their +#' Violations during Social Exchange. Journal of Neuroscience, 33(3), 1099-1108. 
+#' http://doi.org/10.1523/JNEUROSCI.1642-12.2013 + +ug_bayes <- hBayesDM_model( + task_name = "ug", + model_name = "bayes", + data_columns = c("subjID", "offer", "accept"), + parameters = list("alpha" = c(0, 1, 20), + "beta" = c(0, 0.5, 10), + "tau" = c(0, 1, 10)), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + offer <- array( 0, c(n_subj, t_max)) + accept <- array(-1, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + offer[i, 1:t] <- DT_subj$offer + accept[i, 1:t] <- DT_subj$accept } - else{ - options(mc.cores = ncore) - } - } - else { - options(mc.cores = 1) - } - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$ug_bayes - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + offer = offer, + accept = accept + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - alpha <- parVals$alpha - Beta <- parVals$Beta - tau <- parVals$tau - - # Individual parameters (e.g., individual 
posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c(mean(alpha[, i]), - mean(Beta[, i]), - mean(tau[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c(median(alpha[, i]), - median(Beta[, i]), - median(tau[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(alpha[, i]), - estimate_mode(Beta[, i]), - estimate_mode(tau[, i])) - } - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("alpha", - "Beta", - "tau", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! 
****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/ug_delta.R b/R/ug_delta.R old mode 100755 new mode 100644 index d7b7ecd1..e285d543 --- a/R/ug_delta.R +++ b/R/ug_delta.R @@ -1,348 +1,67 @@ -#' Norm-Training Ultimatum Game -#' -#' @description -#' Hierarchical Bayesian Modeling of the Norm-Training Ultimatum Game using the following parameters: "alpha" (envy), "ep" (norm adaptation rate), "tau" (inverse temperature). -#' -#' \strong{MODEL:} -#' Rescorla-Wagner (delta) Model (Gu et al., 2015, J Neuro) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "offer", and "accept". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). 
Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"ug_delta"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Norm-Training Ultimatum Game, there should be three columns of data -#' with the labels "subjID", "offer", and "accept". 
It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"offer"}}{An real value representing the offer made within the given trial (e.g., 10, 11, 4, etc..).} -#' \item{\code{"accept"}}{A 1 or 0 indicating an offer was accepted or not (1 = accepted, 0 = rejected).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". 
-#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +#' @templateVar MODEL_FUNCTION ug_delta +#' @templateVar TASK_NAME Norm-Training Ultimatum Game +#' @templateVar MODEL_NAME Rescorla-Wagner (Delta) Model +#' @templateVar MODEL_CITE (Gu et al., 2015, J Neuro) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "offer", "accept" +#' @templateVar PARAMETERS "alpha" (envy), "tau" (inverse temperature), "ep" (norm adaptation rate) +#' @templateVar LENGTH_DATA_COLUMNS 3 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"offer"}{Floating point value representing the offer made in that trial (e.g. 4, 10, 11).} +#' @templateVar DETAILS_DATA_3 \item{"accept"}{1 or 0, indicating whether the offer was accepted in that trial (where accepted == 1, rejected == 0).} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R #' #' @references -#' Gu, X., Wang, X., Hula, A., Wang, S., Xu, S., Lohrenz, T. M., et al. (2015). 
Necessary, Yet Dissociable Contributions of the -#' Insular and Ventromedial Prefrontal Cortices to Norm Adaptation: Computational and Lesion Evidence in Humans. Journal of -#' Neuroscience, 35(2), 467-473. http://doi.org/10.1523/JNEUROSCI.2906-14.2015 -#' -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- ug_delta(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -ug_delta <- function(data = "choose", - niter = 3000, - nwarmup = 1000, - nchain = 4, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # Path to .stan model file - if (modelRegressor) { # model regressors (for model-based neuroimaging, etc.) 
- stop("** Model-based regressors are not available for this model **\n") - } - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "ug_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. \n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 3 - POI <- c("mu_ep", "mu_tau", "mu_alpha", - "sigma", - "ep", "tau", "alpha", - "log_lik") - - if (inc_postpred) { - POI <- c(POI, "y_pred") - } - - modelName <- "ug_delta" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. 
################################################################### - ################################################################################ - - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - offer <- array(0, c(numSubjs, maxTrials)) - accept <- array(-1, c(numSubjs, maxTrials)) - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - useTrials <- Tsubj[i] - tmp <- subset(rawdata, rawdata$subjID == curSubj) - offer[i, 1:useTrials] <- tmp$offer - accept[i, 1:useTrials] <- tmp$accept - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - offer = offer, - accept = accept -) - - # inits - if (inits[1] != "random") { - if (inits[1] == "fixed") { - inits_fixed <- c(1.0, 0.5, 1.0) - } else { - if (length(inits) == numPars) { - inits_fixed <- inits - } else { - stop("Check your inital values!") - } - } - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2]/10), qnorm(inits_fixed[3]/20)), - sigma = c(1.0, 1.0, 1.0), - ep_pr = rep(qnorm(inits_fixed[1]), numSubjs), - tau_pr = rep(qnorm(inits_fixed[2]/10), numSubjs), - alpha_pr = rep(qnorm(inits_fixed[3]/20), numSubjs) -) - } - } else { - genInitList <- "random" - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - if (numCores < ncore) { - options(mc.cores = numCores) - warning('Number of cores specified for parallel computing greater than number of locally available cores. Using all locally available cores.') +#' Gu, X., Wang, X., Hula, A., Wang, S., Xu, S., Lohrenz, T. M., et al. (2015). Necessary, Yet +#' Dissociable Contributions of the Insular and Ventromedial Prefrontal Cortices to Norm +#' Adaptation: Computational and Lesion Evidence in Humans. 
Journal of Neuroscience, 35(2), +#' 467-473. http://doi.org/10.1523/JNEUROSCI.2906-14.2015 + +ug_delta <- hBayesDM_model( + task_name = "ug", + model_name = "delta", + data_columns = c("subjID", "offer", "accept"), + parameters = list("alpha" = c(0, 1, 20), + "tau" = c(0, 1, 10), + "ep" = c(0, 0.5, 1)), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs + t_max <- general_info$t_max + + # Initialize (model-specific) data arrays + offer <- array( 0, c(n_subj, t_max)) + accept <- array(-1, c(n_subj, t_max)) + + # Write from raw_data to the data arrays + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + + offer[i, 1:t] <- DT_subj$offer + accept[i, 1:t] <- DT_subj$accept } - else{ - options(mc.cores = ncore) - } - } - else { - options(mc.cores = 1) - } - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$ug_delta - if (vb) { # if variational Bayesian - fit = rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit = rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) { - parVals$y_pred[parVals$y_pred == -1] <- NA + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + offer = offer, + accept = accept + ) + + # Returned data_list will directly be passed to Stan + return(data_list) } +) - ep <- parVals$ep - tau <- parVals$tau - alpha <- parVals$alpha - - # Individual 
parameters (e.g., individual posterior means) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - if (indPars == "mean") { - allIndPars[i,] <- c(mean(ep[, i]), - mean(tau[, i]), - mean(alpha[, i])) - } else if (indPars == "median") { - allIndPars[i,] <- c(median(ep[, i]), - median(tau[, i]), - median(alpha[, i])) - } else if (indPars == "mode") { - allIndPars[i,] <- c(estimate_mode(ep[, i]), - estimate_mode(tau[, i]), - estimate_mode(alpha[, i])) - } - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("ep", - "tau", - "alpha", - "subjID") - - # Wrap up data into a list - modelData <- list(modelName, allIndPars, parVals, fit, rawdata) - names(modelData) <- c("model", "allIndPars", "parVals", "fit", "rawdata") - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! 
****\n") - cat("************************************\n") - - return(modelData) -} diff --git a/R/wcs_sql.R b/R/wcs_sql.R old mode 100755 new mode 100644 index a632cfed..04ad768e --- a/R/wcs_sql.R +++ b/R/wcs_sql.R @@ -1,378 +1,91 @@ -#' Wisconsin Card Sorting Task -#' -#' @description -#' Hierarchical Bayesian Modeling of the Wisconsin Card Sorting (WCS) Task using the following parameters: "r" (reward sensitivity), "p" (punishment sensitivity), and "d" (decision consistency or inverse temperature). -#' -#' Contributor: \href{https://ccs-lab.github.io/team/dayeong-min/}{Dayeong Min} -#' -#' \strong{MODEL:} -#' Sequential Learning Model (Bishara et al., 2010, Journal of Mathematical Psychology) -#' -#' @param data A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice" and "outcome". See \bold{Details} below for more information. -#' @param niter Number of iterations, including warm-up. -#' @param nwarmup Number of iterations used for warm-up only. -#' @param nchain Number of chains to be run. -#' @param ncore Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1. -#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high. -#' @param inits Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values. -#' @param indPars Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode". -#' @param saveDir Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested. -#' @param modelRegressor Exporting model-based regressors? TRUE or FALSE. Currently not available for this model. -#' @param vb Use variational inference to approximately draw from a posterior distribution. 
Defaults to FALSE. -#' @param inc_postpred Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE. -#' @param adapt_delta Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. -#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' @param max_treedepth Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below. -#' -#' @return \code{modelData} A class \code{"hBayesDM"} object with the following components: -#' \describe{ -#' \item{\code{model}}{Character string with the name of the model (\code{"wcs_sql"}).} -#' \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter -#' values (as specified by \code{"indPars"}) for each subject.} -#' \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples -#' over different model parameters. } -#' \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} -#' \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} -#' } -#' -#' @importFrom rstan vb sampling stan_model rstan_options extract -#' @importFrom parallel detectCores -#' @importFrom stats median qnorm density -#' @importFrom utils read.table -#' -#' @details -#' This section describes some of the function arguments in greater detail. -#' -#' \strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -#' (e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. 
-#' The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -#' represent variables. For the Wisconsin Card Sorting Task, there should be at least three columns of data -#' with the labels "subjID", "choice", and "outcome". It is not necessary for the columns to be in this particular order, -#' however it is necessary that they be labelled correctly and contain the information below: -#' \describe{ -#' \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} -#' \item{\code{"choice"}}{An integer value representing the chosen choice option of deck within the given trial (e.g., 1 to 4 in WCST).} -#' \item{\code{"outcome"}}{A 1 or 0 for outcome within each given trial (1 = correct, 0 = wrong).} -#' } -#' \strong{*}Note: The data.txt file may contain other columns of data (e.g. "rule", "trial", etc.), but only the data with the column -#' names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -#' there is no need to remove other miscellaneous data columns. -#' -#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -#' beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -#' Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -#' on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -#' effects that initial values have on the resulting posteriors. -#' -#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -#' used to draw samples from the posterior distribution. 
Since the posteriors are generated from a sampling -#' process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -#' sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -#' command. The chains should resemble a "furry caterpillar". -#' -#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -#' to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -#' generate the posterior. -#' -#' \strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -#' over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -#' can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -#' more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -#' \href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. 
+#' @templateVar MODEL_FUNCTION wcs_sql +#' @templateVar CONTRIBUTOR \href{https://ccs-lab.github.io/team/dayeong-min/}{Dayeong Min} +#' @templateVar TASK_NAME Wisconsin Card Sorting Task +#' @templateVar MODEL_NAME Sequential Learning Model +#' @templateVar MODEL_CITE (Bishara et al., 2010, Journal of Mathematical Psychology) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "choice", "outcome" +#' @templateVar PARAMETERS "r" (reward sensitivity), "p" (punishment sensitivity), "d" (decision consistency or inverse temperature) +#' @templateVar LENGTH_DATA_COLUMNS 3 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"choice"}{Integer value indicating which deck was chosen on that trial: 1, 2, 3, or 4.} +#' @templateVar DETAILS_DATA_3 \item{"outcome"}{1 or 0, indicating the outcome of that trial: correct == 1, wrong == 0.} +#' +#' @template model-documentation #' #' @export +#' @include hBayesDM_model.R +#' @importFrom utils read.table #' #' @references -#' Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -#' Journal of Machine Learning Research, 15(1), 1593-1623. -#' -#' Bishara, A. J., Kruschke, J. K., Stout, J. C., Bechara, A., McCabe, D. P., & Busemeyer, J. R. (2010). -#' Sequential learning models for the Wisconsin card sort task: Assessing processes in substance dependent individuals. -#' Journal of mathematical psychology, 54(1), 5-13. 
-#' -#' @seealso -#' We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} -#' -#' @examples -#' \dontrun{ -#' # Run the model and store results in "output" -#' output <- wcs_sql(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) -#' -#' # Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -#' plot(output, type = 'trace') -#' -#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) -#' rhat(output) -#' -#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) -#' plot(output) -#' -#' # Show the WAIC and LOOIC model fit estimates -#' printFit(output) -#' } - -wcs_sql <- function(data = "choice", - niter = 3000, - nwarmup = 1000, - nchain = 1, - ncore = 1, - nthin = 1, - inits = "random", - indPars = "mean", - saveDir = NULL, - modelRegressor = FALSE, - vb = FALSE, - inc_postpred = FALSE, - adapt_delta = 0.95, - stepsize = 1, - max_treedepth = 10) { - - # To see how long computations take - startTime <- Sys.time() - - # For using example data - if (data == "example") { - data <- system.file("extdata", "wcs_exampleData.txt", package = "hBayesDM") - } else if (data == "choose") { - data <- file.choose() - } - - # Load data - if (file.exists(data)) { - rawdata <- read.table(data, header = T, sep = "\t") - answer <- system.file("extdata", "wcs_answersheet.txt", package = "hBayesDM") - answer_dim <- read.table(answer, header = T) - } else { - stop("** The data file does not exist. Please check it again. **\n e.g., data = '/MyFolder/SubFolder/dataFile.txt', ... **\n") - } - - # Remove rows containing NAs - NA_rows_all = which(is.na(rawdata), arr.ind = T) # rows with NAs - NA_rows = unique(NA_rows_all[, "row"]) - if (length(NA_rows) > 0) { - rawdata = rawdata[-NA_rows,] - cat("The number of rows with NAs = ", length(NA_rows), ". They are removed prior to modeling the data. 
\n", sep = "") - } - - # Individual Subjects - subjList <- unique(rawdata[,"subjID"]) # list of subjects x blocks - numSubjs <- length(subjList) # number of subjects - - # Specify the number of parameters and parameters of interest - numPars <- 3 - POI <- c("mu_r", "mu_p", "mu_d", - "sigma", - "r", "p", "d", - "log_lik") - - if (inc_postpred){ - POI <- c(POI, "y_pred") - } - - modelName <- "wcs_sql" - - # Information for user - cat("\nModel name = ", modelName, "\n") - cat("Data file = ", data, "\n") - cat("\nDetails:\n") - if (vb) { - cat(" # Using variational inference # \n") - } else { - cat(" # of chains = ", nchain, "\n") - cat(" # of cores used = ", ncore, "\n") - cat(" # of MCMC samples (per chain) = ", niter, "\n") - cat(" # of burn-in samples = ", nwarmup, "\n") - } - cat(" # of subjects = ", numSubjs, "\n") - - ################################################################################ - # THE DATA. ################################################################### - ################################################################################ - - - # Setting maxTrials - Tsubj <- as.vector(rep(0, numSubjs)) # number of trials for each subject - - for (i in 1:numSubjs) { - curSubj <- subjList[i] - Tsubj[i] <- sum(rawdata$subjID == curSubj) # Tsubj[N] - } - - # Setting maxTrials - maxTrials <- 128 - #maxTrials <- max(Tsubj) - - # Information for user continued - cat(" # of (max) trials per subject = ", maxTrials, "\n\n") - - - # Arrange data to match stan format - choice <- array(0, c(numSubjs,4,maxTrials)) # subject's deck choice within a trial (1, 2, 3 and 4) - outcome <- matrix(-1, numSubjs, maxTrials) # whether subject's choice is correct or not within a trial (1 and 0) - choice_match_att <- array(0, c(numSubjs,maxTrials,1,3)) # display a vector that indicates which dimension the chosen card matches to within a trial - deck_match_rule <- array(0, c(maxTrials,3,4)) # display a matrix that indicates which dimension(color, form, number) each of 
the 4 decks matches to within a trial - - # choice - for(sbj in 1:numSubjs){ - for(trial in 1:Tsubj[sbj]){ - for(decknum in 1:4){ - if(rawdata[rawdata$subjID==sbj,"choice"][trial] == decknum) choice[sbj,decknum,trial] = 1 +#' Bishara, A. J., Kruschke, J. K., Stout, J. C., Bechara, A., McCabe, D. P., & Busemeyer, J. R. +#' (2010). Sequential learning models for the Wisconsin card sort task: Assessing processes in +#' substance dependent individuals. Journal of Mathematical Psychology, 54(1), 5-13. + +wcs_sql <- hBayesDM_model( + task_name = "wcs", + model_name = "sql", + data_columns = c("subjID", "choice", "outcome"), + parameters = list("r" = c(0, 0.1, 1), + "p" = c(0, 0.1, 1), + "d" = c(0, 1, 5)), + preprocess_func = function(raw_data, general_info) { + # Currently class(raw_data) == "data.table" + + # Use general_info of raw_data + subjs <- general_info$subjs + n_subj <- general_info$n_subj + t_subjs <- general_info$t_subjs +# t_max <- general_info$t_max + t_max <- 128 + + # Read predefined answer sheet + answersheet <- system.file("extdata", "wcs_answersheet.txt", package = "hBayesDM") + answer <- read.table(answersheet, header = TRUE) + + # Initialize data arrays + choice <- array( 0, c(n_subj, 4, t_max)) + outcome <- array(-1, c(n_subj, t_max)) + choice_match_att <- array( 0, c(n_subj, t_max, 1, 3)) # Info about chosen deck (per each trial) + deck_match_rule <- array( 0, c(t_max, 3, 4)) # Info about all 4 decks (per each trial) + + # Write: choice, outcome, choice_match_att + for (i in 1:n_subj) { + subj <- subjs[i] + t <- t_subjs[i] + DT_subj <- raw_data[subjid == subj] + DT_subj_choice <- DT_subj$choice + DT_subj_outcome <- DT_subj$outcome + + for (tr in 1:t) { + ch <- DT_subj_choice[tr] + ou <- DT_subj_outcome[tr] + choice[i, ch, tr] <- 1 + outcome[i, tr] <- ou + choice_match_att[i, tr, 1, ] <- answer[, tr] == ch } } - } - - # outcome - for (sbj in 1:numSubjs){ - outcome[sbj,1:Tsubj[sbj]] <- rawdata[rawdata$subjID==sbj, "outcome"] - } - - - # 
choice_match_att; - for(sbj in 1:numSubjs){ - for(trial in 1:Tsubj[sbj]){ - choice_match_att[sbj,trial,,] <- answer_dim[,trial] == rawdata[rawdata$subjID==sbj, "choice"][trial] - } - } - # deck_match_rule - for(trial in 1:maxTrials){ - for(rule in 1:3){ - for(decknum in 1:4){ - if(answer_dim[rule,trial] == decknum) deck_match_rule[trial, rule, decknum] = 1 + # Write: deck_match_rule + for (tr in 1:t_max) { + for (ru in 1:3) { + deck_match_rule[tr, ru, answer[ru, tr]] <- 1 } } - } - - dataList <- list( - N = numSubjs, - T = maxTrials, - Tsubj = Tsubj, - choice = choice, - outcome = outcome, + # Wrap into a list for Stan + data_list <- list( + N = n_subj, + T = t_max, + Tsubj = t_subjs, + choice = choice, + outcome = outcome, choice_match_att = choice_match_att, - numPars = numPars + deck_match_rule = deck_match_rule ) - # inits - if (inits[1] == "random") { - genInitList <- "random" - } else { - if (inits[1] == "fixed") { - inits_fixed <- c(0.1, 0.1, 1.0) - } else { - if (length(inits) == numPars) - inits_fixed <- inits - else - stop("Check your inital values!") - } - - genInitList <- function() { - list( - mu_p = c(qnorm(inits_fixed[1]), qnorm(inits_fixed[2]), qnorm(inits_fixed[3] / 10)), - sigma = c(1.0, 1.0, 1.0), - r_pr = rep(qnorm(inits_fixed[1]), numSubjs), - p_pr = rep(qnorm(inits_fixed[2]), numSubjs), - d_pr = rep(qnorm(inits_fixed[3] / 10), numSubjs) - ) - } - } - - if (ncore > 1) { - numCores <- parallel::detectCores() - - if (numCores < ncore) { - options(mc.cores = numCores) - warning('Number of cores specified for parallel computing greater than number of locally available cores. 
Using all locally available cores.') - } else { - options(mc.cores = ncore) - } - } else { - options(mc.cores = 1) - } - - cat("***********************************\n") - cat("** Loading a precompiled model **\n") - cat("***********************************\n") - - # Fit the Stan model - m = stanmodels$wcs_sql - if (vb) { # if variational Bayesian - fit <- rstan::vb(m, - data = dataList, - pars = POI, - init = genInitList) - } else { - fit <- rstan::sampling(m, - data = dataList, - pars = POI, - warmup = nwarmup, - init = genInitList, - iter = niter, - chains = nchain, - thin = nthin, - control = list(adapt_delta = adapt_delta, - max_treedepth = max_treedepth, - stepsize = stepsize)) - } - - ## Extract parameters - parVals <- rstan::extract(fit, permuted = T) - if (inc_postpred) - parVals$y_pred[parVals$y_pred == -1] <- NA - - r <- parVals$r - p <- parVals$p - d <- parVals$d - - # Individual parameters (e.g., individual posterior means) - measureIndPars <- switch(indPars, mean=mean, median=median, mode=estimate_mode) - allIndPars <- array(NA, c(numSubjs, numPars)) - allIndPars <- as.data.frame(allIndPars) - - for (i in 1:numSubjs) { - allIndPars[i,] <- c(measureIndPars(r[, i]), - measureIndPars(p[, i]), - measureIndPars(d[, i])) - } - - allIndPars <- cbind(allIndPars, subjList) - colnames(allIndPars) <- c("r", - "p", - "d", - "subjID") - - # Wrap up data into a list - modelData <- list() - modelData$model <- modelName - modelData$allIndPars <- allIndPars - modelData$parVals <- parVals - modelData$fit <- fit - modelData$rawdata <- rawdata - modelData$modelRegressor <- NA - - class(modelData) <- "hBayesDM" - - # Total time of computations - endTime <- Sys.time() - timeTook <- endTime - startTime - - # If saveDir is specified, save modelData as a file. 
If not, don't save - # Save each file with its model name and time stamp (date & time (hr & min)) - if (!is.null(saveDir)) { - currTime <- Sys.time() - currDate <- Sys.Date() - currHr <- substr(currTime, 12, 13) - currMin <- substr(currTime, 15, 16) - timeStamp <- paste0(currDate, "_", currHr, "_", currMin) - dataFileName = sub(pattern = "(.*)\\..*$", replacement = "\\1", basename(data)) - save(modelData, file = file.path(saveDir, paste0(modelName, "_", dataFileName, "_", timeStamp, ".RData"))) - } - - # Inform user of completion - cat("\n************************************\n") - cat("**** Model fitting is complete! ****\n") - cat("************************************\n") - - return(modelData) + # Returned data_list will directly be passed to Stan + return(data_list) } +) + diff --git a/R/zzz.R b/R/zzz.R old mode 100755 new mode 100644 index a481b4a0..65d3e4a0 --- a/R/zzz.R +++ b/R/zzz.R @@ -1,11 +1,14 @@ #' @noRd - + .onAttach <- function(libname, pkgname) { ver <- utils::packageVersion("hBayesDM") packageStartupMessage("\n\nThis is hBayesDM version ", ver, "\n\n") } -.onLoad <- function(libname, pkgname) { - modules <- paste0("stan_fit4", names(stanmodels), "_mod") - for (m in modules) loadModule(m, what = TRUE) -} \ No newline at end of file +.onLoad <- function(libname, pkgname) { # nocov start + if (FLAG_BUILD_ALL) { + modules <- paste0("stan_fit4", names(stanmodels), "_mod") + for (m in modules) loadModule(m, what = TRUE) + } +} # nocov end + diff --git a/README.md b/README.md index 3aa56762..2e9dcca7 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ [![Project Status: Active – The project has reached a stable, usable state and is being actively developed.](https://www.repostatus.org/badges/latest/active.svg)](https://www.repostatus.org/#active) [![Build Status](https://travis-ci.org/CCS-Lab/hBayesDM.svg?branch=master)](https://travis-ci.org/CCS-Lab/hBayesDM) [![CRAN Latest 
Release](https://www.r-pkg.org/badges/version-last-release/hBayesDM)](https://cran.r-project.org/package=hBayesDM) -[![Downloads](https://cranlogs.r-pkg.org/badges/grand-total/hBayesDM)](https://cran.rstudio.com/web/packages/rstan/index.html) +[![Downloads](https://cranlogs.r-pkg.org/badges/grand-total/hBayesDM)](https://cran.r-project.org/web/packages/hBayesDM/index.html) @@ -20,32 +20,50 @@ https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started [rstan]: https://github.com/stan-dev/rstan -For the moment, RStan requires you to specify that the C++14 standard should be used to compile Stan programs (based on [this link][rstan-loading]): -```r -Sys.setenv(USE_CXX14 = 1) -library("rstan") # observe startup messages -``` - -[rstan-loading]: https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started#loading-the-package - ### Installation hBayesDM can be installed from CRAN by running the following command in R: ```r -install.packages('hBayesDM') # Install hBayesDM from CRAN +install.packages("hBayesDM") # Install hBayesDM from CRAN ``` -**We strongly recommend users to install hBayesDM from GitHub**. All models in this GitHub version are precompiled, which saves time for compiling Stan models. However, it may cause some memory allocation issues on a Windows machine. - -You can install the latest version from GitHub with: +or you can also install via GitHub with: ```r # `devtools` is required to install hBayesDM from GitHub if (!require(devtools)) install.packages("devtools") + devtools::install_github("CCS-Lab/hBayesDM") ``` +#### Building at once + +In default, you should build a Stan file into a binary for the first time to use the +model, so it can be quite bothersome. +In order to build all the models at once, you should set an environmental variable +`BUILD_ALL` to `true`. +We highly recommend you to use multiple cores for build, since it requires quite +a long time to complete. 
+ +```r +Sys.setenv(BUILD_ALL='true') # Build all the models on installation +Sys.setenv(MAKEFLAGS='-j 4') # Use 4 cores for compilation (or the number you want) + +install.packages("hBayesDM") # Install from CRAN +# or +devtools::install_github("CCS-Lab/hBayesDM") # Install from GitHub +``` + +### Caveats + +Before you load `hBayesDM`, you should load `rstan` to make sampling properly work. + +```r +library(rstan) +library(hBayesDM) +``` + ### Quick Links - **Tutorial**: http://rpubs.com/CCSL/hBayesDM diff --git a/_pkgdown.yml b/_pkgdown.yml new file mode 100644 index 00000000..60f4d379 --- /dev/null +++ b/_pkgdown.yml @@ -0,0 +1,35 @@ +reference: +- title: Tasks & Models + contents: + - starts_with("bandit") + - starts_with("bart_") + - starts_with("choiceRT_") + - starts_with("cra_") + - starts_with("dbdm_") + - starts_with("dd_") + - starts_with("gng_") + - starts_with("igt_") + - starts_with("peer_") + - starts_with("prl_") + - starts_with("pst_") + - starts_with("ra_") + - starts_with("rdt_") + - starts_with("ts_") + - starts_with("ug_") + - starts_with("wcs_") +- title: Functions + contents: + - estimate_mode + - extract_ic + - HDIofMCMC + - multiplot + - plotDist + - plotHDI + - plotInd + - printFit + - rhat +- title: Internal functions + desc: > + These functions are for the developmental purpose. + contents: + - hBayesDM_model diff --git a/cleanup b/cleanup deleted file mode 100755 index 43d3ee97..00000000 --- a/cleanup +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh -e - -# Note to Windows users: This is not actually platform specific. -mkdir -p src/include -"${R_HOME}/bin/R" --vanilla --slave --file=tools/make_cpp.R -sed -i.bak '/current_statement_begin__ = [0-9]/d' src/include/models.hpp -cat src/*.cc > src/Modules.cpp -rm src/*.cc -exit $? diff --git a/cleanup.win b/cleanup.win deleted file mode 100755 index 61db8ac2..00000000 --- a/cleanup.win +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh -e - -# Note to Windows users: This is not actually platform specific. 
-mkdir -p src/include -"${R_HOME}/bin/R" --vanilla --slave --file=tools/make_cpp.R - -cat src/*.cc > src/Modules.cpp -rm src/*.cc -exit $? diff --git a/cran-comments.md b/cran-comments.md new file mode 100644 index 00000000..6f4af477 --- /dev/null +++ b/cran-comments.md @@ -0,0 +1,25 @@ +## Test environments + +* Local mac OS install, R 3.5.1 +* Local Ubuntu 16.04 install, R 3.4.4 +* Ubuntu 14.04 (on Travis CI), R 3.5.1 + +## R CMD check results + +There were 2 NOTES: + +* checking installed package size ... NOTE + installed size is 6.0Mb + sub-directories of 1Mb or more: + extdata 1.2Mb + R 1.5Mb + stan_files 2.8Mb + + hBayesDM use the Stan files for models. + +* checking CRAN incoming feasibility ... NOTE + Maintainer: 'Woo-Young Ahn ' + + GNU make is a SystemRequirements. + + To compile hBayesDM using rstan, GNU make is required. diff --git a/docs/LICENSE-text.html b/docs/LICENSE-text.html new file mode 100644 index 00000000..2e685f75 --- /dev/null +++ b/docs/LICENSE-text.html @@ -0,0 +1,717 @@ + + + + + + + + +License • hBayesDM + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+
+ + +
+GNU General Public License
+==========================
+
+_Version 3, 29 June 2007_  
+_Copyright © 2007 Free Software Foundation, Inc. &lt;<http://fsf.org/>&gt;_
+
+Everyone is permitted to copy and distribute verbatim copies of this license
+document, but changing it is not allowed.
+
+## Preamble
+
+The GNU General Public License is a free, copyleft license for software and other
+kinds of works.
+
+The licenses for most software and other practical works are designed to take away
+your freedom to share and change the works. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change all versions of a
+program--to make sure it remains free software for all its users. We, the Free
+Software Foundation, use the GNU General Public License for most of our software; it
+applies also to any other work released this way by its authors. You can apply it to
+your programs, too.
+
+When we speak of free software, we are referring to freedom, not price. Our General
+Public Licenses are designed to make sure that you have the freedom to distribute
+copies of free software (and charge for them if you wish), that you receive source
+code or can get it if you want it, that you can change the software or use pieces of
+it in new free programs, and that you know you can do these things.
+
+To protect your rights, we need to prevent others from denying you these rights or
+asking you to surrender the rights. Therefore, you have certain responsibilities if
+you distribute copies of the software, or if you modify it: responsibilities to
+respect the freedom of others.
+
+For example, if you distribute copies of such a program, whether gratis or for a fee,
+you must pass on to the recipients the same freedoms that you received. You must make
+sure that they, too, receive or can get the source code. And you must show them these
+terms so they know their rights.
+
+Developers that use the GNU GPL protect your rights with two steps: **(1)** assert
+copyright on the software, and **(2)** offer you this License giving you legal permission
+to copy, distribute and/or modify it.
+
+For the developers' and authors' protection, the GPL clearly explains that there is
+no warranty for this free software. For both users' and authors' sake, the GPL
+requires that modified versions be marked as changed, so that their problems will not
+be attributed erroneously to authors of previous versions.
+
+Some devices are designed to deny users access to install or run modified versions of
+the software inside them, although the manufacturer can do so. This is fundamentally
+incompatible with the aim of protecting users' freedom to change the software. The
+systematic pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we have designed
+this version of the GPL to prohibit the practice for those products. If such problems
+arise substantially in other domains, we stand ready to extend this provision to
+those domains in future versions of the GPL, as needed to protect the freedom of
+users.
+
+Finally, every program is threatened constantly by software patents. States should
+not allow patents to restrict development and use of software on general-purpose
+computers, but in those that do, we wish to avoid the special danger that patents
+applied to a free program could make it effectively proprietary. To prevent this, the
+GPL assures that patents cannot be used to render the program non-free.
+
+The precise terms and conditions for copying, distribution and modification follow.
+
+## TERMS AND CONDITIONS
+
+### 0. Definitions
+
+“This License” refers to version 3 of the GNU General Public License.
+
+“Copyright” also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+“The Program” refers to any copyrightable work licensed under this
+License. Each licensee is addressed as “you”. “Licensees” and
+“recipients” may be individuals or organizations.
+
+To “modify” a work means to copy from or adapt all or part of the work in
+a fashion requiring copyright permission, other than the making of an exact copy. The
+resulting work is called a “modified version” of the earlier work or a
+work “based on” the earlier work.
+
+A “covered work” means either the unmodified Program or a work based on
+the Program.
+
+To “propagate” a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for infringement under
+applicable copyright law, except executing it on a computer or modifying a private
+copy. Propagation includes copying, distribution (with or without modification),
+making available to the public, and in some countries other activities as well.
+
+To “convey” a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through a computer
+network, with no transfer of a copy, is not conveying.
+
+An interactive user interface displays “Appropriate Legal Notices” to the
+extent that it includes a convenient and prominently visible feature that **(1)**
+displays an appropriate copyright notice, and **(2)** tells the user that there is no
+warranty for the work (except to the extent that warranties are provided), that
+licensees may convey the work under this License, and how to view a copy of this
+License. If the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+### 1. Source Code
+
+The “source code” for a work means the preferred form of the work for
+making modifications to it. “Object code” means any non-source form of a
+work.
+
+A “Standard Interface” means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of interfaces
+specified for a particular programming language, one that is widely used among
+developers working in that language.
+
+The “System Libraries” of an executable work include anything, other than
+the work as a whole, that **(a)** is included in the normal form of packaging a Major
+Component, but which is not part of that Major Component, and **(b)** serves only to
+enable use of the work with that Major Component, or to implement a Standard
+Interface for which an implementation is available to the public in source code form.
+A “Major Component”, in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system (if any) on which
+the executable work runs, or a compiler used to produce the work, or an object code
+interpreter used to run it.
+
+The “Corresponding Source” for a work in object code form means all the
+source code needed to generate, install, and (for an executable work) run the object
+code and to modify the work, including scripts to control those activities. However,
+it does not include the work's System Libraries, or general-purpose tools or
+generally available free programs which are used unmodified in performing those
+activities but which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for the work, and
+the source code for shared libraries and dynamically linked subprograms that the work
+is specifically designed to require, such as by intimate data communication or
+control flow between those subprograms and other parts of the work.
+
+The Corresponding Source need not include anything that users can regenerate
+automatically from other parts of the Corresponding Source.
+
+The Corresponding Source for a work in source code form is that same work.
+
+### 2. Basic Permissions
+
+All rights granted under this License are granted for the term of copyright on the
+Program, and are irrevocable provided the stated conditions are met. This License
+explicitly affirms your unlimited permission to run the unmodified Program. The
+output from running a covered work is covered by this License only if the output,
+given its content, constitutes a covered work. This License acknowledges your rights
+of fair use or other equivalent, as provided by copyright law.
+
+You may make, run and propagate covered works that you do not convey, without
+conditions so long as your license otherwise remains in force. You may convey covered
+works to others for the sole purpose of having them make modifications exclusively
+for you, or provide you with facilities for running those works, provided that you
+comply with the terms of this License in conveying all material for which you do not
+control copyright. Those thus making or running the covered works for you must do so
+exclusively on your behalf, under your direction and control, on terms that prohibit
+them from making any copies of your copyrighted material outside their relationship
+with you.
+
+Conveying under any other circumstances is permitted solely under the conditions
+stated below. Sublicensing is not allowed; section 10 makes it unnecessary.
+
+### 3. Protecting Users' Legal Rights From Anti-Circumvention Law
+
+No covered work shall be deemed part of an effective technological measure under any
+applicable law fulfilling obligations under article 11 of the WIPO copyright treaty
+adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention
+of such measures.
+
+When you convey a covered work, you waive any legal power to forbid circumvention of
+technological measures to the extent such circumvention is effected by exercising
+rights under this License with respect to the covered work, and you disclaim any
+intention to limit operation or modification of the work as a means of enforcing,
+against the work's users, your or third parties' legal rights to forbid circumvention
+of technological measures.
+
+### 4. Conveying Verbatim Copies
+
+You may convey verbatim copies of the Program's source code as you receive it, in any
+medium, provided that you conspicuously and appropriately publish on each copy an
+appropriate copyright notice; keep intact all notices stating that this License and
+any non-permissive terms added in accord with section 7 apply to the code; keep
+intact all notices of the absence of any warranty; and give all recipients a copy of
+this License along with the Program.
+
+You may charge any price or no price for each copy that you convey, and you may offer
+support or warranty protection for a fee.
+
+### 5. Conveying Modified Source Versions
+
+You may convey a work based on the Program, or the modifications to produce it from
+the Program, in the form of source code under the terms of section 4, provided that
+you also meet all of these conditions:
+
+* **a)** The work must carry prominent notices stating that you modified it, and giving a
+relevant date.
+* **b)** The work must carry prominent notices stating that it is released under this
+License and any conditions added under section 7. This requirement modifies the
+requirement in section 4 to “keep intact all notices”.
+* **c)** You must license the entire work, as a whole, under this License to anyone who
+comes into possession of a copy. This License will therefore apply, along with any
+applicable section 7 additional terms, to the whole of the work, and all its parts,
+regardless of how they are packaged. This License gives no permission to license the
+work in any other way, but it does not invalidate such permission if you have
+separately received it.
+* **d)** If the work has interactive user interfaces, each must display Appropriate Legal
+Notices; however, if the Program has interactive interfaces that do not display
+Appropriate Legal Notices, your work need not make them do so.
+
+A compilation of a covered work with other separate and independent works, which are
+not by their nature extensions of the covered work, and which are not combined with
+it such as to form a larger program, in or on a volume of a storage or distribution
+medium, is called an “aggregate” if the compilation and its resulting
+copyright are not used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work in an aggregate
+does not cause this License to apply to the other parts of the aggregate.
+
+### 6. Conveying Non-Source Forms
+
+You may convey a covered work in object code form under the terms of sections 4 and
+5, provided that you also convey the machine-readable Corresponding Source under the
+terms of this License, in one of these ways:
+
+* **a)** Convey the object code in, or embodied in, a physical product (including a
+physical distribution medium), accompanied by the Corresponding Source fixed on a
+durable physical medium customarily used for software interchange.
+* **b)** Convey the object code in, or embodied in, a physical product (including a
+physical distribution medium), accompanied by a written offer, valid for at least
+three years and valid for as long as you offer spare parts or customer support for
+that product model, to give anyone who possesses the object code either **(1)** a copy of
+the Corresponding Source for all the software in the product that is covered by this
+License, on a durable physical medium customarily used for software interchange, for
+a price no more than your reasonable cost of physically performing this conveying of
+source, or **(2)** access to copy the Corresponding Source from a network server at no
+charge.
+* **c)** Convey individual copies of the object code with a copy of the written offer to
+provide the Corresponding Source. This alternative is allowed only occasionally and
+noncommercially, and only if you received the object code with such an offer, in
+accord with subsection 6b.
+* **d)** Convey the object code by offering access from a designated place (gratis or for
+a charge), and offer equivalent access to the Corresponding Source in the same way
+through the same place at no further charge. You need not require recipients to copy
+the Corresponding Source along with the object code. If the place to copy the object
+code is a network server, the Corresponding Source may be on a different server
+(operated by you or a third party) that supports equivalent copying facilities,
+provided you maintain clear directions next to the object code saying where to find
+the Corresponding Source. Regardless of what server hosts the Corresponding Source,
+you remain obligated to ensure that it is available for as long as needed to satisfy
+these requirements.
+* **e)** Convey the object code using peer-to-peer transmission, provided you inform
+other peers where the object code and Corresponding Source of the work are being
+offered to the general public at no charge under subsection 6d.
+
+A separable portion of the object code, whose source code is excluded from the
+Corresponding Source as a System Library, need not be included in conveying the
+object code work.
+
+A “User Product” is either **(1)** a “consumer product”, which
+means any tangible personal property which is normally used for personal, family, or
+household purposes, or **(2)** anything designed or sold for incorporation into a
+dwelling. In determining whether a product is a consumer product, doubtful cases
+shall be resolved in favor of coverage. For a particular product received by a
+particular user, “normally used” refers to a typical or common use of
+that class of product, regardless of the status of the particular user or of the way
+in which the particular user actually uses, or expects or is expected to use, the
+product. A product is a consumer product regardless of whether the product has
+substantial commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+“Installation Information” for a User Product means any methods,
+procedures, authorization keys, or other information required to install and execute
+modified versions of a covered work in that User Product from a modified version of
+its Corresponding Source. The information must suffice to ensure that the continued
+functioning of the modified object code is in no case prevented or interfered with
+solely because modification has been made.
+
+If you convey an object code work under this section in, or with, or specifically for
+use in, a User Product, and the conveying occurs as part of a transaction in which
+the right of possession and use of the User Product is transferred to the recipient
+in perpetuity or for a fixed term (regardless of how the transaction is
+characterized), the Corresponding Source conveyed under this section must be
+accompanied by the Installation Information. But this requirement does not apply if
+neither you nor any third party retains the ability to install modified object code
+on the User Product (for example, the work has been installed in ROM).
+
+The requirement to provide Installation Information does not include a requirement to
+continue to provide support service, warranty, or updates for a work that has been
+modified or installed by the recipient, or for the User Product in which it has been
+modified or installed. Access to a network may be denied when the modification itself
+materially and adversely affects the operation of the network or violates the rules
+and protocols for communication across the network.
+
+Corresponding Source conveyed, and Installation Information provided, in accord with
+this section must be in a format that is publicly documented (and with an
+implementation available to the public in source code form), and must require no
+special password or key for unpacking, reading or copying.
+
+### 7. Additional Terms
+
+“Additional permissions” are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions. Additional
+permissions that are applicable to the entire Program shall be treated as though they
+were included in this License, to the extent that they are valid under applicable
+law. If additional permissions apply only to part of the Program, that part may be
+used separately under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+When you convey a copy of a covered work, you may at your option remove any
+additional permissions from that copy, or from any part of it. (Additional
+permissions may be written to require their own removal in certain cases when you
+modify the work.) You may place additional permissions on material, added by you to a
+covered work, for which you have or can give appropriate copyright permission.
+
+Notwithstanding any other provision of this License, for material you add to a
+covered work, you may (if authorized by the copyright holders of that material)
+supplement the terms of this License with terms:
+
+* **a)** Disclaiming warranty or limiting liability differently from the terms of
+sections 15 and 16 of this License; or
+* **b)** Requiring preservation of specified reasonable legal notices or author
+attributions in that material or in the Appropriate Legal Notices displayed by works
+containing it; or
+* **c)** Prohibiting misrepresentation of the origin of that material, or requiring that
+modified versions of such material be marked in reasonable ways as different from the
+original version; or
+* **d)** Limiting the use for publicity purposes of names of licensors or authors of the
+material; or
+* **e)** Declining to grant rights under trademark law for use of some trade names,
+trademarks, or service marks; or
+* **f)** Requiring indemnification of licensors and authors of that material by anyone
+who conveys the material (or modified versions of it) with contractual assumptions of
+liability to the recipient, for any liability that these contractual assumptions
+directly impose on those licensors and authors.
+
+All other non-permissive additional terms are considered “further
+restrictions” within the meaning of section 10. If the Program as you received
+it, or any part of it, contains a notice stating that it is governed by this License
+along with a term that is a further restriction, you may remove that term. If a
+license document contains a further restriction but permits relicensing or conveying
+under this License, you may add to a covered work material governed by the terms of
+that license document, provided that the further restriction does not survive such
+relicensing or conveying.
+
+If you add terms to a covered work in accord with this section, you must place, in
+the relevant source files, a statement of the additional terms that apply to those
+files, or a notice indicating where to find the applicable terms.
+
+Additional terms, permissive or non-permissive, may be stated in the form of a
+separately written license, or stated as exceptions; the above requirements apply
+either way.
+
+### 8. Termination
+
+You may not propagate or modify a covered work except as expressly provided under
+this License. Any attempt otherwise to propagate or modify it is void, and will
+automatically terminate your rights under this License (including any patent licenses
+granted under the third paragraph of section 11).
+
+However, if you cease all violation of this License, then your license from a
+particular copyright holder is reinstated **(a)** provisionally, unless and until the
+copyright holder explicitly and finally terminates your license, and **(b)** permanently,
+if the copyright holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+Moreover, your license from a particular copyright holder is reinstated permanently
+if the copyright holder notifies you of the violation by some reasonable means, this
+is the first time you have received notice of violation of this License (for any
+work) from that copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+Termination of your rights under this section does not terminate the licenses of
+parties who have received copies or rights from you under this License. If your
+rights have been terminated and not permanently reinstated, you do not qualify to
+receive new licenses for the same material under section 10.
+
+### 9. Acceptance Not Required for Having Copies
+
+You are not required to accept this License in order to receive or run a copy of the
+Program. Ancillary propagation of a covered work occurring solely as a consequence of
+using peer-to-peer transmission to receive a copy likewise does not require
+acceptance. However, nothing other than this License grants you permission to
+propagate or modify any covered work. These actions infringe copyright if you do not
+accept this License. Therefore, by modifying or propagating a covered work, you
+indicate your acceptance of this License to do so.
+
+### 10. Automatic Licensing of Downstream Recipients
+
+Each time you convey a covered work, the recipient automatically receives a license
+from the original licensors, to run, modify and propagate that work, subject to this
+License. You are not responsible for enforcing compliance by third parties with this
+License.
+
+An “entity transaction” is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an organization, or
+merging organizations. If propagation of a covered work results from an entity
+transaction, each party to that transaction who receives a copy of the work also
+receives whatever licenses to the work the party's predecessor in interest had or
+could give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if the predecessor
+has it or can get it with reasonable efforts.
+
+You may not impose any further restrictions on the exercise of the rights granted or
+affirmed under this License. For example, you may not impose a license fee, royalty,
+or other charge for exercise of rights granted under this License, and you may not
+initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging
+that any patent claim is infringed by making, using, selling, offering for sale, or
+importing the Program or any portion of it.
+
+### 11. Patents
+
+A “contributor” is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The work thus
+licensed is called the contributor's “contributor version”.
+
+A contributor's “essential patent claims” are all patent claims owned or
+controlled by the contributor, whether already acquired or hereafter acquired, that
+would be infringed by some manner, permitted by this License, of making, using, or
+selling its contributor version, but do not include claims that would be infringed
+only as a consequence of further modification of the contributor version. For
+purposes of this definition, “control” includes the right to grant patent
+sublicenses in a manner consistent with the requirements of this License.
+
+Each contributor grants you a non-exclusive, worldwide, royalty-free patent license
+under the contributor's essential patent claims, to make, use, sell, offer for sale,
+import and otherwise run, modify and propagate the contents of its contributor
+version.
+
+In the following three paragraphs, a “patent license” is any express
+agreement or commitment, however denominated, not to enforce a patent (such as an
+express permission to practice a patent or covenant not to sue for patent
+infringement). To “grant” such a patent license to a party means to make
+such an agreement or commitment not to enforce a patent against the party.
+
+If you convey a covered work, knowingly relying on a patent license, and the
+Corresponding Source of the work is not available for anyone to copy, free of charge
+and under the terms of this License, through a publicly available network server or
+other readily accessible means, then you must either **(1)** cause the Corresponding
+Source to be so available, or **(2)** arrange to deprive yourself of the benefit of the
+patent license for this particular work, or **(3)** arrange, in a manner consistent with
+the requirements of this License, to extend the patent license to downstream
+recipients. “Knowingly relying” means you have actual knowledge that, but
+for the patent license, your conveying the covered work in a country, or your
+recipient's use of the covered work in a country, would infringe one or more
+identifiable patents in that country that you have reason to believe are valid.
+
+If, pursuant to or in connection with a single transaction or arrangement, you
+convey, or propagate by procuring conveyance of, a covered work, and grant a patent
+license to some of the parties receiving the covered work authorizing them to use,
+propagate, modify or convey a specific copy of the covered work, then the patent
+license you grant is automatically extended to all recipients of the covered work and
+works based on it.
+
+A patent license is “discriminatory” if it does not include within the
+scope of its coverage, prohibits the exercise of, or is conditioned on the
+non-exercise of one or more of the rights that are specifically granted under this
+License. You may not convey a covered work if you are a party to an arrangement with
+a third party that is in the business of distributing software, under which you make
+payment to the third party based on the extent of your activity of conveying the
+work, and under which the third party grants, to any of the parties who would receive
+the covered work from you, a discriminatory patent license **(a)** in connection with
+copies of the covered work conveyed by you (or copies made from those copies), or **(b)**
+primarily for and in connection with specific products or compilations that contain
+the covered work, unless you entered into that arrangement, or that patent license
+was granted, prior to 28 March 2007.
+
+Nothing in this License shall be construed as excluding or limiting any implied
+license or other defenses to infringement that may otherwise be available to you
+under applicable patent law.
+
+### 12. No Surrender of Others' Freedom
+
+If conditions are imposed on you (whether by court order, agreement or otherwise)
+that contradict the conditions of this License, they do not excuse you from the
+conditions of this License. If you cannot convey a covered work so as to satisfy
+simultaneously your obligations under this License and any other pertinent
+obligations, then as a consequence you may not convey it at all. For example, if you
+agree to terms that obligate you to collect a royalty for further conveying from
+those to whom you convey the Program, the only way you could satisfy both those terms
+and this License would be to refrain entirely from conveying the Program.
+
+### 13. Use with the GNU Affero General Public License
+
+Notwithstanding any other provision of this License, you have permission to link or
+combine any covered work with a work licensed under version 3 of the GNU Affero
+General Public License into a single combined work, and to convey the resulting work.
+The terms of this License will continue to apply to the part which is the covered
+work, but the special requirements of the GNU Affero General Public License, section
+13, concerning interaction through a network will apply to the combination as such.
+
+### 14. Revised Versions of this License
+
+The Free Software Foundation may publish revised and/or new versions of the GNU
+General Public License from time to time. Such new versions will be similar in spirit
+to the present version, but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program specifies that
+a certain numbered version of the GNU General Public License “or any later
+version” applies to it, you have the option of following the terms and
+conditions either of that numbered version or of any later version published by the
+Free Software Foundation. If the Program does not specify a version number of the GNU
+General Public License, you may choose any version ever published by the Free
+Software Foundation.
+
+If the Program specifies that a proxy can decide which future versions of the GNU
+General Public License can be used, that proxy's public statement of acceptance of a
+version permanently authorizes you to choose that version for the Program.
+
+Later license versions may give you additional or different permissions. However, no
+additional obligations are imposed on any author or copyright holder as a result of
+your choosing to follow a later version.
+
+### 15. Disclaimer of Warranty
+
+THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER
+EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE
+QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE
+DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+### 16. Limitation of Liability
+
+IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY
+COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS
+PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL,
+INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE
+OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE
+WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+### 17. Interpretation of Sections 15 and 16
+
+If the disclaimer of warranty and limitation of liability provided above cannot be
+given local legal effect according to their terms, reviewing courts shall apply local
+law that most closely approximates an absolute waiver of all civil liability in
+connection with the Program, unless a warranty or assumption of liability accompanies
+a copy of the Program in return for a fee.
+
+_END OF TERMS AND CONDITIONS_
+
+## How to Apply These Terms to Your New Programs
+
+If you develop a new program, and you want it to be of the greatest possible use to
+the public, the best way to achieve this is to make it free software which everyone
+can redistribute and change under these terms.
+
+To do so, attach the following notices to the program. It is safest to attach them
+to the start of each source file to most effectively state the exclusion of warranty;
+and each file should have at least the “copyright” line and a pointer to
+where the full notice is found.
+
+    hBayesDM: An R package for hierarchical Bayesian modeling of RLDM tasks.
+    Copyright (C) 2018 CCS-Lab
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program does terminal interaction, make it output a short notice like this
+when it starts in an interactive mode:
+
+    hBayesDM Copyright (C) 2018 CCS-Lab
+    This program comes with ABSOLUTELY NO WARRANTY; for details type 'show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type 'show c' for details.
+
+The hypothetical commands `show w` and `show c` should show the appropriate parts of
+the General Public License. Of course, your program's commands might be different;
+for a GUI interface, you would use an “about box”.
+
+You should also get your employer (if you work as a programmer) or school, if any, to
+sign a “copyright disclaimer” for the program, if necessary. For more
+information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+The GNU General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may consider it
+more useful to permit linking proprietary applications with the library. If this is
+what you want to do, use the GNU Lesser General Public License instead of this
+License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
+
+ +
+ +
+ + +
+ + +
+

Site built with pkgdown 1.3.0.

+
+
+
+ + + + + + diff --git a/docs/LICENSE.html b/docs/LICENSE.html new file mode 100644 index 00000000..420121cd --- /dev/null +++ b/docs/LICENSE.html @@ -0,0 +1,341 @@ + + + + + + + + +GNU General Public License • hBayesDM + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+
+ + +
+ +

Version 3, 29 June 2007
Copyright © 2007 Free Software Foundation, Inc. <http://fsf.org/>

+

Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

+
+

+Preamble

+

The GNU General Public License is a free, copyleft license for software and other kinds of works.

+

The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program–to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.

+

When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.

+

To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others.

+

For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.

+

Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it.

+

For the developers’ and authors’ protection, the GPL clearly explains that there is no warranty for this free software. For both users’ and authors’ sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions.

+

Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users’ freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users.

+

Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free.

+

The precise terms and conditions for copying, distribution and modification follow.

+
+
+

+TERMS AND CONDITIONS

+
+

+0. Definitions

+

“This License” refers to version 3 of the GNU General Public License.

+

“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.

+

“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations.

+

To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work.

+

A “covered work” means either the unmodified Program or a work based on the Program.

+

To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well.

+

To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying.

+

An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion.

+
+
+

+1. Source Code

+

The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work.

+

A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language.

+

The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it.

+

The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work’s System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work.

+

The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.

+

The Corresponding Source for a work in source code form is that same work.

+
+
+

+2. Basic Permissions

+

All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law.

+

You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you.

+

Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.

+
+ +
+

+4. Conveying Verbatim Copies

+

You may convey verbatim copies of the Program’s source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program.

+

You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.

+
+
+

+5. Conveying Modified Source Versions

+

You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions:

+
    +
  • +a) The work must carry prominent notices stating that you modified it, and giving a relevant date.
  • +
  • +b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”.
  • +
  • +c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.
  • +
  • +d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.
  • +
+

A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation’s users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.

+
+
+

+6. Conveying Non-Source Forms

+

You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:

+
    +
  • +a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.
  • +
  • +b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.
  • +
  • +c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.
  • +
  • +d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.
  • +
  • +e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.
  • +
+

A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.

+

A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product.

+

“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made.

+

If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM).

+

The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network.

+

Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying.

+
+
+

+7. Additional Terms

+

“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions.

+

When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.

+

Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:

+
    +
  • +a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or
  • +
  • +b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or
  • +
  • +c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or
  • +
  • +d) Limiting the use for publicity purposes of names of licensors or authors of the material; or
  • +
  • +e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or
  • +
  • +f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.
  • +
+

All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.

+

If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.

+

Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.

+
+
+

+8. Termination

+

You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).

+

However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.

+

Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.

+

Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.

+
+
+

+9. Acceptance Not Required for Having Copies

+

You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.

+
+
+

+10. Automatic Licensing of Downstream Recipients

+

Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.

+

An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party’s predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.

+

You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.

+
+
+

+11. Patents

+

A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor’s “contributor version”.

+

A contributor’s “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.

+

Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor’s essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.

+

In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.

+

If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient’s use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.

+

If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.

+

A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.

+

Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.

+
+
+

+12. No Surrender of Others’ Freedom

+

If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.

+
+
+

+13. Use with the GNU Affero General Public License

+

Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such.

+
+
+

+14. Revised Versions of this License

+

The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

+

Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation.

+

If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy’s public statement of acceptance of a version permanently authorizes you to choose that version for the Program.

+

Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.

+
+
+

+15. Disclaimer of Warranty

+

THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

+
+
+

+16. Limitation of Liability

+

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

+
+
+

+17. Interpretation of Sections 15 and 16

+

If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.

+

END OF TERMS AND CONDITIONS

+
+
+
+

+How to Apply These Terms to Your New Programs

+

If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.

+

To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the “copyright” line and a pointer to where the full notice is found.

+
hBayesDM: An R package for hierarchical Bayesian modeling of RLDM tasks.
+Copyright (C) 2018 CCS-Lab
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program.  If not, see <http://www.gnu.org/licenses/>.
+

Also add information on how to contact you by electronic and paper mail.

+

If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode:

+
hBayesDM Copyright (C) 2018 CCS-Lab
+This program comes with ABSOLUTELY NO WARRANTY; for details type 'show w'.
+This is free software, and you are welcome to redistribute it
+under certain conditions; type 'show c' for details.
+

The hypothetical commands show w and show c should show the appropriate parts of the General Public License. Of course, your program’s commands might be different; for a GUI interface, you would use an “about box”.

+

You should also get your employer (if you work as a programmer) or school, if any, to sign a “copyright disclaimer” for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <http://www.gnu.org/licenses/>.

+

The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read <http://www.gnu.org/philosophy/why-not-lgpl.html>.

+
+
+ +
+ +
+ + +
+ + +
+

Site built with pkgdown 1.2.0.

+
+
+
+ + + + + + diff --git a/docs/authors.html b/docs/authors.html index 59c87e78..158b8d7c 100644 --- a/docs/authors.html +++ b/docs/authors.html @@ -1,6 +1,6 @@ - + @@ -9,17 +9,17 @@ Citation and Authors • hBayesDM - + - - + + - + - + @@ -35,7 +35,8 @@ - + + + Hierarchical Bayesian Modeling of Decision-Making Tasks • hBayesDM - - - + + + - @@ -25,14 +25,15 @@
-
+
@@ -71,20 +75,35 @@

Prerequisite

To install hBayesDM, RStan should be properly installed before you proceed. For detailed instructions, please go to this link: https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started

-

For the moment, RStan requires you to specify that the C++14 standard should be used to compile Stan programs (based on this link):

-
Sys.setenv(USE_CXX14 = 1)
-library("rstan") # observe startup messages

Installation

hBayesDM can be installed from CRAN by running the following command in R:

-
install.packages('hBayesDM')  # Install hBayesDM from CRAN
-

We strongly recommend that users install hBayesDM from GitHub. All models in this GitHub version are precompiled, which saves time for compiling Stan models. However, it may cause some memory allocation issues on a Windows machine.

-

You can install the latest version from GitHub with:

-
# `devtools` is required to install hBayesDM from GitHub
-if (!require(devtools)) install.packages("devtools")
-devtools::install_github("CCS-Lab/hBayesDM")
+
install.packages("hBayesDM")  # Install hBayesDM from CRAN
+

or you can also install via GitHub with:

+ +
+

+Building at once

+

By default, each Stan model is compiled into a binary the first time it is used, which can be quite bothersome. To build all the models at once, set the environment variable BUILD_ALL to true. We highly recommend using multiple cores for the build, since it takes quite a long time to complete.

+
Sys.setenv(BUILD_ALL='true')  # Build all the models on installation
+Sys.setenv(MAKEFLAGS='-j 4')  # Use 4 cores for compilation (or the number you want)
+
+install.packages("hBayesDM")  # Install from CRAN
+# or
+devtools::install_github("CCS-Lab/hBayesDM")  # Install from GitHub
+
+
+
+

+Caveats

+

Before you load hBayesDM, you should load rstan to make sampling work properly.

+
library(rstan)
+library(hBayesDM)
- -
-

Site built with pkgdown.

+

Site built with pkgdown 1.3.0.

-
diff --git a/docs/news/index.html b/docs/news/index.html new file mode 100644 index 00000000..79cadf6e --- /dev/null +++ b/docs/news/index.html @@ -0,0 +1,324 @@ + + + + + + + + +Changelog • hBayesDM + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+
+ + +
+

+hBayesDM 0.7.0 2018-12-13 +

+
    +
  • Now, by default, each Stan model is compiled into a binary the first time it is used. To build all the models on installation, set the environment variable BUILD_ALL to true before installation.
  • +
  • Now all the implemented models are refactored using the hBayesDM_model function. You don’t have to change anything to use them, and developers can now easily implement new models!
  • +
  • We added a Kalman filter model for 4-armed bandit task (bandit4arm2_kalman_filter; Daw et al., 2006) and a probability weighting function for general description-based tasks (dbdm_prob_weight; Erev et al., 2010; Hertwig et al., 2004; Jessup et al., 2008).
  • +
  • Initial values of parameter estimation for some models are updated as plausible values, and the parameter boundaries of several models are fixed (see more on issue #63 and #64 in Github).
  • +
  • Exponential and linear models for choice under risk and ambiguity task now have four model regressors: sv, sv_fix, sv_var, and p_var.
  • +
  • Fix the Travis CI settings and related code so that builds pass properly.
  • +
+
+
+

+hBayesDM 0.6.3 Unreleased +

+
    +
  • Update the dependencies on rstan (>= 2.18.1)
  • +
  • No changes on model files, as same as the version 0.6.2
  • +
+
+
+

+hBayesDM 0.6.2 Unreleased +

+
    +
  • Fix an error on choiceRT_ddm (#44)
  • +
+
+
+

+hBayesDM 0.6.1 Unreleased +

+
    +
  • Solve an issue with built binary files.
  • +
  • Fix an error on peer_ocu with misplaced parentheses.
  • +
+
+
+

+hBayesDM 0.6.0 2018-09-11 +

+
    +
  • Add new tasks (Balloon Analogue Risk Task, Choice under Risk and Ambiguity Task, Probabilistic Selection Task, Risky Decision Task (a.k.a. Happiness task), Wisconsin Card Sorting Task)
  • +
  • Add a new model for the Iowa Gambling Task (igt_orl)
  • +
  • Change priors (Half-Cauchy(0, 5) –> Half-Cauchy(0, 1) or Half-Normal(0, 0.2))
  • +
  • printFit function now provides LOOIC weights and/or WAIC weights
  • +
+
+
+

+hBayesDM 0.5.1 Unreleased +

+
    +
  • Add models for the Two Step task
  • +
  • Add models without indecision point parameter (alpha) for the PRL task (prl_*_woa.stan)
  • +
  • Model-based regressors for the PRL task are now available
  • +
  • For the PRL task & prl_fictitious.stan & prl_fictitious_rp.stan –> change the range of alpha (indecision point) from [0, 1] to [-Inf, Inf]
  • +
+
+
+

+hBayesDM 0.5.0 2018-01-03 +

+
    +
  • Support variational Bayesian methods (vb=TRUE)
  • +
  • Allow posterior predictive checks, except for drift-diffusion models (inc_postpred=TRUE)
  • +
  • Add the peer influence task (Chung et al., 2015, USE WITH CAUTION for now and PLEASE GIVE US FEEDBACK!)
  • +
  • Add ‘prl_fictitious_rp’ model
  • +
  • Made changes to be compatible with the newest Stan version (e.g., // instead of # for commenting).
  • +
  • In ’prl_*’ models, ‘rewlos’ is replaced by ‘outcome’ so that column names and labels would be consistent across tasks as much as possible.
  • +
  • Email feature is disabled as R mail package does not allow users to send anonymous emails anymore.
  • +
  • When outputs are saved as a file (*.RData), the file name now contains the name of the data file.
  • +
+
+
+

+hBayesDM 0.4.0 2017-05-23 +

+
    +
  • Add a choice reaction time task and evidence accumulation models +
      +
    • Drift diffusion model (both hierarchical and single-subject)
    • +
    • Linear Ballistic Accumulator (LBA) model (both hierarchical and single-subject)
    • +
    +
  • +
  • Add PRL models that can fit multiple blocks
  • +
  • Add single-subject versions for the delay discounting task (dd_hyperbolic_single and dd_cs_single).
  • +
  • Standardize variable names across all models (e.g., rewlos –> outcome for all models)
  • +
  • Separate versions for CRAN and GitHub. All models/features are identical, but the GitHub version contains precompiled models.
  • +
+
+
+

+hBayesDM 0.3.1 Unreleased +

+
    +
  • Remove dependence on the modeest package. Now use a built-in function to estimate the mode of a posterior distribution.
  • +
  • Rewrite the “printFit” function.
  • +
+
+
+

+hBayesDM 0.3.0 2017-01-22 +

+
    +
  • Made several changes following the guidelines for R packages providing interfaces to Stan.
  • +
  • Stan models are precompiled and models will run immediately when called.
  • +
  • The default number of chains is set to 4.
  • +
  • The default value of adapt_delta is set to 0.95 to reduce the potential for divergences.
  • +
  • The “printFit” function uses LOOIC by default. Users can select WAIC or both (LOOIC & WAIC) if needed.
  • +
+
+
+

+hBayesDM 0.2.3.3 2016-12-28 +

+
    +
  • Add help files
  • +
  • Add a function for checking Rhat values (rhat).
  • +
  • Change a link to its tutorial website
  • +
+
+
+

+hBayesDM 0.2.3.2 2016-12-21 +

+
    +
  • Use wide normal distributions for unbounded parameters (gng_* models).
  • +
  • Automatic removal of rows (trials) containing NAs.
  • +
+
+
+

+hBayesDM 0.2.3.1 2016-09-30 +

+
    +
  • Add a function for plotting individual parameters (plotInd)
  • +
+
+
+

+hBayesDM 0.2.3 2016-07-17 +

+
    +
  • Add a new task: the Ultimatum Game
  • +
  • Add new models for the Probabilistic Reversal Learning and Risk Aversion tasks
  • +
  • ‘bandit2arm’ -> change its name to ‘bandit2arm_delta’. Now all model names are in the same format (i.e., TASK_MODEL).
  • +
  • Users can extract model-based regressors from gng_m* models
  • +
  • Include the option of customizing control parameters (adapt_delta, max_treedepth, stepsize)
  • +
  • ‘plotHDI’ function -> add ‘fontSize’ argument & change the color of histogram
  • +
+
+
+

+hBayesDM 0.2.1 2016-04-03 +

+
+

+Bug fixes

+
    +
  • All models: Fix errors when indPars=“mode”
  • +
  • ra_prospect model: Add description for column names of a data (*.txt) file
  • +
+
+
+

+Change

+
    +
  • Change standard deviations of ‘b’ and ‘pi’ priors in gng_* models
  • +
+
+
+
+

+hBayesDM 0.2.0 2016-03-25 +

+

Initially released.

+
+
+ + + +
+ +
+ + +
+

Site built with pkgdown 1.3.0.

+
+
+
+ + + + + + diff --git a/docs/pkgdown.css b/docs/pkgdown.css index 6ca2f37a..c03fb08d 100644 --- a/docs/pkgdown.css +++ b/docs/pkgdown.css @@ -58,9 +58,14 @@ img { max-width: 100%; } +/* Fix bug in bootstrap (only seen in firefox) */ +summary { + display: list-item; +} + /* Typographic tweaking ---------------------------------*/ -.contents h1.page-header { +.contents .page-header { margin-top: calc(-60px + 1em); } @@ -136,10 +141,9 @@ a.anchor { .ref-index th {font-weight: normal;} .ref-index td {vertical-align: top;} +.ref-index .icon {width: 40px;} .ref-index .alias {width: 40%;} -.ref-index .title {width: 60%;} - -.ref-index .alias {width: 40%;} +.ref-index-icons .alias {width: calc(40% - 40px);} .ref-index .title {width: 60%;} .ref-arguments th {text-align: right; padding-right: 10px;} diff --git a/docs/pkgdown.js b/docs/pkgdown.js index de9bd724..eb7e83d2 100644 --- a/docs/pkgdown.js +++ b/docs/pkgdown.js @@ -25,9 +25,13 @@ for (var i = 0; i < links.length; i++) { if (links[i].getAttribute("href") === "#") continue; - var path = paths(links[i].pathname); + // Ignore external links + if (links[i].host !== location.host) + continue; + + var nav_path = paths(links[i].pathname); - var length = prefix_length(cur_path, path); + var length = prefix_length(nav_path, cur_path); if (length > max_length) { max_length = length; pos = i; @@ -52,13 +56,14 @@ return(pieces); } + // Returns -1 if not found function prefix_length(needle, haystack) { if (needle.length > haystack.length) - return(0); + return(-1); // Special case for length-0 haystack, since for loop won't run if (haystack.length === 0) { - return(needle.length === 0 ? 1 : 0); + return(needle.length === 0 ? 
0 : -1); } for (var i = 0; i < haystack.length; i++) { @@ -78,9 +83,9 @@ element.setAttribute('data-original-title', tooltipOriginalTitle); } - if(Clipboard.isSupported()) { + if(ClipboardJS.isSupported()) { $(document).ready(function() { - var copyButton = ""; + var copyButton = ""; $(".examples, div.sourceCode").addClass("hasCopyButton"); @@ -91,7 +96,7 @@ $('.btn-copy-ex').tooltip({container: 'body'}); // Initialize clipboard: - var clipboardBtnCopies = new Clipboard('[data-clipboard-copy]', { + var clipboardBtnCopies = new ClipboardJS('[data-clipboard-copy]', { text: function(trigger) { return trigger.parentNode.textContent; } diff --git a/docs/pkgdown.yml b/docs/pkgdown.yml index f279cc12..254fe914 100644 --- a/docs/pkgdown.yml +++ b/docs/pkgdown.yml @@ -1,5 +1,5 @@ -pandoc: 1.19.2.1 -pkgdown: 1.1.0 +pandoc: 2.3.1 +pkgdown: 1.3.0 pkgdown_sha: ~ articles: [] diff --git a/docs/reference/HDIofMCMC.html b/docs/reference/HDIofMCMC.html index 2db111e3..61216040 100644 --- a/docs/reference/HDIofMCMC.html +++ b/docs/reference/HDIofMCMC.html @@ -1,6 +1,6 @@ - + @@ -9,17 +9,17 @@ Compute Highest-Density Interval — HDIofMCMC • hBayesDM - + - - + + - + - + @@ -39,7 +39,8 @@ - + + - + -Two-Arm Bandit Task — bandit2arm_delta • hBayesDM +2-Armed Bandit Task (Erev et al., 2010; Hertwig et al., 2004) — bandit2arm_delta • hBayesDM - + - - + + - + - + @@ -30,17 +30,18 @@ - + - + - + + + + + + + + + +4-Armed Bandit Task (2) — bandit4arm2_kalman_filter • hBayesDM + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+
+ + +
+ +

Hierarchical Bayesian Modeling of the 4-Armed Bandit Task (2) with the following parameters: + "lambda" (decay factor), "theta" (decay center), "beta" (inverse softmax temperature), "mu0" (anticipated initial mean of all 4 options), "sigma0" (anticipated initial sd (uncertainty factor) of all 4 options), "sigmaD" (sd of diffusion noise).

+

Contributor: Yoonseo Zoh, Lei Zhang

+

MODEL: Kalman Filter (Daw et al., 2006, Nature)

+ +
+ +
bandit4arm2_kalman_filter(data = "choose", niter = 4000,
+  nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1,
+  inits = "random", indPars = "mean", modelRegressor = FALSE,
+  vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95,
+  stepsize = 1, max_treedepth = 10, ...)
+ +

Arguments

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
data

A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "choice", "outcome". See Details below for more information.

niter

Number of iterations, including warm-up. Defaults to 4000.

nwarmup

Number of iterations used for warm-up only. Defaults to 1000.

nchain

Number of Markov chains to run. Defaults to 4.

ncore

Number of CPUs to be used for running. Defaults to 1.

nthin

Every i == nthin sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.

inits

Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.

indPars

Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".

modelRegressor

Export model-based regressors? TRUE or FALSE. +Currently not available for this model.

vb

Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.

inc_postpred

Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.

adapt_delta

Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See Details below.

stepsize

Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See Details below.

max_treedepth

Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See Details below.

...

Not used for this model.

+ +

Value

+ +

A class "hBayesDM" object modelData with the following components:

+
model

Character value that is the name of the model ("bandit4arm2_kalman_filter").

+
allIndPars

Data.frame containing the summarized parameter values (as specified by + indPars) for each subject.

+
parVals

List object containing the posterior samples over different parameters.

+
fit

A class stanfit object that contains the fitted Stan + model.

+
rawdata

Data.frame containing the raw data used to fit the model, as specified by + the user.

+ + + +
+ + +

Details

+ +

This section describes some of the function arguments in greater detail.

+

data should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a tab-delimited text + file, whose rows represent trial-by-trial observations and columns represent variables.
+For the 4-Armed Bandit Task (2), there should be 3 columns of data with the + labels "subjID", "choice", "outcome". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below:

+
"subjID"

A unique identifier for each subject in the data-set.

+
"choice"

Integer value representing the option chosen on the given trial: 1, 2, 3, or 4.

+
"outcome"

Integer value representing the outcome of the given trial (where reward == 1, and loss == -1).

+

*Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns.

+

nwarmup is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + nwarmup argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors.

+

nchain is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + plot(output, type = "trace"). The trace-plot should resemble a "furry caterpillar".

+

nthin is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every i == nthin samples to generate posterior distributions. By default, + nthin is equal to 1, meaning that every sample is used to generate the posterior.

+

Control Parameters: adapt_delta, stepsize, and max_treedepth are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the Stan User's Guide + and Reference Manual, or to the help page for stan for a less technical + description of these arguments.

+ +

References

+ +

Daw, N. D., O'Doherty, J. P., Dayan, P., Seymour, B., & Dolan, R. J. (2006). Cortical substrates + for exploratory decisions in humans. Nature, 441(7095), 876-879.

+ +

See also

+ +

We refer users to our in-depth tutorial for an example of using hBayesDM: + https://rpubs.com/CCSL/hBayesDM

+ + +

Examples

+
# NOT RUN {
+# Run the model and store results in "output"
+output <- bandit4arm2_kalman_filter("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4)
+
+# Visually check convergence of the sampling chains (should look like 'hairy caterpillars')
+plot(output, type = "trace")
+
+# Check Rhat values (all Rhat values should be less than or equal to 1.1)
+rhat(output)
+
+# Plot the posterior distributions of the hyper-parameters (distributions should be unimodal)
+plot(output)
+
+# Show the WAIC and LOOIC model fit estimates
+printFit(output)
+# }
+
+ +
+ +
+ + +
+

Site built with pkgdown 1.3.0.

+
+
+
+ + + + + + diff --git a/docs/reference/bandit4arm_4par.html b/docs/reference/bandit4arm_4par.html index eedfa860..2d84092c 100644 --- a/docs/reference/bandit4arm_4par.html +++ b/docs/reference/bandit4arm_4par.html @@ -1,25 +1,25 @@ - + -4-armed bandit task — bandit4arm_4par • hBayesDM +4-Armed Bandit Task — bandit4arm_4par • hBayesDM - + - - + + - + - + @@ -30,17 +30,18 @@ - + - + - + + - + -4-armed bandit task — bandit4arm_lapse • hBayesDM +4-Armed Bandit Task — bandit4arm_lapse • hBayesDM - + - - + + - + - + @@ -30,17 +30,18 @@ - + - + - + + - + @@ -9,17 +9,17 @@ Balloon Analogue Risk Task (Ravenzwaaij et al., 2011, Journal of Mathematical Psychology) — bart_par4 • hBayesDM - + - - + + - + - + @@ -32,16 +32,17 @@ - +MODEL: Re-parameterized version (by Harhim Park &amp; Jaeyeong Yang) of BART Model (Ravenzwaaij et al., 2011) with 4 parameters" /> - + + - + -Choice Reaction Time task, drift diffusion modeling — choiceRT_ddm • hBayesDM +Choice Reaction Time Task — choiceRT_ddm • hBayesDM - + - - + + - + - + @@ -30,20 +30,24 @@ - + - + - + + - + -Choice Reaction Time task, drift diffusion modeling — choiceRT_ddm_single • hBayesDM +Choice Reaction Time Task — choiceRT_ddm_single • hBayesDM - + - - + + - + - + @@ -30,20 +30,24 @@ - + - + - + + - + @@ -9,17 +9,17 @@ Choice Reaction Time task, linear ballistic accumulator modeling — choiceRT_lba • hBayesDM - + - - + + - + - + @@ -44,7 +44,8 @@ - + + - + @@ -9,17 +9,17 @@ Choice Reaction Time task, linear ballistic accumulator modeling — choiceRT_lba_single • hBayesDM - + - - + + - + - + @@ -44,7 +44,8 @@ - + + - + -Choice under Risk and Ambiguity Task — cra_exp • hBayesDM +Choice Under Risk and Ambiguity Task — cra_exp • hBayesDM - + - - + + - + - + @@ -30,22 +30,19 @@ - + - +MODEL: Exponential Subjective Value Model (Hsu et al., 2005, Science)" /> - + + - + -Choice under Risk and Ambiguity Task — cra_linear • hBayesDM +Choice Under Risk and Ambiguity Task — cra_linear • hBayesDM - + - - + + - + - + @@ -30,22 +30,19 @@ 
- + - +MODEL: Linear Subjective Value Model (Levy et al., 2010, J Neurophysiol)" /> - + + + + + + + + + +Description Based Decison Making Task — dbdm_prob_weight • hBayesDM + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+
+ + +
+ +

Hierarchical Bayesian Modeling of the Description Based Decision Making Task with the following parameters: + "tau" (probability weight function), "rho" (subject utility function), "lambda" (loss aversion parameter), "beta" (inverse softmax temperature).

+

MODEL: Probability Weight Function (Erev et al., 2010; Hertwig et al., 2004; Jessup et al., 2008)

+ +
+ +
dbdm_prob_weight(data = "choose", niter = 4000, nwarmup = 1000,
+  nchain = 4, ncore = 1, nthin = 1, inits = "random",
+  indPars = "mean", modelRegressor = FALSE, vb = FALSE,
+  inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1,
+  max_treedepth = 10, ...)
+ +

Arguments

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
data

A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "opt1hprob", "opt2hprob", "opt1hval", "opt1lval", "opt2hval", "opt2lval", "choice". See Details below for more information.

niter

Number of iterations, including warm-up. Defaults to 4000.

nwarmup

Number of iterations used for warm-up only. Defaults to 1000.

nchain

Number of Markov chains to run. Defaults to 4.

ncore

Number of CPUs to be used for running. Defaults to 1.

nthin

Every i == nthin sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.

inits

Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.

indPars

Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".

modelRegressor

Export model-based regressors? TRUE or FALSE. +Currently not available for this model.

vb

Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.

inc_postpred

Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.

adapt_delta

Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See Details below.

stepsize

Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See Details below.

max_treedepth

Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See Details below.

...

Not used for this model.

+ +

Value

+ +

A class "hBayesDM" object modelData with the following components:

+
model

Character value that is the name of the model ("dbdm_prob_weight").

+
allIndPars

Data.frame containing the summarized parameter values (as specified by + indPars) for each subject.

+
parVals

List object containing the posterior samples over different parameters.

+
fit

A class stanfit object that contains the fitted Stan + model.

+
rawdata

Data.frame containing the raw data used to fit the model, as specified by + the user.

+ + + +
+ + +

Details

+ +

This section describes some of the function arguments in greater detail.

+

data should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a tab-delimited text + file, whose rows represent trial-by-trial observations and columns represent variables.
+For the Description Based Decision Making Task, there should be 8 columns of data with the + labels "subjID", "opt1hprob", "opt2hprob", "opt1hval", "opt1lval", "opt2hval", "opt2lval", "choice". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below:

+
"subjID"

A unique identifier for each subject in the data-set.

+
"opt1hprob"

Possibility of getting the higher value of the outcome (opt1hval) when choosing option 1.

+
"opt2hprob"

Possibility of getting the higher value of the outcome (opt2hval) when choosing option 2.

+
"opt1hval"

Possible (with opt1hprob probability) outcome of option 1.

+
"opt1lval"

Possible (with (1 - opt1hprob) probability) outcome of option 1.

+
"opt2hval"

Possible (with opt2hprob probability) outcome of option 2.

+
"opt2lval"

Possible (with (1 - opt2hprob) probability) outcome of option 2.

+
"choice"

If option 1 was selected, choice == 1; else if option 2 was selected, choice == 2.

+

*Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns.

+

nwarmup is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + nwarmup argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors.

+

nchain is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + plot(output, type = "trace"). The trace-plot should resemble a "furry caterpillar".

+

nthin is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every i == nthin samples to generate posterior distributions. By default, + nthin is equal to 1, meaning that every sample is used to generate the posterior.

+

Control Parameters: adapt_delta, stepsize, and max_treedepth are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the Stan User's Guide + and Reference Manual, or to the help page for stan for a less technical + description of these arguments.

+ +

References

+ +

Erev, I., Ert, E., Roth, A. E., Haruvy, E., Herzog, S. M., Hau, R., ... & Lebiere, C. (2010). A + choice prediction competition: Choices from experience and from description. Journal of + Behavioral Decision Making, 23(1), 15-47.

+

Hertwig, R., Barron, G., Weber, E. U., & Erev, I. (2004). Decisions from experience and the + effect of rare events in risky choice. Psychological science, 15(8), 534-539.

+

Jessup, R. K., Bishara, A. J., & Busemeyer, J. R. (2008). Feedback produces divergence from + prospect theory in descriptive choice. Psychological Science, 19(10), 1015-1022.

+ +

See also

+ +

We refer users to our in-depth tutorial for an example of using hBayesDM: + https://rpubs.com/CCSL/hBayesDM

+ + +

Examples

+
# NOT RUN {
+# Run the model and store results in "output"
+output <- dbdm_prob_weight("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4)
+
+# Visually check convergence of the sampling chains (should look like 'hairy caterpillars')
+plot(output, type = "trace")
+
+# Check Rhat values (all Rhat values should be less than or equal to 1.1)
+rhat(output)
+
+# Plot the posterior distributions of the hyper-parameters (distributions should be unimodal)
+plot(output)
+
+# Show the WAIC and LOOIC model fit estimates
+printFit(output)
+# }
+
+ +
+ +
+ + +
+

Site built with pkgdown 1.3.0.

+
+
+
+ + + + + + diff --git a/docs/reference/dd_cs.html b/docs/reference/dd_cs.html index ec5d878f..1441da66 100644 --- a/docs/reference/dd_cs.html +++ b/docs/reference/dd_cs.html @@ -1,6 +1,6 @@ - + @@ -9,17 +9,17 @@ Delay Discounting Task — dd_cs • hBayesDM - + - - + + - + - + @@ -32,15 +32,16 @@ - + - + + - + -Delay Discounting Task (Ebert & Prelec, 2007) — dd_cs_single • hBayesDM +Delay Discounting Task — dd_cs_single • hBayesDM - + - - + + - + - + @@ -30,17 +30,18 @@ - + - + - + + - + @@ -9,17 +9,17 @@ Delay Discounting Task — dd_exp • hBayesDM - + - - + + - + - + @@ -32,15 +32,16 @@ - + - + + - + @@ -9,17 +9,17 @@ Delay Discounting Task — dd_hyperbolic • hBayesDM - + - - + + - + - + @@ -32,15 +32,16 @@ - + - + + - + -Delay Discounting Task (Ebert & Prelec, 2007) — dd_hyperbolic_single • hBayesDM +Delay Discounting Task — dd_hyperbolic_single • hBayesDM - + - - + + - + - + @@ -30,17 +30,18 @@ - + - + - + + - + @@ -9,17 +9,17 @@ Function to estimate mode of MCMC samples — estimate_mode • hBayesDM - + - - + + - + - + @@ -39,7 +39,8 @@ - + + - + @@ -9,17 +9,17 @@ Extract Model Comparison Estimates — extract_ic • hBayesDM - + - - + + - + - + @@ -38,7 +38,8 @@ - + + - + @@ -9,17 +9,17 @@ Orthogonalized Go/Nogo Task — gng_m1 • hBayesDM - + - - + + - + - + @@ -32,15 +32,16 @@ - + - + + - + @@ -9,17 +9,17 @@ Orthogonalized Go/Nogo Task — gng_m2 • hBayesDM - + - - + + - + - + @@ -32,15 +32,16 @@ - + - + + - + @@ -9,17 +9,17 @@ Orthogonalized Go/Nogo Task — gng_m3 • hBayesDM - + - - + + - + - + @@ -32,15 +32,16 @@ - + - + + - + @@ -9,17 +9,17 @@ Orthogonalized Go/Nogo Task — gng_m4 • hBayesDM - + - - + + - + - + @@ -32,15 +32,16 @@ - + - + + - + @@ -9,17 +9,17 @@ Hierarchical Bayesian Modeling of Decision-Making Tasks — hBayesDM-package • hBayesDM - + - - + + - + - + @@ -78,7 +78,8 @@ - + + + + + + + + + +hBayesDM Model Base Function — hBayesDM_model • hBayesDM + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + +
+ +
+
+ + +
+ +

The base function from which all hBayesDM model functions are created.

+

Contributor: Jethro Lee

+ +
+ +
hBayesDM_model(task_name, model_name, model_type = "", data_columns,
+  parameters, regressors = NULL, postpreds = "y_pred",
+  stanmodel_arg = NULL, preprocess_func)
+ +

Arguments

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
task_name

Character value for name of task. E.g. "gng".

model_name

Character value for name of model. E.g. "m1".

model_type

Character value for modeling type: "" OR "single" OR +"multipleB".

data_columns

Character vector of necessary column names for the data. E.g. +c("subjID", "cue", "keyPressed", "outcome").

parameters

List of parameters, with information about their lower bound, plausible value, +upper bound. E.g. list("xi" = c(0, 0.1, 1), "ep" = c(0, 0.2, 1), "rho" = c(0, exp(2), +Inf)).

regressors

List of regressors, with information about their extracted dimensions. E.g. +list("Qgo" = 2, "Qnogo" = 2, "Wgo" = 2, "Wnogo" = 2). OR if model-based regressors are +not available for this model, NULL.

postpreds

Character vector of name(s) for the trial-level posterior predictive +simulations. Default is "y_pred". OR if posterior predictions are not yet available for +this model, NULL.

stanmodel_arg

Leave as NULL (default) for completed models. Else should either be a +character value (specifying the name of a Stan file) OR a stanmodel object (returned as +a result of running stan_model).

preprocess_func

Function to preprocess the raw data before it gets passed to Stan. Takes +(at least) two arguments: a data.table object raw_data and a list object +general_info. Possible to include additional argument(s) to use during preprocessing. +Should return a list object data_list, which will then directly be passed to Stan.

+ +

Value

+ +

A specific hBayesDM model function.

+ +

Details

+ +

task_name: Typically same task models share the same data column requirements.

+

model_name: Typically different models are distinguished by their different list of + parameters.

+

model_type is one of the following three:

+
""

Modeling of multiple subjects. (Default hierarchical Bayesian analysis.)

+
"single"

Modeling of a single subject.

+
"multipleB"

Modeling of multiple subjects, where multiple blocks exist within + each subject.

+
+

data_columns must be the entirety of necessary data columns used at some point in the R + or Stan code. I.e. "subjID" must always be included. In the case of 'multipleB' type + models, "block" should also be included as well.

+

parameters is a list object, whose keys are the parameters of this model. Each parameter + key must be assigned a numeric vector holding 3 elements: the parameter's lower bound, + plausible value, and upper bound.

+

regressors is a list object, whose keys are the model-based regressors of this model. + Each regressor key must be assigned a numeric value indicating the number of dimensions its + data will be extracted as. If model-based regressors are not available for this model, this + argument should just be NULL.

+

postpreds defaults to "y_pred", but any other character vector holding + appropriate names is possible (c.f. Two-Step Task models). If posterior predictions are not yet + available for this model, this argument should just be NULL.

+

stanmodel_arg can be used by developers, during the developmental stage of creating a + new model function. If this argument is passed a character value, the Stan file with the + corresponding name will be used for model fitting. If this argument is passed a + stanmodel object, that stanmodel object will be used for model fitting. When + creation of the model function is complete, this argument should just be left as NULL.

+

preprocess_func is the part of the code that is specific to the model, and is thus + written in the specific model R file.
+Arguments for this function are:

+
raw_data

A data.table that holds the raw user data, which was read by using + fread.

+
general_info

A list that holds the general information about the raw data, i.e. + subjs, n_subj, t_subjs, t_max, b_subjs, b_max.

+
...

Optional additional argument(s) that specific model functions may want to + include. Examples of such additional arguments currently being used in hBayesDM models are: + RTbound (choiceRT_ddm models), payscale (igt models), and trans_prob (ts + models).

+

Return value for this function should be:

+
data_list

A list with appropriately named keys (as required by the model Stan + file), holding the fully preprocessed user data.

+

NOTE: Syntax for data.table slightly differs from that of data.frame. If you want to use + raw_data as a data.frame when writing the preprocess_func, simply begin with the + line: raw_data <- as.data.frame(raw_data).
+NOTE: Because case- and underscore-insensitive column names are allowed in user data, + raw_data columns must now be referenced by their lowercase non-underscored versions, + e.g. "subjid", within the code of the preprocess function.

+ + +
+ +
+ +
+ + +
+

Site built with pkgdown 1.3.0.

+
+
+
+ + + + + + diff --git a/docs/reference/igt_orl.html b/docs/reference/igt_orl.html index ef7da0a8..cba75e78 100644 --- a/docs/reference/igt_orl.html +++ b/docs/reference/igt_orl.html @@ -1,6 +1,6 @@ - + @@ -9,17 +9,17 @@ Iowa Gambling Task — igt_orl • hBayesDM - + - - + + - + - + @@ -32,16 +32,17 @@ - +MODEL: Outcome-Representation Learning Model (Haines et al., 2018, Cognitive Science)" /> - + + - + @@ -9,17 +9,17 @@ Iowa Gambling Task — igt_pvl_decay • hBayesDM - + - - + + - + - + @@ -32,15 +32,16 @@ - + - + + - + @@ -9,17 +9,17 @@ Iowa Gambling Task (Ahn et al., 2008) — igt_pvl_delta • hBayesDM - + - - + + - + - + @@ -32,15 +32,16 @@ - + - + + - + @@ -9,17 +9,17 @@ Iowa Gambling Task — igt_vpp • hBayesDM - + - - + + - + - + @@ -32,15 +32,16 @@ - + - + + - + @@ -9,17 +9,17 @@ Function reference • hBayesDM - + - - + + - + - + @@ -35,7 +35,8 @@ - + + - + @@ -9,17 +9,17 @@ Function to plot multiple figures — multiplot • hBayesDM - + - - + + - + - + @@ -39,7 +39,8 @@ - + + - + -Peer influence task (Chung et al., 2015 Nature Neuroscience) — peer_ocu • hBayesDM +Peer Influence Task (Chung et al., 2015, Nature Neuroscience) — peer_ocu • hBayesDM - + - - + + - + - + @@ -30,18 +30,19 @@ - + - +MODEL: Other-Conferred Utility (OCU) Model" /> - + + - + @@ -9,17 +9,17 @@ General Purpose Plotting for hBayesDM. This function plots hyper parameters. — plot.hBayesDM • hBayesDM - + - - + + - + - + @@ -38,7 +38,8 @@ - + + - + @@ -9,17 +9,17 @@ Plots the histogram of MCMC samples. — plotDist • hBayesDM - + - - + + - + - + @@ -38,7 +38,8 @@ - + + - + @@ -9,17 +9,17 @@ Plots highest density interval (HDI) from (MCMC) samples and prints HDI in the R console. HDI is indicated by a red line. 
— plotHDI • hBayesDM - + - - + + - + - + @@ -38,7 +38,8 @@ - + + - + @@ -9,17 +9,17 @@ Plots individual posterior distributions, using the stan_plot function of the rstan package — plotInd • hBayesDM - + - - + + - + - + @@ -38,7 +38,8 @@ - + + - + @@ -9,17 +9,17 @@ Print model-fits (mean LOOIC or WAIC values in addition to Akaike weights) of hBayesDM Models — printFit • hBayesDM - + - - + + - + - + @@ -38,7 +38,8 @@ - + + - + @@ -9,17 +9,17 @@ Probabilistic Reversal Learning Task — prl_ewa • hBayesDM - + - - + + - + - + @@ -32,16 +32,17 @@ - + - + + - + @@ -9,17 +9,17 @@ Probabilistic Reversal Learning Task — prl_fictitious • hBayesDM - + - - + + - + - + @@ -32,16 +32,17 @@ - + - + + - + -Probabilistic Reversal Learning Task (Glascher et al, 2008), multiple blocks per subject — prl_fictitious_multipleB • hBayesDM +Probabilistic Reversal Learning Task — prl_fictitious_multipleB • hBayesDM - + - - + + - + - + @@ -30,18 +30,19 @@ - + - + - + + - + @@ -9,17 +9,17 @@ Probabilistic Reversal Learning Task — prl_fictitious_rp • hBayesDM - + - - + + - + - + @@ -32,16 +32,17 @@ - + - + + - + @@ -9,17 +9,17 @@ Probabilistic Reversal Learning Task — prl_fictitious_rp_woa • hBayesDM - + - - + + - + - + @@ -32,16 +32,17 @@ - + - + + - + @@ -9,17 +9,17 @@ Probabilistic Reversal Learning Task — prl_fictitious_woa • hBayesDM - + - - + + - + - + @@ -32,16 +32,17 @@ - + - + + - + @@ -9,17 +9,17 @@ Probabilistic Reversal Learning Task — prl_rp • hBayesDM - + - - + + - + - + @@ -32,16 +32,17 @@ - + - + + - + -Probabilistic Reversal Learning Task, multiple blocks per subject — prl_rp_multipleB • hBayesDM +Probabilistic Reversal Learning Task — prl_rp_multipleB • hBayesDM - + - - + + - + - + @@ -30,18 +30,19 @@ - + - + - + + - + @@ -9,17 +9,17 @@ Probabilistic Selection Task — pst_gainloss_Q • hBayesDM - + - - + + - + - + @@ -32,20 +32,17 @@ - +MODEL: Gain-Loss Q Learning Model (Frank et al., 2007, PNAS)" /> - + + - + @@ -9,17 +9,17 @@ Risk Aversion Task — ra_noLA • hBayesDM - + - - + + 
- + - + @@ -32,15 +32,16 @@ - + - + + - + @@ -9,17 +9,17 @@ Risk Aversion Task — ra_noRA • hBayesDM - + - - + + - + - + @@ -32,15 +32,16 @@ - + - + + - + @@ -9,17 +9,17 @@ Risk Aversion Task — ra_prospect • hBayesDM - + - - + + - + - + @@ -32,15 +32,16 @@ - + - + + - + @@ -9,17 +9,17 @@ Risky Decision Task — rdt_happiness • hBayesDM - + - - + + - + - + @@ -32,16 +32,17 @@ - +MODEL: Happiness Computational Model (Rutledge et al., 2014, PNAS)" /> - + + - + @@ -9,17 +9,17 @@ Function for extracting Rhat values from an hBayesDM object — rhat • hBayesDM - + - - + + - + - + @@ -40,7 +40,8 @@ - + + - + @@ -9,17 +9,17 @@ Two-Step Task (Daw et al., 2011, Neuron) — ts_par4 • hBayesDM - + - - + + - + - + @@ -32,16 +32,17 @@ - +MODEL: Hybrid Model (Daw et al., 2011; Wunderlich et al., 2012), with 4 parameters" /> - + + - + @@ -9,17 +9,17 @@ Two-Step Task (Daw et al., 2011, Neuron) — ts_par6 • hBayesDM - + - - + + - + - + @@ -32,16 +32,17 @@ - +MODEL: Hybrid Model (Daw et al., 2011, Neuron), with 6 parameters" /> - + + - + @@ -9,17 +9,17 @@ Two-Step Task (Daw et al., 2011, Neuron) — ts_par7 • hBayesDM - + - - + + - + - + @@ -32,16 +32,17 @@ - +MODEL: Hybrid Model (Daw et al., 2011, Neuron), with 7 parameters (original model)" /> - + + - + @@ -9,17 +9,17 @@ Norm-Training Ultimatum Game — ug_bayes • hBayesDM - + - - + + - + - + @@ -32,15 +32,16 @@ - + - + + - + @@ -9,17 +9,17 @@ Norm-Training Ultimatum Game — ug_delta • hBayesDM - + - - + + - + - + @@ -32,15 +32,16 @@ - + - + + - + @@ -9,17 +9,17 @@ Wisconsin Card Sorting Task — wcs_sql • hBayesDM - + - - + + - + - + @@ -32,16 +32,17 @@ - +MODEL: Sequential Learning Model (Bishara et al., 2010, Journal of Mathematical Psychology)" /> - + + 0; 2,4 --> 1 @@ -94,10 +96,10 @@ model { // After observing the reward at Level 2... // Update Level 2 v_mf of the chosen option. 
Level 2--> choose one of level 2 options and observe reward - v_mf[2+ level2_choice[i,t]] = v_mf[2+ level2_choice[i,t]] + a[i]*(reward[i,t] - v_mf[2+ level2_choice[i,t] ] ); + v_mf[2+ level2_choice[i,t]] += a[i]*(reward[i,t] - v_mf[2+ level2_choice[i,t] ] ); // Update Level 1 v_mf - v_mf[level1_choice[i,t]] = v_mf[level1_choice[i,t]] + a[i] * (reward[i,t] - v_mf[2+level2_choice[i,t]]); + v_mf[level1_choice[i,t]] += a[i] * (reward[i,t] - v_mf[2+level2_choice[i,t]]); } // end of t loop } // end of i loop @@ -126,10 +128,10 @@ generated quantities { } // Generate group level parameter values - mu_a = Phi_approx( mu_p[1] ); - mu_beta = exp( mu_p[2] ); - mu_pi = Phi_approx( mu_p[3] ) * 5; - mu_w = Phi_approx( mu_p[4] ); + mu_a = Phi_approx( mu_pr[1] ); + mu_beta = exp( mu_pr[2] ); + mu_pi = Phi_approx( mu_pr[3] ) * 5; + mu_w = Phi_approx( mu_pr[4] ); { // local section, this saves time and space for (i in 1:N) { @@ -166,10 +168,10 @@ generated quantities { } else{ level1_prob_choice2 = inv_logit( beta[i]*(v_hybrid[2]-v_hybrid[1]) + pi[i]*(2*level1_choice[i,t-1] -3) ); } - log_lik[i] = log_lik[i] + bernoulli_lpmf( level1_choice_01 | level1_prob_choice2 ); + log_lik[i] += bernoulli_lpmf( level1_choice_01 | level1_prob_choice2 ); // Observe Level2 and update Level1 of the chosen option - v_mf[level1_choice[i,t]] = v_mf[level1_choice[i,t]] + a[i]*(v_mf[2+ level2_choice[i,t]] - v_mf[ level1_choice[i,t]]); + v_mf[level1_choice[i,t]] += a[i]*(v_mf[2+ level2_choice[i,t]] - v_mf[ level1_choice[i,t]]); // Prob of choosing stim 2 (2 from [1,2] OR 4 from [3,4]) in ** Level (step) 2 ** level2_choice_01 = 1 - modulus(level2_choice[i,t], 2); // 1,3 --> 0; 2,4 @@ -179,23 +181,24 @@ generated quantities { } else { // level2_choice = 1 or 2 level2_prob_choice2 = inv_logit( beta[i]*( v_mf[4] - v_mf[3] ) ); } - log_lik[i] = log_lik[i] + bernoulli_lpmf( level2_choice_01 | level2_prob_choice2 ); + log_lik[i] += bernoulli_lpmf( level2_choice_01 | level2_prob_choice2 ); // generate posterior 
prediction for current trial y_pred_step1[i,t] = bernoulli_rng(level1_prob_choice2); y_pred_step2[i,t] = bernoulli_rng(level2_prob_choice2); // Observe Level2 and update Level1 of the chosen option - v_mf[level1_choice[i,t]] = v_mf[level1_choice[i,t]] + a[i]*(v_mf[2+ level2_choice[i,t]] - v_mf[ level1_choice[i,t]]); + v_mf[level1_choice[i,t]] += a[i]*(v_mf[2+ level2_choice[i,t]] - v_mf[ level1_choice[i,t]]); // After observing the reward at Level 2... // Update Level 2 v_mf of the chosen option. Level 2--> choose one of level 2 options and observe reward - v_mf[2+ level2_choice[i,t]] = v_mf[2+ level2_choice[i,t]] + a[i]*(reward[i,t] - v_mf[2+ level2_choice[i,t] ] ); + v_mf[2+ level2_choice[i,t]] += a[i]*(reward[i,t] - v_mf[2+ level2_choice[i,t] ] ); // Update Level 1 v_mf - v_mf[level1_choice[i,t]] = v_mf[level1_choice[i,t]] + a[i] * (reward[i,t] - v_mf[2+level2_choice[i,t]]); + v_mf[level1_choice[i,t]] += a[i] * (reward[i,t] - v_mf[2+level2_choice[i,t]]); } // end of t loop } // end of i loop - } - } + } +} + diff --git a/inst/stan_files/ts_par6.hpp b/inst/stan_files/ts_par6.hpp new file mode 100644 index 00000000..fd8d7694 --- /dev/null +++ b/inst/stan_files/ts_par6.hpp @@ -0,0 +1,1603 @@ +/* + hBayesDM is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + hBayesDM is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with hBayesDM. If not, see . 
+*/ +#ifndef MODELS_HPP +#define MODELS_HPP +#define STAN__SERVICES__COMMAND_HPP +#include +// Code generated by Stan version 2.18.0 + +#include + +namespace model_ts_par6_namespace { + +using std::istream; +using std::string; +using std::stringstream; +using std::vector; +using stan::io::dump; +using stan::math::lgamma; +using stan::model::prob_grad; +using namespace stan::math; + +static int current_statement_begin__; + +stan::io::program_reader prog_reader__() { + stan::io::program_reader reader; + reader.add_event(0, 0, "start", "model_ts_par6"); + reader.add_event(0, 0, "include", "/pre/license.stan"); + reader.add_event(0, 0, "start", "/pre/license.stan"); + reader.add_event(14, 14, "end", "/pre/license.stan"); + reader.add_event(14, 1, "restart", "model_ts_par6"); + reader.add_event(227, 212, "end", "model_ts_par6"); + return reader; +} + +#include + class model_ts_par6 : public prob_grad { +private: + int N; + int T; + vector Tsubj; + vector > level1_choice; + vector > level2_choice; + vector > reward; + double trans_prob; +public: + model_ts_par6(stan::io::var_context& context__, + std::ostream* pstream__ = 0) + : prob_grad(0) { + ctor_body(context__, 0, pstream__); + } + + model_ts_par6(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__ = 0) + : prob_grad(0) { + ctor_body(context__, random_seed__, pstream__); + } + + void ctor_body(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__) { + typedef double local_scalar_t__; + + boost::ecuyer1988 base_rng__ = + stan::services::util::create_rng(random_seed__, 0); + (void) base_rng__; // suppress unused var warning + + current_statement_begin__ = -1; + + static const char* function__ = "model_ts_par6_namespace::model_ts_par6"; + (void) function__; // dummy to suppress unused var warning + size_t pos__; + (void) pos__; // dummy to suppress unused var warning + std::vector vals_i__; + std::vector vals_r__; + local_scalar_t__ 
DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + + // initialize member variables + try { + current_statement_begin__ = 17; + context__.validate_dims("data initialization", "N", "int", context__.to_vec()); + N = int(0); + vals_i__ = context__.vals_i("N"); + pos__ = 0; + N = vals_i__[pos__++]; + current_statement_begin__ = 18; + context__.validate_dims("data initialization", "T", "int", context__.to_vec()); + T = int(0); + vals_i__ = context__.vals_i("T"); + pos__ = 0; + T = vals_i__[pos__++]; + current_statement_begin__ = 19; + validate_non_negative_index("Tsubj", "N", N); + context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); + validate_non_negative_index("Tsubj", "N", N); + Tsubj = std::vector(N,int(0)); + vals_i__ = context__.vals_i("Tsubj"); + pos__ = 0; + size_t Tsubj_limit_0__ = N; + for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { + Tsubj[i_0__] = vals_i__[pos__++]; + } + current_statement_begin__ = 20; + validate_non_negative_index("level1_choice", "N", N); + validate_non_negative_index("level1_choice", "T", T); + context__.validate_dims("data initialization", "level1_choice", "int", context__.to_vec(N,T)); + validate_non_negative_index("level1_choice", "N", N); + validate_non_negative_index("level1_choice", "T", T); + level1_choice = std::vector >(N,std::vector(T,int(0))); + vals_i__ = context__.vals_i("level1_choice"); + pos__ = 0; + size_t level1_choice_limit_1__ = T; + for (size_t i_1__ = 0; i_1__ < level1_choice_limit_1__; ++i_1__) { + size_t level1_choice_limit_0__ = N; + for (size_t i_0__ = 0; i_0__ < level1_choice_limit_0__; ++i_0__) { + level1_choice[i_0__][i_1__] = vals_i__[pos__++]; + } + } + current_statement_begin__ = 21; + validate_non_negative_index("level2_choice", "N", N); + validate_non_negative_index("level2_choice", "T", T); + context__.validate_dims("data initialization", "level2_choice", "int", context__.to_vec(N,T)); + 
validate_non_negative_index("level2_choice", "N", N); + validate_non_negative_index("level2_choice", "T", T); + level2_choice = std::vector >(N,std::vector(T,int(0))); + vals_i__ = context__.vals_i("level2_choice"); + pos__ = 0; + size_t level2_choice_limit_1__ = T; + for (size_t i_1__ = 0; i_1__ < level2_choice_limit_1__; ++i_1__) { + size_t level2_choice_limit_0__ = N; + for (size_t i_0__ = 0; i_0__ < level2_choice_limit_0__; ++i_0__) { + level2_choice[i_0__][i_1__] = vals_i__[pos__++]; + } + } + current_statement_begin__ = 22; + validate_non_negative_index("reward", "N", N); + validate_non_negative_index("reward", "T", T); + context__.validate_dims("data initialization", "reward", "int", context__.to_vec(N,T)); + validate_non_negative_index("reward", "N", N); + validate_non_negative_index("reward", "T", T); + reward = std::vector >(N,std::vector(T,int(0))); + vals_i__ = context__.vals_i("reward"); + pos__ = 0; + size_t reward_limit_1__ = T; + for (size_t i_1__ = 0; i_1__ < reward_limit_1__; ++i_1__) { + size_t reward_limit_0__ = N; + for (size_t i_0__ = 0; i_0__ < reward_limit_0__; ++i_0__) { + reward[i_0__][i_1__] = vals_i__[pos__++]; + } + } + current_statement_begin__ = 23; + context__.validate_dims("data initialization", "trans_prob", "double", context__.to_vec()); + trans_prob = double(0); + vals_r__ = context__.vals_r("trans_prob"); + pos__ = 0; + trans_prob = vals_r__[pos__++]; + + // validate, data variables + current_statement_begin__ = 17; + check_greater_or_equal(function__,"N",N,1); + current_statement_begin__ = 18; + check_greater_or_equal(function__,"T",T,1); + current_statement_begin__ = 19; + for (int k0__ = 0; k0__ < N; ++k0__) { + check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); + check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); + } + current_statement_begin__ = 20; + for (int k0__ = 0; k0__ < N; ++k0__) { + for (int k1__ = 0; k1__ < T; ++k1__) { + 
check_greater_or_equal(function__,"level1_choice[k0__][k1__]",level1_choice[k0__][k1__],1); + check_less_or_equal(function__,"level1_choice[k0__][k1__]",level1_choice[k0__][k1__],2); + } + } + current_statement_begin__ = 21; + for (int k0__ = 0; k0__ < N; ++k0__) { + for (int k1__ = 0; k1__ < T; ++k1__) { + check_greater_or_equal(function__,"level2_choice[k0__][k1__]",level2_choice[k0__][k1__],1); + check_less_or_equal(function__,"level2_choice[k0__][k1__]",level2_choice[k0__][k1__],4); + } + } + current_statement_begin__ = 22; + for (int k0__ = 0; k0__ < N; ++k0__) { + for (int k1__ = 0; k1__ < T; ++k1__) { + check_greater_or_equal(function__,"reward[k0__][k1__]",reward[k0__][k1__],0); + check_less_or_equal(function__,"reward[k0__][k1__]",reward[k0__][k1__],1); + } + } + current_statement_begin__ = 23; + check_greater_or_equal(function__,"trans_prob",trans_prob,0); + check_less_or_equal(function__,"trans_prob",trans_prob,1); + // initialize data variables + + + // validate transformed data + + // validate, set parameter ranges + num_params_r__ = 0U; + param_ranges_i__.clear(); + current_statement_begin__ = 30; + validate_non_negative_index("mu_pr", "6", 6); + num_params_r__ += 6; + current_statement_begin__ = 31; + validate_non_negative_index("sigma", "6", 6); + num_params_r__ += 6; + current_statement_begin__ = 34; + validate_non_negative_index("a1_pr", "N", N); + num_params_r__ += N; + current_statement_begin__ = 35; + validate_non_negative_index("beta1_pr", "N", N); + num_params_r__ += N; + current_statement_begin__ = 36; + validate_non_negative_index("a2_pr", "N", N); + num_params_r__ += N; + current_statement_begin__ = 37; + validate_non_negative_index("beta2_pr", "N", N); + num_params_r__ += N; + current_statement_begin__ = 38; + validate_non_negative_index("pi_pr", "N", N); + num_params_r__ += N; + current_statement_begin__ = 39; + validate_non_negative_index("w_pr", "N", N); + num_params_r__ += N; + } catch (const std::exception& e) { + 
stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + + ~model_ts_par6() { } + + + void transform_inits(const stan::io::var_context& context__, + std::vector& params_i__, + std::vector& params_r__, + std::ostream* pstream__) const { + stan::io::writer writer__(params_r__,params_i__); + size_t pos__; + (void) pos__; // dummy call to supress warning + std::vector vals_r__; + std::vector vals_i__; + + if (!(context__.contains_r("mu_pr"))) + throw std::runtime_error("variable mu_pr missing"); + vals_r__ = context__.vals_r("mu_pr"); + pos__ = 0U; + validate_non_negative_index("mu_pr", "6", 6); + context__.validate_dims("initialization", "mu_pr", "vector_d", context__.to_vec(6)); + vector_d mu_pr(static_cast(6)); + for (int j1__ = 0U; j1__ < 6; ++j1__) + mu_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(mu_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable mu_pr: ") + e.what()); + } + + if (!(context__.contains_r("sigma"))) + throw std::runtime_error("variable sigma missing"); + vals_r__ = context__.vals_r("sigma"); + pos__ = 0U; + validate_non_negative_index("sigma", "6", 6); + context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(6)); + vector_d sigma(static_cast(6)); + for (int j1__ = 0U; j1__ < 6; ++j1__) + sigma(j1__) = vals_r__[pos__++]; + try { + writer__.vector_lb_unconstrain(0,sigma); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); + } + + if (!(context__.contains_r("a1_pr"))) + throw std::runtime_error("variable a1_pr missing"); + vals_r__ = context__.vals_r("a1_pr"); + pos__ = 0U; + validate_non_negative_index("a1_pr", "N", N); + context__.validate_dims("initialization", "a1_pr", "vector_d", context__.to_vec(N)); 
+ vector_d a1_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + a1_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(a1_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable a1_pr: ") + e.what()); + } + + if (!(context__.contains_r("beta1_pr"))) + throw std::runtime_error("variable beta1_pr missing"); + vals_r__ = context__.vals_r("beta1_pr"); + pos__ = 0U; + validate_non_negative_index("beta1_pr", "N", N); + context__.validate_dims("initialization", "beta1_pr", "vector_d", context__.to_vec(N)); + vector_d beta1_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + beta1_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(beta1_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable beta1_pr: ") + e.what()); + } + + if (!(context__.contains_r("a2_pr"))) + throw std::runtime_error("variable a2_pr missing"); + vals_r__ = context__.vals_r("a2_pr"); + pos__ = 0U; + validate_non_negative_index("a2_pr", "N", N); + context__.validate_dims("initialization", "a2_pr", "vector_d", context__.to_vec(N)); + vector_d a2_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + a2_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(a2_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable a2_pr: ") + e.what()); + } + + if (!(context__.contains_r("beta2_pr"))) + throw std::runtime_error("variable beta2_pr missing"); + vals_r__ = context__.vals_r("beta2_pr"); + pos__ = 0U; + validate_non_negative_index("beta2_pr", "N", N); + context__.validate_dims("initialization", "beta2_pr", "vector_d", context__.to_vec(N)); + vector_d beta2_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + beta2_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(beta2_pr); + } catch (const std::exception& e) { + throw 
std::runtime_error(std::string("Error transforming variable beta2_pr: ") + e.what()); + } + + if (!(context__.contains_r("pi_pr"))) + throw std::runtime_error("variable pi_pr missing"); + vals_r__ = context__.vals_r("pi_pr"); + pos__ = 0U; + validate_non_negative_index("pi_pr", "N", N); + context__.validate_dims("initialization", "pi_pr", "vector_d", context__.to_vec(N)); + vector_d pi_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + pi_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(pi_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable pi_pr: ") + e.what()); + } + + if (!(context__.contains_r("w_pr"))) + throw std::runtime_error("variable w_pr missing"); + vals_r__ = context__.vals_r("w_pr"); + pos__ = 0U; + validate_non_negative_index("w_pr", "N", N); + context__.validate_dims("initialization", "w_pr", "vector_d", context__.to_vec(N)); + vector_d w_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + w_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(w_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable w_pr: ") + e.what()); + } + + params_r__ = writer__.data_r(); + params_i__ = writer__.data_i(); + } + + void transform_inits(const stan::io::var_context& context, + Eigen::Matrix& params_r, + std::ostream* pstream__) const { + std::vector params_r_vec; + std::vector params_i_vec; + transform_inits(context, params_i_vec, params_r_vec, pstream__); + params_r.resize(params_r_vec.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r(i) = params_r_vec[i]; + } + + + template + T__ log_prob(vector& params_r__, + vector& params_i__, + std::ostream* pstream__ = 0) const { + + typedef T__ local_scalar_t__; + + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + + T__ lp__(0.0); + stan::math::accumulator lp_accum__; + + 
try { + // model parameters + stan::io::reader in__(params_r__,params_i__); + + Eigen::Matrix mu_pr; + (void) mu_pr; // dummy to suppress unused var warning + if (jacobian__) + mu_pr = in__.vector_constrain(6,lp__); + else + mu_pr = in__.vector_constrain(6); + + Eigen::Matrix sigma; + (void) sigma; // dummy to suppress unused var warning + if (jacobian__) + sigma = in__.vector_lb_constrain(0,6,lp__); + else + sigma = in__.vector_lb_constrain(0,6); + + Eigen::Matrix a1_pr; + (void) a1_pr; // dummy to suppress unused var warning + if (jacobian__) + a1_pr = in__.vector_constrain(N,lp__); + else + a1_pr = in__.vector_constrain(N); + + Eigen::Matrix beta1_pr; + (void) beta1_pr; // dummy to suppress unused var warning + if (jacobian__) + beta1_pr = in__.vector_constrain(N,lp__); + else + beta1_pr = in__.vector_constrain(N); + + Eigen::Matrix a2_pr; + (void) a2_pr; // dummy to suppress unused var warning + if (jacobian__) + a2_pr = in__.vector_constrain(N,lp__); + else + a2_pr = in__.vector_constrain(N); + + Eigen::Matrix beta2_pr; + (void) beta2_pr; // dummy to suppress unused var warning + if (jacobian__) + beta2_pr = in__.vector_constrain(N,lp__); + else + beta2_pr = in__.vector_constrain(N); + + Eigen::Matrix pi_pr; + (void) pi_pr; // dummy to suppress unused var warning + if (jacobian__) + pi_pr = in__.vector_constrain(N,lp__); + else + pi_pr = in__.vector_constrain(N); + + Eigen::Matrix w_pr; + (void) w_pr; // dummy to suppress unused var warning + if (jacobian__) + w_pr = in__.vector_constrain(N,lp__); + else + w_pr = in__.vector_constrain(N); + + + // transformed parameters + current_statement_begin__ = 43; + validate_non_negative_index("a1", "N", N); + Eigen::Matrix a1(static_cast(N)); + (void) a1; // dummy to suppress unused var warning + + stan::math::initialize(a1, DUMMY_VAR__); + stan::math::fill(a1,DUMMY_VAR__); + current_statement_begin__ = 44; + validate_non_negative_index("beta1", "N", N); + Eigen::Matrix beta1(static_cast(N)); + (void) beta1; // dummy to 
suppress unused var warning + + stan::math::initialize(beta1, DUMMY_VAR__); + stan::math::fill(beta1,DUMMY_VAR__); + current_statement_begin__ = 45; + validate_non_negative_index("a2", "N", N); + Eigen::Matrix a2(static_cast(N)); + (void) a2; // dummy to suppress unused var warning + + stan::math::initialize(a2, DUMMY_VAR__); + stan::math::fill(a2,DUMMY_VAR__); + current_statement_begin__ = 46; + validate_non_negative_index("beta2", "N", N); + Eigen::Matrix beta2(static_cast(N)); + (void) beta2; // dummy to suppress unused var warning + + stan::math::initialize(beta2, DUMMY_VAR__); + stan::math::fill(beta2,DUMMY_VAR__); + current_statement_begin__ = 47; + validate_non_negative_index("pi", "N", N); + Eigen::Matrix pi(static_cast(N)); + (void) pi; // dummy to suppress unused var warning + + stan::math::initialize(pi, DUMMY_VAR__); + stan::math::fill(pi,DUMMY_VAR__); + current_statement_begin__ = 48; + validate_non_negative_index("w", "N", N); + Eigen::Matrix w(static_cast(N)); + (void) w; // dummy to suppress unused var warning + + stan::math::initialize(w, DUMMY_VAR__); + stan::math::fill(w,DUMMY_VAR__); + + + current_statement_begin__ = 50; + for (int i = 1; i <= N; ++i) { + + current_statement_begin__ = 51; + stan::model::assign(a1, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,1,"mu_pr",1) + (get_base1(sigma,1,"sigma",1) * get_base1(a1_pr,i,"a1_pr",1)))), + "assigning variable a1"); + current_statement_begin__ = 52; + stan::model::assign(beta1, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + stan::math::exp((get_base1(mu_pr,2,"mu_pr",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta1_pr,i,"beta1_pr",1)))), + "assigning variable beta1"); + current_statement_begin__ = 53; + stan::model::assign(a2, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,3,"mu_pr",1) + (get_base1(sigma,3,"sigma",1) * 
get_base1(a2_pr,i,"a2_pr",1)))), + "assigning variable a2"); + current_statement_begin__ = 54; + stan::model::assign(beta2, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + stan::math::exp((get_base1(mu_pr,4,"mu_pr",1) + (get_base1(sigma,4,"sigma",1) * get_base1(beta2_pr,i,"beta2_pr",1)))), + "assigning variable beta2"); + current_statement_begin__ = 55; + stan::model::assign(pi, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (Phi_approx((get_base1(mu_pr,5,"mu_pr",1) + (get_base1(sigma,5,"sigma",1) * get_base1(pi_pr,i,"pi_pr",1)))) * 5), + "assigning variable pi"); + current_statement_begin__ = 56; + stan::model::assign(w, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,6,"mu_pr",1) + (get_base1(sigma,6,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), + "assigning variable w"); + } + + // validate transformed parameters + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(a1(i0__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: a1" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(beta1(i0__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: beta1" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(a2(i0__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: a2" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(beta2(i0__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: beta2" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + for (int i0__ = 0; i0__ < N; ++i0__) { + if 
(stan::math::is_uninitialized(pi(i0__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: pi" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(w(i0__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: w" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + current_statement_begin__ = 43; + check_greater_or_equal(function__,"a1",a1,0); + check_less_or_equal(function__,"a1",a1,1); + current_statement_begin__ = 44; + check_greater_or_equal(function__,"beta1",beta1,0); + current_statement_begin__ = 45; + check_greater_or_equal(function__,"a2",a2,0); + check_less_or_equal(function__,"a2",a2,1); + current_statement_begin__ = 46; + check_greater_or_equal(function__,"beta2",beta2,0); + current_statement_begin__ = 47; + check_greater_or_equal(function__,"pi",pi,0); + check_less_or_equal(function__,"pi",pi,5); + current_statement_begin__ = 48; + check_greater_or_equal(function__,"w",w,0); + check_less_or_equal(function__,"w",w,1); + + // model body + + current_statement_begin__ = 61; + lp_accum__.add(normal_log(mu_pr, 0, 1)); + current_statement_begin__ = 62; + lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); + current_statement_begin__ = 65; + lp_accum__.add(normal_log(a1_pr, 0, 1)); + current_statement_begin__ = 66; + lp_accum__.add(normal_log(beta1_pr, 0, 1)); + current_statement_begin__ = 67; + lp_accum__.add(normal_log(a2_pr, 0, 1)); + current_statement_begin__ = 68; + lp_accum__.add(normal_log(beta2_pr, 0, 1)); + current_statement_begin__ = 69; + lp_accum__.add(normal_log(pi_pr, 0, 1)); + current_statement_begin__ = 70; + lp_accum__.add(normal_log(w_pr, 0, 1)); + current_statement_begin__ = 72; + for (int i = 1; i <= N; ++i) { + { + current_statement_begin__ = 74; + 
validate_non_negative_index("v_mb", "2", 2); + Eigen::Matrix v_mb(static_cast(2)); + (void) v_mb; // dummy to suppress unused var warning + + stan::math::initialize(v_mb, DUMMY_VAR__); + stan::math::fill(v_mb,DUMMY_VAR__); + current_statement_begin__ = 75; + validate_non_negative_index("v_mf", "6", 6); + Eigen::Matrix v_mf(static_cast(6)); + (void) v_mf; // dummy to suppress unused var warning + + stan::math::initialize(v_mf, DUMMY_VAR__); + stan::math::fill(v_mf,DUMMY_VAR__); + current_statement_begin__ = 76; + validate_non_negative_index("v_hybrid", "2", 2); + Eigen::Matrix v_hybrid(static_cast(2)); + (void) v_hybrid; // dummy to suppress unused var warning + + stan::math::initialize(v_hybrid, DUMMY_VAR__); + stan::math::fill(v_hybrid,DUMMY_VAR__); + current_statement_begin__ = 77; + local_scalar_t__ level1_prob_choice2; + (void) level1_prob_choice2; // dummy to suppress unused var warning + + stan::math::initialize(level1_prob_choice2, DUMMY_VAR__); + stan::math::fill(level1_prob_choice2,DUMMY_VAR__); + current_statement_begin__ = 78; + local_scalar_t__ level2_prob_choice2; + (void) level2_prob_choice2; // dummy to suppress unused var warning + + stan::math::initialize(level2_prob_choice2, DUMMY_VAR__); + stan::math::fill(level2_prob_choice2,DUMMY_VAR__); + current_statement_begin__ = 79; + int level1_choice_01(0); + (void) level1_choice_01; // dummy to suppress unused var warning + + stan::math::fill(level1_choice_01, std::numeric_limits::min()); + current_statement_begin__ = 80; + int level2_choice_01(0); + (void) level2_choice_01; // dummy to suppress unused var warning + + stan::math::fill(level2_choice_01, std::numeric_limits::min()); + + + current_statement_begin__ = 83; + stan::math::assign(v_mb, rep_vector(0.0,2)); + current_statement_begin__ = 84; + stan::math::assign(v_mf, rep_vector(0.0,6)); + current_statement_begin__ = 85; + stan::math::assign(v_hybrid, rep_vector(0.0,2)); + current_statement_begin__ = 87; + for (int t = 1; t <= 
get_base1(Tsubj,i,"Tsubj",1); ++t) { + + current_statement_begin__ = 89; + stan::model::assign(v_mb, + stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), + ((trans_prob * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + ((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), + "assigning variable v_mb"); + current_statement_begin__ = 90; + stan::model::assign(v_mb, + stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), + (((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + (trans_prob * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), + "assigning variable v_mb"); + current_statement_begin__ = 93; + stan::model::assign(v_hybrid, + stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), + ((get_base1(w,i,"w",1) * get_base1(v_mb,1,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,1,"v_mf",1))), + "assigning variable v_hybrid"); + current_statement_begin__ = 94; + stan::model::assign(v_hybrid, + stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), + ((get_base1(w,i,"w",1) * get_base1(v_mb,2,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,2,"v_mf",1))), + "assigning variable v_hybrid"); + current_statement_begin__ = 98; + stan::math::assign(level1_choice_01, (get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2) - 1)); + current_statement_begin__ = 99; + if (as_bool(logical_eq(t,1))) { + + current_statement_begin__ = 100; + stan::math::assign(level1_prob_choice2, inv_logit((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))))); + } else { + + current_statement_begin__ = 102; + stan::math::assign(level1_prob_choice2, inv_logit(((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))) 
+ (get_base1(pi,i,"pi",1) * ((2 * get_base1(get_base1(level1_choice,i,"level1_choice",1),(t - 1),"level1_choice",2)) - 3))))); + } + current_statement_begin__ = 104; + lp_accum__.add(bernoulli_log(level1_choice_01, level1_prob_choice2)); + current_statement_begin__ = 107; + stan::model::assign(v_mf, + stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), + (stan::model::rvalue(v_mf, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), "v_mf") + (get_base1(a1,i,"a1",1) * (get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) - get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1)))), + "assigning variable v_mf"); + current_statement_begin__ = 110; + stan::math::assign(level2_choice_01, (1 - modulus(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))); + current_statement_begin__ = 111; + if (as_bool(logical_gt(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))) { + + current_statement_begin__ = 112; + stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,6,"v_mf",1) - get_base1(v_mf,5,"v_mf",1))))); + } else { + + current_statement_begin__ = 114; + stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,4,"v_mf",1) - get_base1(v_mf,3,"v_mf",1))))); + } + current_statement_begin__ = 116; + lp_accum__.add(bernoulli_log(level2_choice_01, level2_prob_choice2)); + current_statement_begin__ = 120; + stan::model::assign(v_mf, + stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), + (stan::model::rvalue(v_mf, 
stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), "v_mf") + (get_base1(a2,i,"a2",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1)))), + "assigning variable v_mf"); + current_statement_begin__ = 123; + stan::model::assign(v_mf, + stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), + (stan::model::rvalue(v_mf, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), "v_mf") + (get_base1(a1,i,"a1",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1)))), + "assigning variable v_mf"); + } + } + } + + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + + lp_accum__.add(lp__); + return lp_accum__.sum(); + + } // log_prob() + + template + T_ log_prob(Eigen::Matrix& params_r, + std::ostream* pstream = 0) const { + std::vector vec_params_r; + vec_params_r.reserve(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + vec_params_r.push_back(params_r(i)); + std::vector vec_params_i; + return log_prob(vec_params_r, vec_params_i, pstream); + } + + + void get_param_names(std::vector& names__) const { + names__.resize(0); + names__.push_back("mu_pr"); + names__.push_back("sigma"); + names__.push_back("a1_pr"); + names__.push_back("beta1_pr"); + names__.push_back("a2_pr"); + names__.push_back("beta2_pr"); + 
names__.push_back("pi_pr"); + names__.push_back("w_pr"); + names__.push_back("a1"); + names__.push_back("beta1"); + names__.push_back("a2"); + names__.push_back("beta2"); + names__.push_back("pi"); + names__.push_back("w"); + names__.push_back("mu_a1"); + names__.push_back("mu_beta1"); + names__.push_back("mu_a2"); + names__.push_back("mu_beta2"); + names__.push_back("mu_pi"); + names__.push_back("mu_w"); + names__.push_back("log_lik"); + names__.push_back("y_pred_step1"); + names__.push_back("y_pred_step2"); + } + + + void get_dims(std::vector >& dimss__) const { + dimss__.resize(0); + std::vector dims__; + dims__.resize(0); + dims__.push_back(6); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(6); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + 
dims__.resize(0); + dims__.push_back(N); + dims__.push_back(T); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dims__.push_back(T); + dimss__.push_back(dims__); + } + + template + void write_array(RNG& base_rng__, + std::vector& params_r__, + std::vector& params_i__, + std::vector& vars__, + bool include_tparams__ = true, + bool include_gqs__ = true, + std::ostream* pstream__ = 0) const { + typedef double local_scalar_t__; + + vars__.resize(0); + stan::io::reader in__(params_r__,params_i__); + static const char* function__ = "model_ts_par6_namespace::write_array"; + (void) function__; // dummy to suppress unused var warning + // read-transform, write parameters + vector_d mu_pr = in__.vector_constrain(6); + vector_d sigma = in__.vector_lb_constrain(0,6); + vector_d a1_pr = in__.vector_constrain(N); + vector_d beta1_pr = in__.vector_constrain(N); + vector_d a2_pr = in__.vector_constrain(N); + vector_d beta2_pr = in__.vector_constrain(N); + vector_d pi_pr = in__.vector_constrain(N); + vector_d w_pr = in__.vector_constrain(N); + for (int k_0__ = 0; k_0__ < 6; ++k_0__) { + vars__.push_back(mu_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < 6; ++k_0__) { + vars__.push_back(sigma[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(a1_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(beta1_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(a2_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(beta2_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(pi_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(w_pr[k_0__]); + } + + // declare and define transformed parameters + double lp__ = 0.0; + (void) lp__; // dummy to suppress unused var warning + stan::math::accumulator lp_accum__; + + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused 
var warning + + try { + current_statement_begin__ = 43; + validate_non_negative_index("a1", "N", N); + Eigen::Matrix a1(static_cast(N)); + (void) a1; // dummy to suppress unused var warning + + stan::math::initialize(a1, DUMMY_VAR__); + stan::math::fill(a1,DUMMY_VAR__); + current_statement_begin__ = 44; + validate_non_negative_index("beta1", "N", N); + Eigen::Matrix beta1(static_cast(N)); + (void) beta1; // dummy to suppress unused var warning + + stan::math::initialize(beta1, DUMMY_VAR__); + stan::math::fill(beta1,DUMMY_VAR__); + current_statement_begin__ = 45; + validate_non_negative_index("a2", "N", N); + Eigen::Matrix a2(static_cast(N)); + (void) a2; // dummy to suppress unused var warning + + stan::math::initialize(a2, DUMMY_VAR__); + stan::math::fill(a2,DUMMY_VAR__); + current_statement_begin__ = 46; + validate_non_negative_index("beta2", "N", N); + Eigen::Matrix beta2(static_cast(N)); + (void) beta2; // dummy to suppress unused var warning + + stan::math::initialize(beta2, DUMMY_VAR__); + stan::math::fill(beta2,DUMMY_VAR__); + current_statement_begin__ = 47; + validate_non_negative_index("pi", "N", N); + Eigen::Matrix pi(static_cast(N)); + (void) pi; // dummy to suppress unused var warning + + stan::math::initialize(pi, DUMMY_VAR__); + stan::math::fill(pi,DUMMY_VAR__); + current_statement_begin__ = 48; + validate_non_negative_index("w", "N", N); + Eigen::Matrix w(static_cast(N)); + (void) w; // dummy to suppress unused var warning + + stan::math::initialize(w, DUMMY_VAR__); + stan::math::fill(w,DUMMY_VAR__); + + + current_statement_begin__ = 50; + for (int i = 1; i <= N; ++i) { + + current_statement_begin__ = 51; + stan::model::assign(a1, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,1,"mu_pr",1) + (get_base1(sigma,1,"sigma",1) * get_base1(a1_pr,i,"a1_pr",1)))), + "assigning variable a1"); + current_statement_begin__ = 52; + stan::model::assign(beta1, + 
stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + stan::math::exp((get_base1(mu_pr,2,"mu_pr",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta1_pr,i,"beta1_pr",1)))), + "assigning variable beta1"); + current_statement_begin__ = 53; + stan::model::assign(a2, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,3,"mu_pr",1) + (get_base1(sigma,3,"sigma",1) * get_base1(a2_pr,i,"a2_pr",1)))), + "assigning variable a2"); + current_statement_begin__ = 54; + stan::model::assign(beta2, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + stan::math::exp((get_base1(mu_pr,4,"mu_pr",1) + (get_base1(sigma,4,"sigma",1) * get_base1(beta2_pr,i,"beta2_pr",1)))), + "assigning variable beta2"); + current_statement_begin__ = 55; + stan::model::assign(pi, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (Phi_approx((get_base1(mu_pr,5,"mu_pr",1) + (get_base1(sigma,5,"sigma",1) * get_base1(pi_pr,i,"pi_pr",1)))) * 5), + "assigning variable pi"); + current_statement_begin__ = 56; + stan::model::assign(w, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,6,"mu_pr",1) + (get_base1(sigma,6,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), + "assigning variable w"); + } + + // validate transformed parameters + current_statement_begin__ = 43; + check_greater_or_equal(function__,"a1",a1,0); + check_less_or_equal(function__,"a1",a1,1); + current_statement_begin__ = 44; + check_greater_or_equal(function__,"beta1",beta1,0); + current_statement_begin__ = 45; + check_greater_or_equal(function__,"a2",a2,0); + check_less_or_equal(function__,"a2",a2,1); + current_statement_begin__ = 46; + check_greater_or_equal(function__,"beta2",beta2,0); + current_statement_begin__ = 47; + check_greater_or_equal(function__,"pi",pi,0); + check_less_or_equal(function__,"pi",pi,5); + 
current_statement_begin__ = 48; + check_greater_or_equal(function__,"w",w,0); + check_less_or_equal(function__,"w",w,1); + + // write transformed parameters + if (include_tparams__) { + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(a1[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(beta1[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(a2[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(beta2[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(pi[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(w[k_0__]); + } + } + if (!include_gqs__) return; + // declare and define generated quantities + current_statement_begin__ = 131; + local_scalar_t__ mu_a1; + (void) mu_a1; // dummy to suppress unused var warning + + stan::math::initialize(mu_a1, DUMMY_VAR__); + stan::math::fill(mu_a1,DUMMY_VAR__); + current_statement_begin__ = 132; + local_scalar_t__ mu_beta1; + (void) mu_beta1; // dummy to suppress unused var warning + + stan::math::initialize(mu_beta1, DUMMY_VAR__); + stan::math::fill(mu_beta1,DUMMY_VAR__); + current_statement_begin__ = 133; + local_scalar_t__ mu_a2; + (void) mu_a2; // dummy to suppress unused var warning + + stan::math::initialize(mu_a2, DUMMY_VAR__); + stan::math::fill(mu_a2,DUMMY_VAR__); + current_statement_begin__ = 134; + local_scalar_t__ mu_beta2; + (void) mu_beta2; // dummy to suppress unused var warning + + stan::math::initialize(mu_beta2, DUMMY_VAR__); + stan::math::fill(mu_beta2,DUMMY_VAR__); + current_statement_begin__ = 135; + local_scalar_t__ mu_pi; + (void) mu_pi; // dummy to suppress unused var warning + + stan::math::initialize(mu_pi, DUMMY_VAR__); + stan::math::fill(mu_pi,DUMMY_VAR__); + current_statement_begin__ = 136; + local_scalar_t__ mu_w; + (void) mu_w; // dummy to suppress unused var warning + + stan::math::initialize(mu_w, DUMMY_VAR__); + stan::math::fill(mu_w,DUMMY_VAR__); + 
current_statement_begin__ = 139; + validate_non_negative_index("log_lik", "N", N); + vector log_lik(N); + stan::math::initialize(log_lik, DUMMY_VAR__); + stan::math::fill(log_lik,DUMMY_VAR__); + current_statement_begin__ = 142; + validate_non_negative_index("y_pred_step1", "N", N); + validate_non_negative_index("y_pred_step1", "T", T); + vector > y_pred_step1(N, (vector(T))); + stan::math::initialize(y_pred_step1, DUMMY_VAR__); + stan::math::fill(y_pred_step1,DUMMY_VAR__); + current_statement_begin__ = 143; + validate_non_negative_index("y_pred_step2", "N", N); + validate_non_negative_index("y_pred_step2", "T", T); + vector > y_pred_step2(N, (vector(T))); + stan::math::initialize(y_pred_step2, DUMMY_VAR__); + stan::math::fill(y_pred_step2,DUMMY_VAR__); + + + current_statement_begin__ = 146; + for (int i = 1; i <= N; ++i) { + + current_statement_begin__ = 147; + for (int t = 1; t <= T; ++t) { + + current_statement_begin__ = 148; + stan::model::assign(y_pred_step1, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), + -(1), + "assigning variable y_pred_step1"); + current_statement_begin__ = 149; + stan::model::assign(y_pred_step2, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), + -(1), + "assigning variable y_pred_step2"); + } + } + current_statement_begin__ = 154; + stan::math::assign(mu_a1, Phi_approx(get_base1(mu_pr,1,"mu_pr",1))); + current_statement_begin__ = 155; + stan::math::assign(mu_beta1, stan::math::exp(get_base1(mu_pr,2,"mu_pr",1))); + current_statement_begin__ = 156; + stan::math::assign(mu_a2, Phi_approx(get_base1(mu_pr,3,"mu_pr",1))); + current_statement_begin__ = 157; + stan::math::assign(mu_beta2, stan::math::exp(get_base1(mu_pr,4,"mu_pr",1))); + current_statement_begin__ = 158; + stan::math::assign(mu_pi, (Phi_approx(get_base1(mu_pr,5,"mu_pr",1)) * 5)); + current_statement_begin__ 
= 159; + stan::math::assign(mu_w, Phi_approx(get_base1(mu_pr,6,"mu_pr",1))); + + current_statement_begin__ = 162; + for (int i = 1; i <= N; ++i) { + { + current_statement_begin__ = 164; + validate_non_negative_index("v_mb", "2", 2); + Eigen::Matrix v_mb(static_cast(2)); + (void) v_mb; // dummy to suppress unused var warning + + stan::math::initialize(v_mb, DUMMY_VAR__); + stan::math::fill(v_mb,DUMMY_VAR__); + current_statement_begin__ = 165; + validate_non_negative_index("v_mf", "6", 6); + Eigen::Matrix v_mf(static_cast(6)); + (void) v_mf; // dummy to suppress unused var warning + + stan::math::initialize(v_mf, DUMMY_VAR__); + stan::math::fill(v_mf,DUMMY_VAR__); + current_statement_begin__ = 166; + validate_non_negative_index("v_hybrid", "2", 2); + Eigen::Matrix v_hybrid(static_cast(2)); + (void) v_hybrid; // dummy to suppress unused var warning + + stan::math::initialize(v_hybrid, DUMMY_VAR__); + stan::math::fill(v_hybrid,DUMMY_VAR__); + current_statement_begin__ = 167; + local_scalar_t__ level1_prob_choice2; + (void) level1_prob_choice2; // dummy to suppress unused var warning + + stan::math::initialize(level1_prob_choice2, DUMMY_VAR__); + stan::math::fill(level1_prob_choice2,DUMMY_VAR__); + current_statement_begin__ = 168; + local_scalar_t__ level2_prob_choice2; + (void) level2_prob_choice2; // dummy to suppress unused var warning + + stan::math::initialize(level2_prob_choice2, DUMMY_VAR__); + stan::math::fill(level2_prob_choice2,DUMMY_VAR__); + current_statement_begin__ = 169; + int level1_choice_01(0); + (void) level1_choice_01; // dummy to suppress unused var warning + + stan::math::fill(level1_choice_01, std::numeric_limits::min()); + current_statement_begin__ = 170; + int level2_choice_01(0); + (void) level2_choice_01; // dummy to suppress unused var warning + + stan::math::fill(level2_choice_01, std::numeric_limits::min()); + + + current_statement_begin__ = 173; + stan::math::assign(v_mb, rep_vector(0.0,2)); + current_statement_begin__ = 174; + 
stan::math::assign(v_mf, rep_vector(0.0,6)); + current_statement_begin__ = 175; + stan::math::assign(v_hybrid, rep_vector(0.0,2)); + current_statement_begin__ = 177; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + 0, + "assigning variable log_lik"); + current_statement_begin__ = 179; + for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { + + current_statement_begin__ = 181; + stan::model::assign(v_mb, + stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), + ((trans_prob * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + ((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), + "assigning variable v_mb"); + current_statement_begin__ = 182; + stan::model::assign(v_mb, + stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), + (((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + (trans_prob * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), + "assigning variable v_mb"); + current_statement_begin__ = 185; + stan::model::assign(v_hybrid, + stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), + ((get_base1(w,i,"w",1) * get_base1(v_mb,1,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,1,"v_mf",1))), + "assigning variable v_hybrid"); + current_statement_begin__ = 186; + stan::model::assign(v_hybrid, + stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), + ((get_base1(w,i,"w",1) * get_base1(v_mb,2,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,2,"v_mf",1))), + "assigning variable v_hybrid"); + current_statement_begin__ = 190; + stan::math::assign(level1_choice_01, (get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2) - 1)); + current_statement_begin__ = 191; + if (as_bool(logical_eq(t,1))) { + + 
current_statement_begin__ = 192; + stan::math::assign(level1_prob_choice2, inv_logit((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))))); + } else { + + current_statement_begin__ = 194; + stan::math::assign(level1_prob_choice2, inv_logit(((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))) + (get_base1(pi,i,"pi",1) * ((2 * get_base1(get_base1(level1_choice,i,"level1_choice",1),(t - 1),"level1_choice",2)) - 3))))); + } + current_statement_begin__ = 196; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + bernoulli_log(level1_choice_01,level1_prob_choice2)), + "assigning variable log_lik"); + current_statement_begin__ = 199; + stan::model::assign(v_mf, + stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), + (stan::model::rvalue(v_mf, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), "v_mf") + (get_base1(a1,i,"a1",1) * (get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) - get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1)))), + "assigning variable v_mf"); + current_statement_begin__ = 202; + stan::math::assign(level2_choice_01, (1 - modulus(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))); + current_statement_begin__ = 204; + if (as_bool(logical_gt(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))) { + + current_statement_begin__ = 205; + stan::math::assign(level2_prob_choice2, 
inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,6,"v_mf",1) - get_base1(v_mf,5,"v_mf",1))))); + } else { + + current_statement_begin__ = 207; + stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,4,"v_mf",1) - get_base1(v_mf,3,"v_mf",1))))); + } + current_statement_begin__ = 209; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + bernoulli_log(level2_choice_01,level2_prob_choice2)), + "assigning variable log_lik"); + current_statement_begin__ = 212; + stan::model::assign(y_pred_step1, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), + bernoulli_rng(level1_prob_choice2, base_rng__), + "assigning variable y_pred_step1"); + current_statement_begin__ = 213; + stan::model::assign(y_pred_step2, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), + bernoulli_rng(level2_prob_choice2, base_rng__), + "assigning variable y_pred_step2"); + current_statement_begin__ = 217; + stan::model::assign(v_mf, + stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), + (stan::model::rvalue(v_mf, stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), "v_mf") + (get_base1(a2,i,"a2",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1)))), + "assigning variable v_mf"); + current_statement_begin__ = 220; + stan::model::assign(v_mf, + 
stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), + (stan::model::rvalue(v_mf, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), "v_mf") + (get_base1(a1,i,"a1",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1)))), + "assigning variable v_mf"); + } + } + } + + // validate generated quantities + current_statement_begin__ = 131; + check_greater_or_equal(function__,"mu_a1",mu_a1,0); + check_less_or_equal(function__,"mu_a1",mu_a1,1); + current_statement_begin__ = 132; + check_greater_or_equal(function__,"mu_beta1",mu_beta1,0); + current_statement_begin__ = 133; + check_greater_or_equal(function__,"mu_a2",mu_a2,0); + check_less_or_equal(function__,"mu_a2",mu_a2,1); + current_statement_begin__ = 134; + check_greater_or_equal(function__,"mu_beta2",mu_beta2,0); + current_statement_begin__ = 135; + check_greater_or_equal(function__,"mu_pi",mu_pi,0); + check_less_or_equal(function__,"mu_pi",mu_pi,5); + current_statement_begin__ = 136; + check_greater_or_equal(function__,"mu_w",mu_w,0); + check_less_or_equal(function__,"mu_w",mu_w,1); + current_statement_begin__ = 139; + current_statement_begin__ = 142; + current_statement_begin__ = 143; + + // write generated quantities + vars__.push_back(mu_a1); + vars__.push_back(mu_beta1); + vars__.push_back(mu_a2); + vars__.push_back(mu_beta2); + vars__.push_back(mu_pi); + vars__.push_back(mu_w); + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(log_lik[k_0__]); + } + for (int k_1__ = 0; k_1__ < T; ++k_1__) { + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(y_pred_step1[k_0__][k_1__]); + } + } + for (int k_1__ = 0; k_1__ < T; ++k_1__) { + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + 
vars__.push_back(y_pred_step2[k_0__][k_1__]); + } + } + + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + + template + void write_array(RNG& base_rng, + Eigen::Matrix& params_r, + Eigen::Matrix& vars, + bool include_tparams = true, + bool include_gqs = true, + std::ostream* pstream = 0) const { + std::vector params_r_vec(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r_vec[i] = params_r(i); + std::vector vars_vec; + std::vector params_i_vec; + write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); + vars.resize(vars_vec.size()); + for (int i = 0; i < vars.size(); ++i) + vars(i) = vars_vec[i]; + } + + static std::string model_name() { + return "model_ts_par6"; + } + + + void constrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "a1_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta1_pr" << '.' 
<< k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "a2_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta2_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "pi_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "w_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + + if (!include_gqs__ && !include_tparams__) return; + + if (include_tparams__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "a1" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta1" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "a2" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta2" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "pi" << '.' 
<< k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "w" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + } + + + if (!include_gqs__) return; + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_a1"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_beta1"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_a2"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_beta2"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_pi"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_w"; + param_names__.push_back(param_name_stream__.str()); + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "log_lik" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_1__ = 1; k_1__ <= T; ++k_1__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred_step1" << '.' << k_0__ << '.' << k_1__; + param_names__.push_back(param_name_stream__.str()); + } + } + for (int k_1__ = 1; k_1__ <= T; ++k_1__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred_step2" << '.' << k_0__ << '.' 
<< k_1__; + param_names__.push_back(param_name_stream__.str()); + } + } + } + + + void unconstrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "a1_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta1_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "a2_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta2_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "pi_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "w_pr" << '.' 
<< k_0__; + param_names__.push_back(param_name_stream__.str()); + } + + if (!include_gqs__ && !include_tparams__) return; + + if (include_tparams__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "a1" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta1" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "a2" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta2" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "pi" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "w" << '.' 
<< k_0__; + param_names__.push_back(param_name_stream__.str()); + } + } + + + if (!include_gqs__) return; + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_a1"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_beta1"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_a2"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_beta2"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_pi"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_w"; + param_names__.push_back(param_name_stream__.str()); + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "log_lik" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_1__ = 1; k_1__ <= T; ++k_1__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred_step1" << '.' << k_0__ << '.' << k_1__; + param_names__.push_back(param_name_stream__.str()); + } + } + for (int k_1__ = 1; k_1__ <= T; ++k_1__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred_step2" << '.' << k_0__ << '.' 
<< k_1__; + param_names__.push_back(param_name_stream__.str()); + } + } + } + +}; // model + +} + +typedef model_ts_par6_namespace::model_ts_par6 stan_model; + + +#endif diff --git a/inst/stan_files/ts_par6.o b/inst/stan_files/ts_par6.o new file mode 100644 index 00000000..64bedd71 Binary files /dev/null and b/inst/stan_files/ts_par6.o differ diff --git a/exec/ts_par6.stan b/inst/stan_files/ts_par6.stan old mode 100755 new mode 100644 similarity index 82% rename from exec/ts_par6.stan rename to inst/stan_files/ts_par6.stan index 671a31f0..b472afa0 --- a/exec/ts_par6.stan +++ b/inst/stan_files/ts_par6.stan @@ -1,3 +1,5 @@ +#include /pre/license.stan + data { int N; int T; @@ -12,7 +14,7 @@ transformed data { parameters { // Declare all parameters as vectors for vectorizing // Hyper(group)-parameters - vector[6] mu_p; + vector[6] mu_pr; vector[6] sigma; // Subject-level raw parameters (for Matt trick) @@ -33,17 +35,17 @@ transformed parameters { vector[N] w; for (i in 1:N) { - a1[i] = Phi_approx( mu_p[1] + sigma[1] * a1_pr[i] ); - beta1[i] = exp( mu_p[2] + sigma[2] * beta1_pr[i] ); - a2[i] = Phi_approx( mu_p[3] + sigma[3] * a2_pr[i] ); - beta2[i] = exp( mu_p[4] + sigma[4] * beta2_pr[i] ); - pi[i] = Phi_approx( mu_p[5] + sigma[5] * pi_pr[i] ) * 5; - w[i] = Phi_approx( mu_p[6] + sigma[6] * w_pr[i] ); + a1[i] = Phi_approx( mu_pr[1] + sigma[1] * a1_pr[i] ); + beta1[i] = exp( mu_pr[2] + sigma[2] * beta1_pr[i] ); + a2[i] = Phi_approx( mu_pr[3] + sigma[3] * a2_pr[i] ); + beta2[i] = exp( mu_pr[4] + sigma[4] * beta2_pr[i] ); + pi[i] = Phi_approx( mu_pr[5] + sigma[5] * pi_pr[i] ) * 5; + w[i] = Phi_approx( mu_pr[6] + sigma[6] * w_pr[i] ); } } model { // Hyperparameters - mu_p ~ normal(0, 1); + mu_pr ~ normal(0, 1); sigma ~ normal(0, 0.2); // individual parameters @@ -89,7 +91,7 @@ model { level1_choice_01 ~ bernoulli( level1_prob_choice2 ); // level 1, prob. 
of choosing 2 in level 1 // Observe Level2 and update Level1 of the chosen option - v_mf[level1_choice[i,t]] = v_mf[level1_choice[i,t]] + a1[i]*(v_mf[2+ level2_choice[i,t]] - v_mf[ level1_choice[i,t]]); + v_mf[level1_choice[i,t]] += a1[i]*(v_mf[2+ level2_choice[i,t]] - v_mf[ level1_choice[i,t]]); // Prob of choosing stim 2 (2 from [1,2] OR 4 from [3,4]) in ** Level (step) 2 ** level2_choice_01 = 1 - modulus(level2_choice[i,t], 2); // 1,3 --> 0; 2,4 --> 1 @@ -102,10 +104,10 @@ model { // After observing the reward at Level 2... // Update Level 2 v_mf of the chosen option. Level 2--> choose one of level 2 options and observe reward - v_mf[2+ level2_choice[i,t]] = v_mf[2+ level2_choice[i,t]] + a2[i]*(reward[i,t] - v_mf[2+ level2_choice[i,t] ] ); + v_mf[2+ level2_choice[i,t]] += a2[i]*(reward[i,t] - v_mf[2+ level2_choice[i,t] ] ); // Update Level 1 v_mf - v_mf[level1_choice[i,t]] = v_mf[level1_choice[i,t]] + a1[i] * (reward[i,t] - v_mf[2+level2_choice[i,t]]); + v_mf[level1_choice[i,t]] += a1[i] * (reward[i,t] - v_mf[2+level2_choice[i,t]]); } // end of t loop } // end of i loop @@ -136,12 +138,12 @@ generated quantities { } // Generate group level parameter values - mu_a1 = Phi_approx( mu_p[1] ); - mu_beta1 = exp( mu_p[2] ); - mu_a2 = Phi_approx( mu_p[3] ); - mu_beta2 = exp( mu_p[4] ); - mu_pi = Phi_approx( mu_p[5] ) * 5; - mu_w = Phi_approx( mu_p[6] ); + mu_a1 = Phi_approx( mu_pr[1] ); + mu_beta1 = exp( mu_pr[2] ); + mu_a2 = Phi_approx( mu_pr[3] ); + mu_beta2 = exp( mu_pr[4] ); + mu_pi = Phi_approx( mu_pr[5] ) * 5; + mu_w = Phi_approx( mu_pr[6] ); { // local section, this saves time and space for (i in 1:N) { @@ -178,10 +180,10 @@ generated quantities { } else{ level1_prob_choice2 = inv_logit( beta1[i]*(v_hybrid[2]-v_hybrid[1]) + pi[i]*(2*level1_choice[i,t-1] -3) ); } - log_lik[i] = log_lik[i] + bernoulli_lpmf( level1_choice_01 | level1_prob_choice2 ); + log_lik[i] += bernoulli_lpmf( level1_choice_01 | level1_prob_choice2 ); // Observe Level2 and update Level1 of the 
chosen option - v_mf[level1_choice[i,t]] = v_mf[level1_choice[i,t]] + a1[i]*(v_mf[2+ level2_choice[i,t]] - v_mf[ level1_choice[i,t]]); + v_mf[level1_choice[i,t]] += a1[i]*(v_mf[2+ level2_choice[i,t]] - v_mf[ level1_choice[i,t]]); // Prob of choosing stim 2 (2 from [1,2] OR 4 from [3,4]) in ** Level (step) 2 ** level2_choice_01 = 1 - modulus(level2_choice[i,t], 2); // 1,3 --> 0; 2,4 @@ -191,7 +193,7 @@ generated quantities { } else { // level2_choice = 1 or 2 level2_prob_choice2 = inv_logit( beta2[i]*( v_mf[4] - v_mf[3] ) ); } - log_lik[i] = log_lik[i] + bernoulli_lpmf( level2_choice_01 | level2_prob_choice2 ); + log_lik[i] += bernoulli_lpmf( level2_choice_01 | level2_prob_choice2 ); // generate posterior prediction for current trial y_pred_step1[i,t] = bernoulli_rng(level1_prob_choice2); @@ -199,12 +201,13 @@ generated quantities { // After observing the reward at Level 2... // Update Level 2 v_mf of the chosen option. Level 2--> choose one of level 2 options and observe reward - v_mf[2+ level2_choice[i,t]] = v_mf[2+ level2_choice[i,t]] + a2[i]*(reward[i,t] - v_mf[2+ level2_choice[i,t] ] ); + v_mf[2+ level2_choice[i,t]] += a2[i]*(reward[i,t] - v_mf[2+ level2_choice[i,t] ] ); // Update Level 1 v_mf - v_mf[level1_choice[i,t]] = v_mf[level1_choice[i,t]] + a1[i] * (reward[i,t] - v_mf[2+level2_choice[i,t]]); + v_mf[level1_choice[i,t]] += a1[i] * (reward[i,t] - v_mf[2+level2_choice[i,t]]); } // end of t loop } // end of i loop - } - } + } +} + diff --git a/inst/stan_files/ts_par7.hpp b/inst/stan_files/ts_par7.hpp new file mode 100644 index 00000000..2bd85227 --- /dev/null +++ b/inst/stan_files/ts_par7.hpp @@ -0,0 +1,1723 @@ +/* + hBayesDM is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. 
+ + hBayesDM is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with hBayesDM. If not, see . +*/ +#ifndef MODELS_HPP +#define MODELS_HPP +#define STAN__SERVICES__COMMAND_HPP +#include +// Code generated by Stan version 2.18.0 + +#include + +namespace model_ts_par7_namespace { + +using std::istream; +using std::string; +using std::stringstream; +using std::vector; +using stan::io::dump; +using stan::math::lgamma; +using stan::model::prob_grad; +using namespace stan::math; + +static int current_statement_begin__; + +stan::io::program_reader prog_reader__() { + stan::io::program_reader reader; + reader.add_event(0, 0, "start", "model_ts_par7"); + reader.add_event(0, 0, "include", "/pre/license.stan"); + reader.add_event(0, 0, "start", "/pre/license.stan"); + reader.add_event(14, 14, "end", "/pre/license.stan"); + reader.add_event(14, 1, "restart", "model_ts_par7"); + reader.add_event(231, 216, "end", "model_ts_par7"); + return reader; +} + +#include + class model_ts_par7 : public prob_grad { +private: + int N; + int T; + vector Tsubj; + vector > level1_choice; + vector > level2_choice; + vector > reward; + double trans_prob; +public: + model_ts_par7(stan::io::var_context& context__, + std::ostream* pstream__ = 0) + : prob_grad(0) { + ctor_body(context__, 0, pstream__); + } + + model_ts_par7(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__ = 0) + : prob_grad(0) { + ctor_body(context__, random_seed__, pstream__); + } + + void ctor_body(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__) { + typedef double local_scalar_t__; + + boost::ecuyer1988 base_rng__ = + stan::services::util::create_rng(random_seed__, 0); + (void) base_rng__; // suppress 
unused var warning + + current_statement_begin__ = -1; + + static const char* function__ = "model_ts_par7_namespace::model_ts_par7"; + (void) function__; // dummy to suppress unused var warning + size_t pos__; + (void) pos__; // dummy to suppress unused var warning + std::vector vals_i__; + std::vector vals_r__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + + // initialize member variables + try { + current_statement_begin__ = 17; + context__.validate_dims("data initialization", "N", "int", context__.to_vec()); + N = int(0); + vals_i__ = context__.vals_i("N"); + pos__ = 0; + N = vals_i__[pos__++]; + current_statement_begin__ = 18; + context__.validate_dims("data initialization", "T", "int", context__.to_vec()); + T = int(0); + vals_i__ = context__.vals_i("T"); + pos__ = 0; + T = vals_i__[pos__++]; + current_statement_begin__ = 19; + validate_non_negative_index("Tsubj", "N", N); + context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); + validate_non_negative_index("Tsubj", "N", N); + Tsubj = std::vector(N,int(0)); + vals_i__ = context__.vals_i("Tsubj"); + pos__ = 0; + size_t Tsubj_limit_0__ = N; + for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { + Tsubj[i_0__] = vals_i__[pos__++]; + } + current_statement_begin__ = 20; + validate_non_negative_index("level1_choice", "N", N); + validate_non_negative_index("level1_choice", "T", T); + context__.validate_dims("data initialization", "level1_choice", "int", context__.to_vec(N,T)); + validate_non_negative_index("level1_choice", "N", N); + validate_non_negative_index("level1_choice", "T", T); + level1_choice = std::vector >(N,std::vector(T,int(0))); + vals_i__ = context__.vals_i("level1_choice"); + pos__ = 0; + size_t level1_choice_limit_1__ = T; + for (size_t i_1__ = 0; i_1__ < level1_choice_limit_1__; ++i_1__) { + size_t level1_choice_limit_0__ = N; + for (size_t i_0__ = 0; i_0__ < level1_choice_limit_0__; 
++i_0__) { + level1_choice[i_0__][i_1__] = vals_i__[pos__++]; + } + } + current_statement_begin__ = 21; + validate_non_negative_index("level2_choice", "N", N); + validate_non_negative_index("level2_choice", "T", T); + context__.validate_dims("data initialization", "level2_choice", "int", context__.to_vec(N,T)); + validate_non_negative_index("level2_choice", "N", N); + validate_non_negative_index("level2_choice", "T", T); + level2_choice = std::vector >(N,std::vector(T,int(0))); + vals_i__ = context__.vals_i("level2_choice"); + pos__ = 0; + size_t level2_choice_limit_1__ = T; + for (size_t i_1__ = 0; i_1__ < level2_choice_limit_1__; ++i_1__) { + size_t level2_choice_limit_0__ = N; + for (size_t i_0__ = 0; i_0__ < level2_choice_limit_0__; ++i_0__) { + level2_choice[i_0__][i_1__] = vals_i__[pos__++]; + } + } + current_statement_begin__ = 22; + validate_non_negative_index("reward", "N", N); + validate_non_negative_index("reward", "T", T); + context__.validate_dims("data initialization", "reward", "int", context__.to_vec(N,T)); + validate_non_negative_index("reward", "N", N); + validate_non_negative_index("reward", "T", T); + reward = std::vector >(N,std::vector(T,int(0))); + vals_i__ = context__.vals_i("reward"); + pos__ = 0; + size_t reward_limit_1__ = T; + for (size_t i_1__ = 0; i_1__ < reward_limit_1__; ++i_1__) { + size_t reward_limit_0__ = N; + for (size_t i_0__ = 0; i_0__ < reward_limit_0__; ++i_0__) { + reward[i_0__][i_1__] = vals_i__[pos__++]; + } + } + current_statement_begin__ = 23; + context__.validate_dims("data initialization", "trans_prob", "double", context__.to_vec()); + trans_prob = double(0); + vals_r__ = context__.vals_r("trans_prob"); + pos__ = 0; + trans_prob = vals_r__[pos__++]; + + // validate, data variables + current_statement_begin__ = 17; + check_greater_or_equal(function__,"N",N,1); + current_statement_begin__ = 18; + check_greater_or_equal(function__,"T",T,1); + current_statement_begin__ = 19; + for (int k0__ = 0; k0__ < N; ++k0__) { + 
check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); + check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); + } + current_statement_begin__ = 20; + for (int k0__ = 0; k0__ < N; ++k0__) { + for (int k1__ = 0; k1__ < T; ++k1__) { + check_greater_or_equal(function__,"level1_choice[k0__][k1__]",level1_choice[k0__][k1__],1); + check_less_or_equal(function__,"level1_choice[k0__][k1__]",level1_choice[k0__][k1__],2); + } + } + current_statement_begin__ = 21; + for (int k0__ = 0; k0__ < N; ++k0__) { + for (int k1__ = 0; k1__ < T; ++k1__) { + check_greater_or_equal(function__,"level2_choice[k0__][k1__]",level2_choice[k0__][k1__],1); + check_less_or_equal(function__,"level2_choice[k0__][k1__]",level2_choice[k0__][k1__],4); + } + } + current_statement_begin__ = 22; + for (int k0__ = 0; k0__ < N; ++k0__) { + for (int k1__ = 0; k1__ < T; ++k1__) { + check_greater_or_equal(function__,"reward[k0__][k1__]",reward[k0__][k1__],0); + check_less_or_equal(function__,"reward[k0__][k1__]",reward[k0__][k1__],1); + } + } + current_statement_begin__ = 23; + check_greater_or_equal(function__,"trans_prob",trans_prob,0); + check_less_or_equal(function__,"trans_prob",trans_prob,1); + // initialize data variables + + + // validate transformed data + + // validate, set parameter ranges + num_params_r__ = 0U; + param_ranges_i__.clear(); + current_statement_begin__ = 30; + validate_non_negative_index("mu_pr", "7", 7); + num_params_r__ += 7; + current_statement_begin__ = 31; + validate_non_negative_index("sigma", "7", 7); + num_params_r__ += 7; + current_statement_begin__ = 34; + validate_non_negative_index("a1_pr", "N", N); + num_params_r__ += N; + current_statement_begin__ = 35; + validate_non_negative_index("beta1_pr", "N", N); + num_params_r__ += N; + current_statement_begin__ = 36; + validate_non_negative_index("a2_pr", "N", N); + num_params_r__ += N; + current_statement_begin__ = 37; + validate_non_negative_index("beta2_pr", "N", N); + num_params_r__ += N; + 
current_statement_begin__ = 38; + validate_non_negative_index("pi_pr", "N", N); + num_params_r__ += N; + current_statement_begin__ = 39; + validate_non_negative_index("w_pr", "N", N); + num_params_r__ += N; + current_statement_begin__ = 40; + validate_non_negative_index("lambda_pr", "N", N); + num_params_r__ += N; + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + + ~model_ts_par7() { } + + + void transform_inits(const stan::io::var_context& context__, + std::vector& params_i__, + std::vector& params_r__, + std::ostream* pstream__) const { + stan::io::writer writer__(params_r__,params_i__); + size_t pos__; + (void) pos__; // dummy call to supress warning + std::vector vals_r__; + std::vector vals_i__; + + if (!(context__.contains_r("mu_pr"))) + throw std::runtime_error("variable mu_pr missing"); + vals_r__ = context__.vals_r("mu_pr"); + pos__ = 0U; + validate_non_negative_index("mu_pr", "7", 7); + context__.validate_dims("initialization", "mu_pr", "vector_d", context__.to_vec(7)); + vector_d mu_pr(static_cast(7)); + for (int j1__ = 0U; j1__ < 7; ++j1__) + mu_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(mu_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable mu_pr: ") + e.what()); + } + + if (!(context__.contains_r("sigma"))) + throw std::runtime_error("variable sigma missing"); + vals_r__ = context__.vals_r("sigma"); + pos__ = 0U; + validate_non_negative_index("sigma", "7", 7); + context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(7)); + vector_d sigma(static_cast(7)); + for (int j1__ = 0U; j1__ < 7; ++j1__) + sigma(j1__) = vals_r__[pos__++]; + try { + writer__.vector_lb_unconstrain(0,sigma); + } catch (const std::exception& e) { + throw 
std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); + } + + if (!(context__.contains_r("a1_pr"))) + throw std::runtime_error("variable a1_pr missing"); + vals_r__ = context__.vals_r("a1_pr"); + pos__ = 0U; + validate_non_negative_index("a1_pr", "N", N); + context__.validate_dims("initialization", "a1_pr", "vector_d", context__.to_vec(N)); + vector_d a1_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + a1_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(a1_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable a1_pr: ") + e.what()); + } + + if (!(context__.contains_r("beta1_pr"))) + throw std::runtime_error("variable beta1_pr missing"); + vals_r__ = context__.vals_r("beta1_pr"); + pos__ = 0U; + validate_non_negative_index("beta1_pr", "N", N); + context__.validate_dims("initialization", "beta1_pr", "vector_d", context__.to_vec(N)); + vector_d beta1_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + beta1_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(beta1_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable beta1_pr: ") + e.what()); + } + + if (!(context__.contains_r("a2_pr"))) + throw std::runtime_error("variable a2_pr missing"); + vals_r__ = context__.vals_r("a2_pr"); + pos__ = 0U; + validate_non_negative_index("a2_pr", "N", N); + context__.validate_dims("initialization", "a2_pr", "vector_d", context__.to_vec(N)); + vector_d a2_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + a2_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(a2_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable a2_pr: ") + e.what()); + } + + if (!(context__.contains_r("beta2_pr"))) + throw std::runtime_error("variable beta2_pr missing"); + vals_r__ = context__.vals_r("beta2_pr"); + pos__ = 0U; + 
validate_non_negative_index("beta2_pr", "N", N); + context__.validate_dims("initialization", "beta2_pr", "vector_d", context__.to_vec(N)); + vector_d beta2_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + beta2_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(beta2_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable beta2_pr: ") + e.what()); + } + + if (!(context__.contains_r("pi_pr"))) + throw std::runtime_error("variable pi_pr missing"); + vals_r__ = context__.vals_r("pi_pr"); + pos__ = 0U; + validate_non_negative_index("pi_pr", "N", N); + context__.validate_dims("initialization", "pi_pr", "vector_d", context__.to_vec(N)); + vector_d pi_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + pi_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(pi_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable pi_pr: ") + e.what()); + } + + if (!(context__.contains_r("w_pr"))) + throw std::runtime_error("variable w_pr missing"); + vals_r__ = context__.vals_r("w_pr"); + pos__ = 0U; + validate_non_negative_index("w_pr", "N", N); + context__.validate_dims("initialization", "w_pr", "vector_d", context__.to_vec(N)); + vector_d w_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + w_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(w_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable w_pr: ") + e.what()); + } + + if (!(context__.contains_r("lambda_pr"))) + throw std::runtime_error("variable lambda_pr missing"); + vals_r__ = context__.vals_r("lambda_pr"); + pos__ = 0U; + validate_non_negative_index("lambda_pr", "N", N); + context__.validate_dims("initialization", "lambda_pr", "vector_d", context__.to_vec(N)); + vector_d lambda_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + lambda_pr(j1__) = vals_r__[pos__++]; + 
try { + writer__.vector_unconstrain(lambda_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable lambda_pr: ") + e.what()); + } + + params_r__ = writer__.data_r(); + params_i__ = writer__.data_i(); + } + + void transform_inits(const stan::io::var_context& context, + Eigen::Matrix& params_r, + std::ostream* pstream__) const { + std::vector params_r_vec; + std::vector params_i_vec; + transform_inits(context, params_i_vec, params_r_vec, pstream__); + params_r.resize(params_r_vec.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r(i) = params_r_vec[i]; + } + + + template + T__ log_prob(vector& params_r__, + vector& params_i__, + std::ostream* pstream__ = 0) const { + + typedef T__ local_scalar_t__; + + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + + T__ lp__(0.0); + stan::math::accumulator lp_accum__; + + try { + // model parameters + stan::io::reader in__(params_r__,params_i__); + + Eigen::Matrix mu_pr; + (void) mu_pr; // dummy to suppress unused var warning + if (jacobian__) + mu_pr = in__.vector_constrain(7,lp__); + else + mu_pr = in__.vector_constrain(7); + + Eigen::Matrix sigma; + (void) sigma; // dummy to suppress unused var warning + if (jacobian__) + sigma = in__.vector_lb_constrain(0,7,lp__); + else + sigma = in__.vector_lb_constrain(0,7); + + Eigen::Matrix a1_pr; + (void) a1_pr; // dummy to suppress unused var warning + if (jacobian__) + a1_pr = in__.vector_constrain(N,lp__); + else + a1_pr = in__.vector_constrain(N); + + Eigen::Matrix beta1_pr; + (void) beta1_pr; // dummy to suppress unused var warning + if (jacobian__) + beta1_pr = in__.vector_constrain(N,lp__); + else + beta1_pr = in__.vector_constrain(N); + + Eigen::Matrix a2_pr; + (void) a2_pr; // dummy to suppress unused var warning + if (jacobian__) + a2_pr = in__.vector_constrain(N,lp__); + else + a2_pr = in__.vector_constrain(N); + + Eigen::Matrix beta2_pr; + 
(void) beta2_pr; // dummy to suppress unused var warning + if (jacobian__) + beta2_pr = in__.vector_constrain(N,lp__); + else + beta2_pr = in__.vector_constrain(N); + + Eigen::Matrix pi_pr; + (void) pi_pr; // dummy to suppress unused var warning + if (jacobian__) + pi_pr = in__.vector_constrain(N,lp__); + else + pi_pr = in__.vector_constrain(N); + + Eigen::Matrix w_pr; + (void) w_pr; // dummy to suppress unused var warning + if (jacobian__) + w_pr = in__.vector_constrain(N,lp__); + else + w_pr = in__.vector_constrain(N); + + Eigen::Matrix lambda_pr; + (void) lambda_pr; // dummy to suppress unused var warning + if (jacobian__) + lambda_pr = in__.vector_constrain(N,lp__); + else + lambda_pr = in__.vector_constrain(N); + + + // transformed parameters + current_statement_begin__ = 44; + validate_non_negative_index("a1", "N", N); + Eigen::Matrix a1(static_cast(N)); + (void) a1; // dummy to suppress unused var warning + + stan::math::initialize(a1, DUMMY_VAR__); + stan::math::fill(a1,DUMMY_VAR__); + current_statement_begin__ = 45; + validate_non_negative_index("beta1", "N", N); + Eigen::Matrix beta1(static_cast(N)); + (void) beta1; // dummy to suppress unused var warning + + stan::math::initialize(beta1, DUMMY_VAR__); + stan::math::fill(beta1,DUMMY_VAR__); + current_statement_begin__ = 46; + validate_non_negative_index("a2", "N", N); + Eigen::Matrix a2(static_cast(N)); + (void) a2; // dummy to suppress unused var warning + + stan::math::initialize(a2, DUMMY_VAR__); + stan::math::fill(a2,DUMMY_VAR__); + current_statement_begin__ = 47; + validate_non_negative_index("beta2", "N", N); + Eigen::Matrix beta2(static_cast(N)); + (void) beta2; // dummy to suppress unused var warning + + stan::math::initialize(beta2, DUMMY_VAR__); + stan::math::fill(beta2,DUMMY_VAR__); + current_statement_begin__ = 48; + validate_non_negative_index("pi", "N", N); + Eigen::Matrix pi(static_cast(N)); + (void) pi; // dummy to suppress unused var warning + + stan::math::initialize(pi, DUMMY_VAR__); + 
stan::math::fill(pi,DUMMY_VAR__); + current_statement_begin__ = 49; + validate_non_negative_index("w", "N", N); + Eigen::Matrix w(static_cast(N)); + (void) w; // dummy to suppress unused var warning + + stan::math::initialize(w, DUMMY_VAR__); + stan::math::fill(w,DUMMY_VAR__); + current_statement_begin__ = 50; + validate_non_negative_index("lambda", "N", N); + Eigen::Matrix lambda(static_cast(N)); + (void) lambda; // dummy to suppress unused var warning + + stan::math::initialize(lambda, DUMMY_VAR__); + stan::math::fill(lambda,DUMMY_VAR__); + + + current_statement_begin__ = 52; + for (int i = 1; i <= N; ++i) { + + current_statement_begin__ = 53; + stan::model::assign(a1, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,1,"mu_pr",1) + (get_base1(sigma,1,"sigma",1) * get_base1(a1_pr,i,"a1_pr",1)))), + "assigning variable a1"); + current_statement_begin__ = 54; + stan::model::assign(beta1, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + stan::math::exp((get_base1(mu_pr,2,"mu_pr",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta1_pr,i,"beta1_pr",1)))), + "assigning variable beta1"); + current_statement_begin__ = 55; + stan::model::assign(a2, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,3,"mu_pr",1) + (get_base1(sigma,3,"sigma",1) * get_base1(a2_pr,i,"a2_pr",1)))), + "assigning variable a2"); + current_statement_begin__ = 56; + stan::model::assign(beta2, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + stan::math::exp((get_base1(mu_pr,4,"mu_pr",1) + (get_base1(sigma,4,"sigma",1) * get_base1(beta2_pr,i,"beta2_pr",1)))), + "assigning variable beta2"); + current_statement_begin__ = 57; + stan::model::assign(pi, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (Phi_approx((get_base1(mu_pr,5,"mu_pr",1) + 
(get_base1(sigma,5,"sigma",1) * get_base1(pi_pr,i,"pi_pr",1)))) * 5), + "assigning variable pi"); + current_statement_begin__ = 58; + stan::model::assign(w, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,6,"mu_pr",1) + (get_base1(sigma,6,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), + "assigning variable w"); + current_statement_begin__ = 59; + stan::model::assign(lambda, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,7,"mu_pr",1) + (get_base1(sigma,7,"sigma",1) * get_base1(lambda_pr,i,"lambda_pr",1)))), + "assigning variable lambda"); + } + + // validate transformed parameters + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(a1(i0__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: a1" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(beta1(i0__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: beta1" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(a2(i0__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: a2" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(beta2(i0__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: beta2" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(pi(i0__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: pi" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(w(i0__))) 
{ + std::stringstream msg__; + msg__ << "Undefined transformed parameter: w" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(lambda(i0__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: lambda" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + current_statement_begin__ = 44; + check_greater_or_equal(function__,"a1",a1,0); + check_less_or_equal(function__,"a1",a1,1); + current_statement_begin__ = 45; + check_greater_or_equal(function__,"beta1",beta1,0); + current_statement_begin__ = 46; + check_greater_or_equal(function__,"a2",a2,0); + check_less_or_equal(function__,"a2",a2,1); + current_statement_begin__ = 47; + check_greater_or_equal(function__,"beta2",beta2,0); + current_statement_begin__ = 48; + check_greater_or_equal(function__,"pi",pi,0); + check_less_or_equal(function__,"pi",pi,5); + current_statement_begin__ = 49; + check_greater_or_equal(function__,"w",w,0); + check_less_or_equal(function__,"w",w,1); + current_statement_begin__ = 50; + check_greater_or_equal(function__,"lambda",lambda,0); + check_less_or_equal(function__,"lambda",lambda,1); + + // model body + + current_statement_begin__ = 64; + lp_accum__.add(normal_log(mu_pr, 0, 1)); + current_statement_begin__ = 65; + lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); + current_statement_begin__ = 68; + lp_accum__.add(normal_log(a1_pr, 0, 1)); + current_statement_begin__ = 69; + lp_accum__.add(normal_log(beta1_pr, 0, 1)); + current_statement_begin__ = 70; + lp_accum__.add(normal_log(a2_pr, 0, 1)); + current_statement_begin__ = 71; + lp_accum__.add(normal_log(beta2_pr, 0, 1)); + current_statement_begin__ = 72; + lp_accum__.add(normal_log(pi_pr, 0, 1)); + current_statement_begin__ = 73; + lp_accum__.add(normal_log(w_pr, 0, 1)); + 
current_statement_begin__ = 74; + lp_accum__.add(normal_log(lambda_pr, 0, 1)); + current_statement_begin__ = 76; + for (int i = 1; i <= N; ++i) { + { + current_statement_begin__ = 78; + validate_non_negative_index("v_mb", "2", 2); + Eigen::Matrix v_mb(static_cast(2)); + (void) v_mb; // dummy to suppress unused var warning + + stan::math::initialize(v_mb, DUMMY_VAR__); + stan::math::fill(v_mb,DUMMY_VAR__); + current_statement_begin__ = 79; + validate_non_negative_index("v_mf", "6", 6); + Eigen::Matrix v_mf(static_cast(6)); + (void) v_mf; // dummy to suppress unused var warning + + stan::math::initialize(v_mf, DUMMY_VAR__); + stan::math::fill(v_mf,DUMMY_VAR__); + current_statement_begin__ = 80; + validate_non_negative_index("v_hybrid", "2", 2); + Eigen::Matrix v_hybrid(static_cast(2)); + (void) v_hybrid; // dummy to suppress unused var warning + + stan::math::initialize(v_hybrid, DUMMY_VAR__); + stan::math::fill(v_hybrid,DUMMY_VAR__); + current_statement_begin__ = 81; + local_scalar_t__ level1_prob_choice2; + (void) level1_prob_choice2; // dummy to suppress unused var warning + + stan::math::initialize(level1_prob_choice2, DUMMY_VAR__); + stan::math::fill(level1_prob_choice2,DUMMY_VAR__); + current_statement_begin__ = 82; + local_scalar_t__ level2_prob_choice2; + (void) level2_prob_choice2; // dummy to suppress unused var warning + + stan::math::initialize(level2_prob_choice2, DUMMY_VAR__); + stan::math::fill(level2_prob_choice2,DUMMY_VAR__); + current_statement_begin__ = 83; + int level1_choice_01(0); + (void) level1_choice_01; // dummy to suppress unused var warning + + stan::math::fill(level1_choice_01, std::numeric_limits::min()); + current_statement_begin__ = 84; + int level2_choice_01(0); + (void) level2_choice_01; // dummy to suppress unused var warning + + stan::math::fill(level2_choice_01, std::numeric_limits::min()); + + + current_statement_begin__ = 87; + stan::math::assign(v_mb, rep_vector(0.0,2)); + current_statement_begin__ = 88; + 
stan::math::assign(v_mf, rep_vector(0.0,6)); + current_statement_begin__ = 89; + stan::math::assign(v_hybrid, rep_vector(0.0,2)); + current_statement_begin__ = 91; + for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { + + current_statement_begin__ = 93; + stan::model::assign(v_mb, + stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), + ((trans_prob * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + ((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), + "assigning variable v_mb"); + current_statement_begin__ = 94; + stan::model::assign(v_mb, + stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), + (((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + (trans_prob * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), + "assigning variable v_mb"); + current_statement_begin__ = 97; + stan::model::assign(v_hybrid, + stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), + ((get_base1(w,i,"w",1) * get_base1(v_mb,1,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,1,"v_mf",1))), + "assigning variable v_hybrid"); + current_statement_begin__ = 98; + stan::model::assign(v_hybrid, + stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), + ((get_base1(w,i,"w",1) * get_base1(v_mb,2,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,2,"v_mf",1))), + "assigning variable v_hybrid"); + current_statement_begin__ = 102; + stan::math::assign(level1_choice_01, (get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2) - 1)); + current_statement_begin__ = 103; + if (as_bool(logical_eq(t,1))) { + + current_statement_begin__ = 104; + stan::math::assign(level1_prob_choice2, inv_logit((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))))); + } else { + + 
current_statement_begin__ = 106; + stan::math::assign(level1_prob_choice2, inv_logit(((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))) + (get_base1(pi,i,"pi",1) * ((2 * get_base1(get_base1(level1_choice,i,"level1_choice",1),(t - 1),"level1_choice",2)) - 3))))); + } + current_statement_begin__ = 108; + lp_accum__.add(bernoulli_log(level1_choice_01, level1_prob_choice2)); + current_statement_begin__ = 111; + stan::model::assign(v_mf, + stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), + (stan::model::rvalue(v_mf, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), "v_mf") + (get_base1(a1,i,"a1",1) * (get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) - get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1)))), + "assigning variable v_mf"); + current_statement_begin__ = 114; + stan::math::assign(level2_choice_01, (1 - modulus(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))); + current_statement_begin__ = 115; + if (as_bool(logical_gt(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))) { + + current_statement_begin__ = 116; + stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,6,"v_mf",1) - get_base1(v_mf,5,"v_mf",1))))); + } else { + + current_statement_begin__ = 118; + stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,4,"v_mf",1) - get_base1(v_mf,3,"v_mf",1))))); + } + current_statement_begin__ = 120; + lp_accum__.add(bernoulli_log(level2_choice_01, level2_prob_choice2)); + current_statement_begin__ = 124; + stan::model::assign(v_mf, + 
stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), + (stan::model::rvalue(v_mf, stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), "v_mf") + (get_base1(a2,i,"a2",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1)))), + "assigning variable v_mf"); + current_statement_begin__ = 127; + stan::model::assign(v_mf, + stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), + (stan::model::rvalue(v_mf, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), "v_mf") + ((get_base1(lambda,i,"lambda",1) * get_base1(a1,i,"a1",1)) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1)))), + "assigning variable v_mf"); + } + } + } + + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + + lp_accum__.add(lp__); + return lp_accum__.sum(); + + } // log_prob() + + template + T_ log_prob(Eigen::Matrix& params_r, + std::ostream* pstream = 0) const { + std::vector vec_params_r; + vec_params_r.reserve(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + vec_params_r.push_back(params_r(i)); + std::vector vec_params_i; + return log_prob(vec_params_r, vec_params_i, pstream); + } + + + void get_param_names(std::vector& names__) const { + 
names__.resize(0); + names__.push_back("mu_pr"); + names__.push_back("sigma"); + names__.push_back("a1_pr"); + names__.push_back("beta1_pr"); + names__.push_back("a2_pr"); + names__.push_back("beta2_pr"); + names__.push_back("pi_pr"); + names__.push_back("w_pr"); + names__.push_back("lambda_pr"); + names__.push_back("a1"); + names__.push_back("beta1"); + names__.push_back("a2"); + names__.push_back("beta2"); + names__.push_back("pi"); + names__.push_back("w"); + names__.push_back("lambda"); + names__.push_back("mu_a1"); + names__.push_back("mu_beta1"); + names__.push_back("mu_a2"); + names__.push_back("mu_beta2"); + names__.push_back("mu_pi"); + names__.push_back("mu_w"); + names__.push_back("mu_lambda"); + names__.push_back("log_lik"); + names__.push_back("y_pred_step1"); + names__.push_back("y_pred_step2"); + } + + + void get_dims(std::vector >& dimss__) const { + dimss__.resize(0); + std::vector dims__; + dims__.resize(0); + dims__.push_back(7); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(7); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); 
+ dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dims__.push_back(T); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dims__.push_back(T); + dimss__.push_back(dims__); + } + + template + void write_array(RNG& base_rng__, + std::vector& params_r__, + std::vector& params_i__, + std::vector& vars__, + bool include_tparams__ = true, + bool include_gqs__ = true, + std::ostream* pstream__ = 0) const { + typedef double local_scalar_t__; + + vars__.resize(0); + stan::io::reader in__(params_r__,params_i__); + static const char* function__ = "model_ts_par7_namespace::write_array"; + (void) function__; // dummy to suppress unused var warning + // read-transform, write parameters + vector_d mu_pr = in__.vector_constrain(7); + vector_d sigma = in__.vector_lb_constrain(0,7); + vector_d a1_pr = in__.vector_constrain(N); + vector_d beta1_pr = in__.vector_constrain(N); + vector_d a2_pr = in__.vector_constrain(N); + vector_d beta2_pr = in__.vector_constrain(N); + vector_d pi_pr = in__.vector_constrain(N); + vector_d w_pr = in__.vector_constrain(N); + vector_d lambda_pr = in__.vector_constrain(N); + for (int k_0__ = 0; k_0__ < 7; ++k_0__) { + vars__.push_back(mu_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < 7; ++k_0__) { + vars__.push_back(sigma[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(a1_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(beta1_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + 
vars__.push_back(a2_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(beta2_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(pi_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(w_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(lambda_pr[k_0__]); + } + + // declare and define transformed parameters + double lp__ = 0.0; + (void) lp__; // dummy to suppress unused var warning + stan::math::accumulator lp_accum__; + + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + + try { + current_statement_begin__ = 44; + validate_non_negative_index("a1", "N", N); + Eigen::Matrix a1(static_cast(N)); + (void) a1; // dummy to suppress unused var warning + + stan::math::initialize(a1, DUMMY_VAR__); + stan::math::fill(a1,DUMMY_VAR__); + current_statement_begin__ = 45; + validate_non_negative_index("beta1", "N", N); + Eigen::Matrix beta1(static_cast(N)); + (void) beta1; // dummy to suppress unused var warning + + stan::math::initialize(beta1, DUMMY_VAR__); + stan::math::fill(beta1,DUMMY_VAR__); + current_statement_begin__ = 46; + validate_non_negative_index("a2", "N", N); + Eigen::Matrix a2(static_cast(N)); + (void) a2; // dummy to suppress unused var warning + + stan::math::initialize(a2, DUMMY_VAR__); + stan::math::fill(a2,DUMMY_VAR__); + current_statement_begin__ = 47; + validate_non_negative_index("beta2", "N", N); + Eigen::Matrix beta2(static_cast(N)); + (void) beta2; // dummy to suppress unused var warning + + stan::math::initialize(beta2, DUMMY_VAR__); + stan::math::fill(beta2,DUMMY_VAR__); + current_statement_begin__ = 48; + validate_non_negative_index("pi", "N", N); + Eigen::Matrix pi(static_cast(N)); + (void) pi; // dummy to suppress unused var warning + + stan::math::initialize(pi, DUMMY_VAR__); + stan::math::fill(pi,DUMMY_VAR__); + current_statement_begin__ = 49; + 
validate_non_negative_index("w", "N", N); + Eigen::Matrix w(static_cast(N)); + (void) w; // dummy to suppress unused var warning + + stan::math::initialize(w, DUMMY_VAR__); + stan::math::fill(w,DUMMY_VAR__); + current_statement_begin__ = 50; + validate_non_negative_index("lambda", "N", N); + Eigen::Matrix lambda(static_cast(N)); + (void) lambda; // dummy to suppress unused var warning + + stan::math::initialize(lambda, DUMMY_VAR__); + stan::math::fill(lambda,DUMMY_VAR__); + + + current_statement_begin__ = 52; + for (int i = 1; i <= N; ++i) { + + current_statement_begin__ = 53; + stan::model::assign(a1, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,1,"mu_pr",1) + (get_base1(sigma,1,"sigma",1) * get_base1(a1_pr,i,"a1_pr",1)))), + "assigning variable a1"); + current_statement_begin__ = 54; + stan::model::assign(beta1, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + stan::math::exp((get_base1(mu_pr,2,"mu_pr",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta1_pr,i,"beta1_pr",1)))), + "assigning variable beta1"); + current_statement_begin__ = 55; + stan::model::assign(a2, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,3,"mu_pr",1) + (get_base1(sigma,3,"sigma",1) * get_base1(a2_pr,i,"a2_pr",1)))), + "assigning variable a2"); + current_statement_begin__ = 56; + stan::model::assign(beta2, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + stan::math::exp((get_base1(mu_pr,4,"mu_pr",1) + (get_base1(sigma,4,"sigma",1) * get_base1(beta2_pr,i,"beta2_pr",1)))), + "assigning variable beta2"); + current_statement_begin__ = 57; + stan::model::assign(pi, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (Phi_approx((get_base1(mu_pr,5,"mu_pr",1) + (get_base1(sigma,5,"sigma",1) * get_base1(pi_pr,i,"pi_pr",1)))) * 5), + "assigning variable pi"); 
+ current_statement_begin__ = 58; + stan::model::assign(w, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,6,"mu_pr",1) + (get_base1(sigma,6,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), + "assigning variable w"); + current_statement_begin__ = 59; + stan::model::assign(lambda, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,7,"mu_pr",1) + (get_base1(sigma,7,"sigma",1) * get_base1(lambda_pr,i,"lambda_pr",1)))), + "assigning variable lambda"); + } + + // validate transformed parameters + current_statement_begin__ = 44; + check_greater_or_equal(function__,"a1",a1,0); + check_less_or_equal(function__,"a1",a1,1); + current_statement_begin__ = 45; + check_greater_or_equal(function__,"beta1",beta1,0); + current_statement_begin__ = 46; + check_greater_or_equal(function__,"a2",a2,0); + check_less_or_equal(function__,"a2",a2,1); + current_statement_begin__ = 47; + check_greater_or_equal(function__,"beta2",beta2,0); + current_statement_begin__ = 48; + check_greater_or_equal(function__,"pi",pi,0); + check_less_or_equal(function__,"pi",pi,5); + current_statement_begin__ = 49; + check_greater_or_equal(function__,"w",w,0); + check_less_or_equal(function__,"w",w,1); + current_statement_begin__ = 50; + check_greater_or_equal(function__,"lambda",lambda,0); + check_less_or_equal(function__,"lambda",lambda,1); + + // write transformed parameters + if (include_tparams__) { + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(a1[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(beta1[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(a2[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(beta2[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(pi[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(w[k_0__]); + } + for (int 
k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(lambda[k_0__]); + } + } + if (!include_gqs__) return; + // declare and define generated quantities + current_statement_begin__ = 134; + local_scalar_t__ mu_a1; + (void) mu_a1; // dummy to suppress unused var warning + + stan::math::initialize(mu_a1, DUMMY_VAR__); + stan::math::fill(mu_a1,DUMMY_VAR__); + current_statement_begin__ = 135; + local_scalar_t__ mu_beta1; + (void) mu_beta1; // dummy to suppress unused var warning + + stan::math::initialize(mu_beta1, DUMMY_VAR__); + stan::math::fill(mu_beta1,DUMMY_VAR__); + current_statement_begin__ = 136; + local_scalar_t__ mu_a2; + (void) mu_a2; // dummy to suppress unused var warning + + stan::math::initialize(mu_a2, DUMMY_VAR__); + stan::math::fill(mu_a2,DUMMY_VAR__); + current_statement_begin__ = 137; + local_scalar_t__ mu_beta2; + (void) mu_beta2; // dummy to suppress unused var warning + + stan::math::initialize(mu_beta2, DUMMY_VAR__); + stan::math::fill(mu_beta2,DUMMY_VAR__); + current_statement_begin__ = 138; + local_scalar_t__ mu_pi; + (void) mu_pi; // dummy to suppress unused var warning + + stan::math::initialize(mu_pi, DUMMY_VAR__); + stan::math::fill(mu_pi,DUMMY_VAR__); + current_statement_begin__ = 139; + local_scalar_t__ mu_w; + (void) mu_w; // dummy to suppress unused var warning + + stan::math::initialize(mu_w, DUMMY_VAR__); + stan::math::fill(mu_w,DUMMY_VAR__); + current_statement_begin__ = 140; + local_scalar_t__ mu_lambda; + (void) mu_lambda; // dummy to suppress unused var warning + + stan::math::initialize(mu_lambda, DUMMY_VAR__); + stan::math::fill(mu_lambda,DUMMY_VAR__); + current_statement_begin__ = 143; + validate_non_negative_index("log_lik", "N", N); + vector log_lik(N); + stan::math::initialize(log_lik, DUMMY_VAR__); + stan::math::fill(log_lik,DUMMY_VAR__); + current_statement_begin__ = 146; + validate_non_negative_index("y_pred_step1", "N", N); + validate_non_negative_index("y_pred_step1", "T", T); + vector > y_pred_step1(N, (vector(T))); + 
stan::math::initialize(y_pred_step1, DUMMY_VAR__); + stan::math::fill(y_pred_step1,DUMMY_VAR__); + current_statement_begin__ = 147; + validate_non_negative_index("y_pred_step2", "N", N); + validate_non_negative_index("y_pred_step2", "T", T); + vector > y_pred_step2(N, (vector(T))); + stan::math::initialize(y_pred_step2, DUMMY_VAR__); + stan::math::fill(y_pred_step2,DUMMY_VAR__); + + + current_statement_begin__ = 150; + for (int i = 1; i <= N; ++i) { + + current_statement_begin__ = 151; + for (int t = 1; t <= T; ++t) { + + current_statement_begin__ = 152; + stan::model::assign(y_pred_step1, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), + -(1), + "assigning variable y_pred_step1"); + current_statement_begin__ = 153; + stan::model::assign(y_pred_step2, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), + -(1), + "assigning variable y_pred_step2"); + } + } + current_statement_begin__ = 158; + stan::math::assign(mu_a1, Phi_approx(get_base1(mu_pr,1,"mu_pr",1))); + current_statement_begin__ = 159; + stan::math::assign(mu_beta1, stan::math::exp(get_base1(mu_pr,2,"mu_pr",1))); + current_statement_begin__ = 160; + stan::math::assign(mu_a2, Phi_approx(get_base1(mu_pr,3,"mu_pr",1))); + current_statement_begin__ = 161; + stan::math::assign(mu_beta2, stan::math::exp(get_base1(mu_pr,4,"mu_pr",1))); + current_statement_begin__ = 162; + stan::math::assign(mu_pi, (Phi_approx(get_base1(mu_pr,5,"mu_pr",1)) * 5)); + current_statement_begin__ = 163; + stan::math::assign(mu_w, Phi_approx(get_base1(mu_pr,6,"mu_pr",1))); + current_statement_begin__ = 164; + stan::math::assign(mu_lambda, Phi_approx(get_base1(mu_pr,7,"mu_pr",1))); + + current_statement_begin__ = 167; + for (int i = 1; i <= N; ++i) { + { + current_statement_begin__ = 169; + validate_non_negative_index("v_mb", "2", 2); + Eigen::Matrix v_mb(static_cast(2)); 
+ (void) v_mb; // dummy to suppress unused var warning + + stan::math::initialize(v_mb, DUMMY_VAR__); + stan::math::fill(v_mb,DUMMY_VAR__); + current_statement_begin__ = 170; + validate_non_negative_index("v_mf", "6", 6); + Eigen::Matrix v_mf(static_cast(6)); + (void) v_mf; // dummy to suppress unused var warning + + stan::math::initialize(v_mf, DUMMY_VAR__); + stan::math::fill(v_mf,DUMMY_VAR__); + current_statement_begin__ = 171; + validate_non_negative_index("v_hybrid", "2", 2); + Eigen::Matrix v_hybrid(static_cast(2)); + (void) v_hybrid; // dummy to suppress unused var warning + + stan::math::initialize(v_hybrid, DUMMY_VAR__); + stan::math::fill(v_hybrid,DUMMY_VAR__); + current_statement_begin__ = 172; + local_scalar_t__ level1_prob_choice2; + (void) level1_prob_choice2; // dummy to suppress unused var warning + + stan::math::initialize(level1_prob_choice2, DUMMY_VAR__); + stan::math::fill(level1_prob_choice2,DUMMY_VAR__); + current_statement_begin__ = 173; + local_scalar_t__ level2_prob_choice2; + (void) level2_prob_choice2; // dummy to suppress unused var warning + + stan::math::initialize(level2_prob_choice2, DUMMY_VAR__); + stan::math::fill(level2_prob_choice2,DUMMY_VAR__); + current_statement_begin__ = 174; + int level1_choice_01(0); + (void) level1_choice_01; // dummy to suppress unused var warning + + stan::math::fill(level1_choice_01, std::numeric_limits::min()); + current_statement_begin__ = 175; + int level2_choice_01(0); + (void) level2_choice_01; // dummy to suppress unused var warning + + stan::math::fill(level2_choice_01, std::numeric_limits::min()); + + + current_statement_begin__ = 178; + stan::math::assign(v_mb, rep_vector(0.0,2)); + current_statement_begin__ = 179; + stan::math::assign(v_mf, rep_vector(0.0,6)); + current_statement_begin__ = 180; + stan::math::assign(v_hybrid, rep_vector(0.0,2)); + current_statement_begin__ = 182; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + 
0, + "assigning variable log_lik"); + current_statement_begin__ = 184; + for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { + + current_statement_begin__ = 186; + stan::model::assign(v_mb, + stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), + ((trans_prob * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + ((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), + "assigning variable v_mb"); + current_statement_begin__ = 187; + stan::model::assign(v_mb, + stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), + (((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + (trans_prob * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), + "assigning variable v_mb"); + current_statement_begin__ = 190; + stan::model::assign(v_hybrid, + stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), + ((get_base1(w,i,"w",1) * get_base1(v_mb,1,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,1,"v_mf",1))), + "assigning variable v_hybrid"); + current_statement_begin__ = 191; + stan::model::assign(v_hybrid, + stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), + ((get_base1(w,i,"w",1) * get_base1(v_mb,2,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,2,"v_mf",1))), + "assigning variable v_hybrid"); + current_statement_begin__ = 195; + stan::math::assign(level1_choice_01, (get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2) - 1)); + current_statement_begin__ = 196; + if (as_bool(logical_eq(t,1))) { + + current_statement_begin__ = 197; + stan::math::assign(level1_prob_choice2, inv_logit((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))))); + } else { + + current_statement_begin__ = 199; + stan::math::assign(level1_prob_choice2, 
inv_logit(((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))) + (get_base1(pi,i,"pi",1) * ((2 * get_base1(get_base1(level1_choice,i,"level1_choice",1),(t - 1),"level1_choice",2)) - 3))))); + } + current_statement_begin__ = 201; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + bernoulli_log(level1_choice_01,level1_prob_choice2)), + "assigning variable log_lik"); + current_statement_begin__ = 204; + stan::model::assign(v_mf, + stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), + (stan::model::rvalue(v_mf, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), "v_mf") + (get_base1(a1,i,"a1",1) * (get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) - get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1)))), + "assigning variable v_mf"); + current_statement_begin__ = 207; + stan::math::assign(level2_choice_01, (1 - modulus(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))); + current_statement_begin__ = 209; + if (as_bool(logical_gt(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))) { + + current_statement_begin__ = 210; + stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,6,"v_mf",1) - get_base1(v_mf,5,"v_mf",1))))); + } else { + + current_statement_begin__ = 212; + stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,4,"v_mf",1) - get_base1(v_mf,3,"v_mf",1))))); + } + 
current_statement_begin__ = 214; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + bernoulli_log(level2_choice_01,level2_prob_choice2)), + "assigning variable log_lik"); + current_statement_begin__ = 217; + stan::model::assign(y_pred_step1, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), + bernoulli_rng(level1_prob_choice2, base_rng__), + "assigning variable y_pred_step1"); + current_statement_begin__ = 218; + stan::model::assign(y_pred_step2, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), + bernoulli_rng(level2_prob_choice2, base_rng__), + "assigning variable y_pred_step2"); + current_statement_begin__ = 222; + stan::model::assign(v_mf, + stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), + (stan::model::rvalue(v_mf, stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), "v_mf") + (get_base1(a2,i,"a2",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1)))), + "assigning variable v_mf"); + current_statement_begin__ = 225; + stan::model::assign(v_mf, + stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), + (stan::model::rvalue(v_mf, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), 
stan::model::nil_index_list()), "v_mf") + ((get_base1(lambda,i,"lambda",1) * get_base1(a1,i,"a1",1)) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1)))), + "assigning variable v_mf"); + } + } + } + + // validate generated quantities + current_statement_begin__ = 134; + check_greater_or_equal(function__,"mu_a1",mu_a1,0); + check_less_or_equal(function__,"mu_a1",mu_a1,1); + current_statement_begin__ = 135; + check_greater_or_equal(function__,"mu_beta1",mu_beta1,0); + current_statement_begin__ = 136; + check_greater_or_equal(function__,"mu_a2",mu_a2,0); + check_less_or_equal(function__,"mu_a2",mu_a2,1); + current_statement_begin__ = 137; + check_greater_or_equal(function__,"mu_beta2",mu_beta2,0); + current_statement_begin__ = 138; + check_greater_or_equal(function__,"mu_pi",mu_pi,0); + check_less_or_equal(function__,"mu_pi",mu_pi,5); + current_statement_begin__ = 139; + check_greater_or_equal(function__,"mu_w",mu_w,0); + check_less_or_equal(function__,"mu_w",mu_w,1); + current_statement_begin__ = 140; + check_greater_or_equal(function__,"mu_lambda",mu_lambda,0); + check_less_or_equal(function__,"mu_lambda",mu_lambda,1); + current_statement_begin__ = 143; + current_statement_begin__ = 146; + current_statement_begin__ = 147; + + // write generated quantities + vars__.push_back(mu_a1); + vars__.push_back(mu_beta1); + vars__.push_back(mu_a2); + vars__.push_back(mu_beta2); + vars__.push_back(mu_pi); + vars__.push_back(mu_w); + vars__.push_back(mu_lambda); + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(log_lik[k_0__]); + } + for (int k_1__ = 0; k_1__ < T; ++k_1__) { + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(y_pred_step1[k_0__][k_1__]); + } + } + for (int k_1__ = 0; k_1__ < T; ++k_1__) { + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(y_pred_step2[k_0__][k_1__]); + } + } + + } catch (const std::exception& e) 
{ + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + + template + void write_array(RNG& base_rng, + Eigen::Matrix& params_r, + Eigen::Matrix& vars, + bool include_tparams = true, + bool include_gqs = true, + std::ostream* pstream = 0) const { + std::vector params_r_vec(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r_vec[i] = params_r(i); + std::vector vars_vec; + std::vector params_i_vec; + write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); + vars.resize(vars_vec.size()); + for (int i = 0; i < vars.size(); ++i) + vars(i) = vars_vec[i]; + } + + static std::string model_name() { + return "model_ts_par7"; + } + + + void constrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + for (int k_0__ = 1; k_0__ <= 7; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= 7; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "a1_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta1_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "a2_pr" << '.' 
<< k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta2_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "pi_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "w_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "lambda_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + + if (!include_gqs__ && !include_tparams__) return; + + if (include_tparams__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "a1" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta1" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "a2" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta2" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "pi" << '.' 
<< k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "w" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "lambda" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + } + + + if (!include_gqs__) return; + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_a1"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_beta1"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_a2"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_beta2"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_pi"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_w"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_lambda"; + param_names__.push_back(param_name_stream__.str()); + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "log_lik" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_1__ = 1; k_1__ <= T; ++k_1__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred_step1" << '.' << k_0__ << '.' 
<< k_1__; + param_names__.push_back(param_name_stream__.str()); + } + } + for (int k_1__ = 1; k_1__ <= T; ++k_1__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred_step2" << '.' << k_0__ << '.' << k_1__; + param_names__.push_back(param_name_stream__.str()); + } + } + } + + + void unconstrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + for (int k_0__ = 1; k_0__ <= 7; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= 7; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "a1_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta1_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "a2_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta2_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "pi_pr" << '.' 
<< k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "w_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "lambda_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + + if (!include_gqs__ && !include_tparams__) return; + + if (include_tparams__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "a1" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta1" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "a2" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta2" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "pi" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "w" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "lambda" << '.' 
<< k_0__; + param_names__.push_back(param_name_stream__.str()); + } + } + + + if (!include_gqs__) return; + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_a1"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_beta1"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_a2"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_beta2"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_pi"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_w"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_lambda"; + param_names__.push_back(param_name_stream__.str()); + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "log_lik" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_1__ = 1; k_1__ <= T; ++k_1__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred_step1" << '.' << k_0__ << '.' << k_1__; + param_names__.push_back(param_name_stream__.str()); + } + } + for (int k_1__ = 1; k_1__ <= T; ++k_1__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred_step2" << '.' << k_0__ << '.' 
<< k_1__; + param_names__.push_back(param_name_stream__.str()); + } + } + } + +}; // model + +} + +typedef model_ts_par7_namespace::model_ts_par7 stan_model; + + +#endif diff --git a/inst/stan_files/ts_par7.o b/inst/stan_files/ts_par7.o new file mode 100644 index 00000000..5ed53a45 Binary files /dev/null and b/inst/stan_files/ts_par7.o differ diff --git a/exec/ts_par7.stan b/inst/stan_files/ts_par7.stan old mode 100755 new mode 100644 similarity index 81% rename from exec/ts_par7.stan rename to inst/stan_files/ts_par7.stan index 065ac64b..089042c2 --- a/exec/ts_par7.stan +++ b/inst/stan_files/ts_par7.stan @@ -1,3 +1,5 @@ +#include /pre/license.stan + data { int N; int T; @@ -12,7 +14,7 @@ transformed data { parameters { // Declare all parameters as vectors for vectorizing // Hyper(group)-parameters - vector[7] mu_p; + vector[7] mu_pr; vector[7] sigma; // Subject-level raw parameters (for Matt trick) @@ -35,18 +37,18 @@ transformed parameters { vector[N] lambda; for (i in 1:N) { - a1[i] = Phi_approx( mu_p[1] + sigma[1] * a1_pr[i] ); - beta1[i] = exp( mu_p[2] + sigma[2] * beta1_pr[i] ); - a2[i] = Phi_approx( mu_p[3] + sigma[3] * a2_pr[i] ); - beta2[i] = exp( mu_p[4] + sigma[4] * beta2_pr[i] ); - pi[i] = Phi_approx( mu_p[5] + sigma[5] * pi_pr[i] ) * 5; - w[i] = Phi_approx( mu_p[6] + sigma[6] * w_pr[i] ); - lambda[i] = Phi_approx( mu_p[7] + sigma[7] * lambda_pr[i] ); + a1[i] = Phi_approx( mu_pr[1] + sigma[1] * a1_pr[i] ); + beta1[i] = exp( mu_pr[2] + sigma[2] * beta1_pr[i] ); + a2[i] = Phi_approx( mu_pr[3] + sigma[3] * a2_pr[i] ); + beta2[i] = exp( mu_pr[4] + sigma[4] * beta2_pr[i] ); + pi[i] = Phi_approx( mu_pr[5] + sigma[5] * pi_pr[i] ) * 5; + w[i] = Phi_approx( mu_pr[6] + sigma[6] * w_pr[i] ); + lambda[i] = Phi_approx( mu_pr[7] + sigma[7] * lambda_pr[i] ); } } model { // Hyperparameters - mu_p ~ normal(0, 1); + mu_pr ~ normal(0, 1); sigma ~ normal(0, 0.2); // individual parameters @@ -93,7 +95,7 @@ model { level1_choice_01 ~ bernoulli( level1_prob_choice2 ); // 
level 1, prob. of choosing 2 in level 1 // Observe Level2 and update Level1 of the chosen option - v_mf[level1_choice[i,t]] = v_mf[level1_choice[i,t]] + a1[i]*(v_mf[2+ level2_choice[i,t]] - v_mf[ level1_choice[i,t]]); + v_mf[level1_choice[i,t]] += a1[i]*(v_mf[2+ level2_choice[i,t]] - v_mf[ level1_choice[i,t]]); // Prob of choosing stim 2 (2 from [1,2] OR 4 from [3,4]) in ** Level (step) 2 ** level2_choice_01 = 1 - modulus(level2_choice[i,t], 2); // 1,3 --> 0; 2,4 --> 1 @@ -106,10 +108,10 @@ model { // After observing the reward at Level 2... // Update Level 2 v_mf of the chosen option. Level 2--> choose one of level 2 options and observe reward - v_mf[2+ level2_choice[i,t]] = v_mf[2+ level2_choice[i,t]] + a2[i]*(reward[i,t] - v_mf[2+ level2_choice[i,t] ] ); + v_mf[2+ level2_choice[i,t]] += a2[i]*(reward[i,t] - v_mf[2+ level2_choice[i,t] ] ); // Update Level 1 v_mf - v_mf[level1_choice[i,t]] = v_mf[level1_choice[i,t]] + lambda[i] * a1[i] * (reward[i,t] - v_mf[2+level2_choice[i,t]]); + v_mf[level1_choice[i,t]] += lambda[i] * a1[i] * (reward[i,t] - v_mf[2+level2_choice[i,t]]); } // end of t loop } // end of i loop } @@ -140,13 +142,13 @@ generated quantities { } // Generate group level parameter values - mu_a1 = Phi_approx( mu_p[1] ); - mu_beta1 = exp( mu_p[2] ); - mu_a2 = Phi_approx( mu_p[3] ); - mu_beta2 = exp( mu_p[4] ); - mu_pi = Phi_approx( mu_p[5] ) * 5; - mu_w = Phi_approx( mu_p[6] ); - mu_lambda = Phi_approx( mu_p[7] ); + mu_a1 = Phi_approx( mu_pr[1] ); + mu_beta1 = exp( mu_pr[2] ); + mu_a2 = Phi_approx( mu_pr[3] ); + mu_beta2 = exp( mu_pr[4] ); + mu_pi = Phi_approx( mu_pr[5] ) * 5; + mu_w = Phi_approx( mu_pr[6] ); + mu_lambda = Phi_approx( mu_pr[7] ); { // local section, this saves time and space for (i in 1:N) { @@ -183,10 +185,10 @@ generated quantities { } else{ level1_prob_choice2 = inv_logit( beta1[i]*(v_hybrid[2]-v_hybrid[1]) + pi[i]*(2*level1_choice[i,t-1] -3) ); } - log_lik[i] = log_lik[i] + bernoulli_lpmf( level1_choice_01 | level1_prob_choice2 ); + 
log_lik[i] += bernoulli_lpmf( level1_choice_01 | level1_prob_choice2 ); // Observe Level2 and update Level1 of the chosen option - v_mf[level1_choice[i,t]] = v_mf[level1_choice[i,t]] + a1[i]*(v_mf[2+ level2_choice[i,t]] - v_mf[ level1_choice[i,t]]); + v_mf[level1_choice[i,t]] += a1[i]*(v_mf[2+ level2_choice[i,t]] - v_mf[ level1_choice[i,t]]); // Prob of choosing stim 2 (2 from [1,2] OR 4 from [3,4]) in ** Level (step) 2 ** level2_choice_01 = 1 - modulus(level2_choice[i,t], 2); // 1,3 --> 0; 2,4 @@ -196,7 +198,7 @@ generated quantities { } else { // level2_choice = 1 or 2 level2_prob_choice2 = inv_logit( beta2[i]*( v_mf[4] - v_mf[3] ) ); } - log_lik[i] = log_lik[i] + bernoulli_lpmf( level2_choice_01 | level2_prob_choice2 ); + log_lik[i] += bernoulli_lpmf( level2_choice_01 | level2_prob_choice2 ); // generate posterior prediction for current trial y_pred_step1[i,t] = bernoulli_rng(level1_prob_choice2); @@ -204,11 +206,12 @@ generated quantities { // After observing the reward at Level 2... // Update Level 2 v_mf of the chosen option. 
Level 2--> choose one of level 2 options and observe reward - v_mf[2+ level2_choice[i,t]] = v_mf[2+ level2_choice[i,t]] + a2[i]*(reward[i,t] - v_mf[2+ level2_choice[i,t] ] ); + v_mf[2+ level2_choice[i,t]] += a2[i]*(reward[i,t] - v_mf[2+ level2_choice[i,t] ] ); // Update Level 1 v_mf - v_mf[level1_choice[i,t]] = v_mf[level1_choice[i,t]] + lambda[i] * a1[i] * (reward[i,t] - v_mf[2+level2_choice[i,t]]); + v_mf[level1_choice[i,t]] += lambda[i] * a1[i] * (reward[i,t] - v_mf[2+level2_choice[i,t]]); } // end of t loop } // end of i loop - } - } + } +} + diff --git a/inst/stan_files/ug_bayes.hpp b/inst/stan_files/ug_bayes.hpp new file mode 100644 index 00000000..eb6651bb --- /dev/null +++ b/inst/stan_files/ug_bayes.hpp @@ -0,0 +1,1156 @@ +/* + hBayesDM is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + hBayesDM is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with hBayesDM. If not, see . 
+*/ +#ifndef MODELS_HPP +#define MODELS_HPP +#define STAN__SERVICES__COMMAND_HPP +#include +// Code generated by Stan version 2.18.0 + +#include + +namespace model_ug_bayes_namespace { + +using std::istream; +using std::string; +using std::stringstream; +using std::vector; +using stan::io::dump; +using stan::math::lgamma; +using stan::model::prob_grad; +using namespace stan::math; + +static int current_statement_begin__; + +stan::io::program_reader prog_reader__() { + stan::io::program_reader reader; + reader.add_event(0, 0, "start", "model_ug_bayes"); + reader.add_event(0, 0, "include", "/pre/license.stan"); + reader.add_event(0, 0, "start", "/pre/license.stan"); + reader.add_event(14, 14, "end", "/pre/license.stan"); + reader.add_event(14, 1, "restart", "model_ug_bayes"); + reader.add_event(181, 166, "end", "model_ug_bayes"); + return reader; +} + +#include + class model_ug_bayes : public prob_grad { +private: + int N; + int T; + vector Tsubj; + vector > offer; + vector > accept; + double initV; + double mu0; + double k0; + double sig20; + double nu0; +public: + model_ug_bayes(stan::io::var_context& context__, + std::ostream* pstream__ = 0) + : prob_grad(0) { + ctor_body(context__, 0, pstream__); + } + + model_ug_bayes(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__ = 0) + : prob_grad(0) { + ctor_body(context__, random_seed__, pstream__); + } + + void ctor_body(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__) { + typedef double local_scalar_t__; + + boost::ecuyer1988 base_rng__ = + stan::services::util::create_rng(random_seed__, 0); + (void) base_rng__; // suppress unused var warning + + current_statement_begin__ = -1; + + static const char* function__ = "model_ug_bayes_namespace::model_ug_bayes"; + (void) function__; // dummy to suppress unused var warning + size_t pos__; + (void) pos__; // dummy to suppress unused var warning + std::vector vals_i__; + std::vector vals_r__; + 
local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + + // initialize member variables + try { + current_statement_begin__ = 17; + context__.validate_dims("data initialization", "N", "int", context__.to_vec()); + N = int(0); + vals_i__ = context__.vals_i("N"); + pos__ = 0; + N = vals_i__[pos__++]; + current_statement_begin__ = 18; + context__.validate_dims("data initialization", "T", "int", context__.to_vec()); + T = int(0); + vals_i__ = context__.vals_i("T"); + pos__ = 0; + T = vals_i__[pos__++]; + current_statement_begin__ = 19; + validate_non_negative_index("Tsubj", "N", N); + context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); + validate_non_negative_index("Tsubj", "N", N); + Tsubj = std::vector(N,int(0)); + vals_i__ = context__.vals_i("Tsubj"); + pos__ = 0; + size_t Tsubj_limit_0__ = N; + for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { + Tsubj[i_0__] = vals_i__[pos__++]; + } + current_statement_begin__ = 20; + validate_non_negative_index("offer", "N", N); + validate_non_negative_index("offer", "T", T); + context__.validate_dims("data initialization", "offer", "double", context__.to_vec(N,T)); + validate_non_negative_index("offer", "N", N); + validate_non_negative_index("offer", "T", T); + offer = std::vector >(N,std::vector(T,double(0))); + vals_r__ = context__.vals_r("offer"); + pos__ = 0; + size_t offer_limit_1__ = T; + for (size_t i_1__ = 0; i_1__ < offer_limit_1__; ++i_1__) { + size_t offer_limit_0__ = N; + for (size_t i_0__ = 0; i_0__ < offer_limit_0__; ++i_0__) { + offer[i_0__][i_1__] = vals_r__[pos__++]; + } + } + current_statement_begin__ = 21; + validate_non_negative_index("accept", "N", N); + validate_non_negative_index("accept", "T", T); + context__.validate_dims("data initialization", "accept", "int", context__.to_vec(N,T)); + validate_non_negative_index("accept", "N", N); + validate_non_negative_index("accept", "T", T); + accept = 
std::vector >(N,std::vector(T,int(0))); + vals_i__ = context__.vals_i("accept"); + pos__ = 0; + size_t accept_limit_1__ = T; + for (size_t i_1__ = 0; i_1__ < accept_limit_1__; ++i_1__) { + size_t accept_limit_0__ = N; + for (size_t i_0__ = 0; i_0__ < accept_limit_0__; ++i_0__) { + accept[i_0__][i_1__] = vals_i__[pos__++]; + } + } + + // validate, data variables + current_statement_begin__ = 17; + check_greater_or_equal(function__,"N",N,1); + current_statement_begin__ = 18; + check_greater_or_equal(function__,"T",T,1); + current_statement_begin__ = 19; + for (int k0__ = 0; k0__ < N; ++k0__) { + check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); + check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); + } + current_statement_begin__ = 20; + current_statement_begin__ = 21; + for (int k0__ = 0; k0__ < N; ++k0__) { + for (int k1__ = 0; k1__ < T; ++k1__) { + check_greater_or_equal(function__,"accept[k0__][k1__]",accept[k0__][k1__],-(1)); + check_less_or_equal(function__,"accept[k0__][k1__]",accept[k0__][k1__],1); + } + } + // initialize data variables + current_statement_begin__ = 25; + initV = double(0); + stan::math::fill(initV,DUMMY_VAR__); + current_statement_begin__ = 26; + mu0 = double(0); + stan::math::fill(mu0,DUMMY_VAR__); + current_statement_begin__ = 27; + k0 = double(0); + stan::math::fill(k0,DUMMY_VAR__); + current_statement_begin__ = 28; + sig20 = double(0); + stan::math::fill(sig20,DUMMY_VAR__); + current_statement_begin__ = 29; + nu0 = double(0); + stan::math::fill(nu0,DUMMY_VAR__); + + current_statement_begin__ = 31; + stan::math::assign(initV, 0.0); + current_statement_begin__ = 32; + stan::math::assign(mu0, 10.0); + current_statement_begin__ = 33; + stan::math::assign(k0, 4.0); + current_statement_begin__ = 34; + stan::math::assign(sig20, 4.0); + current_statement_begin__ = 35; + stan::math::assign(nu0, 10.0); + + // validate transformed data + current_statement_begin__ = 25; + current_statement_begin__ = 26; + 
current_statement_begin__ = 27; + current_statement_begin__ = 28; + current_statement_begin__ = 29; + + // validate, set parameter ranges + num_params_r__ = 0U; + param_ranges_i__.clear(); + current_statement_begin__ = 41; + validate_non_negative_index("mu_pr", "3", 3); + num_params_r__ += 3; + current_statement_begin__ = 42; + validate_non_negative_index("sigma", "3", 3); + num_params_r__ += 3; + current_statement_begin__ = 45; + validate_non_negative_index("alpha_pr", "N", N); + num_params_r__ += N; + current_statement_begin__ = 46; + validate_non_negative_index("beta_pr", "N", N); + num_params_r__ += N; + current_statement_begin__ = 47; + validate_non_negative_index("tau_pr", "N", N); + num_params_r__ += N; + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + + ~model_ug_bayes() { } + + + void transform_inits(const stan::io::var_context& context__, + std::vector& params_i__, + std::vector& params_r__, + std::ostream* pstream__) const { + stan::io::writer writer__(params_r__,params_i__); + size_t pos__; + (void) pos__; // dummy call to supress warning + std::vector vals_r__; + std::vector vals_i__; + + if (!(context__.contains_r("mu_pr"))) + throw std::runtime_error("variable mu_pr missing"); + vals_r__ = context__.vals_r("mu_pr"); + pos__ = 0U; + validate_non_negative_index("mu_pr", "3", 3); + context__.validate_dims("initialization", "mu_pr", "vector_d", context__.to_vec(3)); + vector_d mu_pr(static_cast(3)); + for (int j1__ = 0U; j1__ < 3; ++j1__) + mu_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(mu_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable mu_pr: ") + e.what()); + } + + if (!(context__.contains_r("sigma"))) + throw std::runtime_error("variable sigma missing"); + 
vals_r__ = context__.vals_r("sigma"); + pos__ = 0U; + validate_non_negative_index("sigma", "3", 3); + context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); + vector_d sigma(static_cast(3)); + for (int j1__ = 0U; j1__ < 3; ++j1__) + sigma(j1__) = vals_r__[pos__++]; + try { + writer__.vector_lb_unconstrain(0,sigma); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); + } + + if (!(context__.contains_r("alpha_pr"))) + throw std::runtime_error("variable alpha_pr missing"); + vals_r__ = context__.vals_r("alpha_pr"); + pos__ = 0U; + validate_non_negative_index("alpha_pr", "N", N); + context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); + vector_d alpha_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + alpha_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(alpha_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); + } + + if (!(context__.contains_r("beta_pr"))) + throw std::runtime_error("variable beta_pr missing"); + vals_r__ = context__.vals_r("beta_pr"); + pos__ = 0U; + validate_non_negative_index("beta_pr", "N", N); + context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); + vector_d beta_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + beta_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(beta_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); + } + + if (!(context__.contains_r("tau_pr"))) + throw std::runtime_error("variable tau_pr missing"); + vals_r__ = context__.vals_r("tau_pr"); + pos__ = 0U; + validate_non_negative_index("tau_pr", "N", N); + context__.validate_dims("initialization", "tau_pr", "vector_d", context__.to_vec(N)); + vector_d tau_pr(static_cast(N)); + 
for (int j1__ = 0U; j1__ < N; ++j1__) + tau_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(tau_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable tau_pr: ") + e.what()); + } + + params_r__ = writer__.data_r(); + params_i__ = writer__.data_i(); + } + + void transform_inits(const stan::io::var_context& context, + Eigen::Matrix& params_r, + std::ostream* pstream__) const { + std::vector params_r_vec; + std::vector params_i_vec; + transform_inits(context, params_i_vec, params_r_vec, pstream__); + params_r.resize(params_r_vec.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r(i) = params_r_vec[i]; + } + + + template + T__ log_prob(vector& params_r__, + vector& params_i__, + std::ostream* pstream__ = 0) const { + + typedef T__ local_scalar_t__; + + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + + T__ lp__(0.0); + stan::math::accumulator lp_accum__; + + try { + // model parameters + stan::io::reader in__(params_r__,params_i__); + + Eigen::Matrix mu_pr; + (void) mu_pr; // dummy to suppress unused var warning + if (jacobian__) + mu_pr = in__.vector_constrain(3,lp__); + else + mu_pr = in__.vector_constrain(3); + + Eigen::Matrix sigma; + (void) sigma; // dummy to suppress unused var warning + if (jacobian__) + sigma = in__.vector_lb_constrain(0,3,lp__); + else + sigma = in__.vector_lb_constrain(0,3); + + Eigen::Matrix alpha_pr; + (void) alpha_pr; // dummy to suppress unused var warning + if (jacobian__) + alpha_pr = in__.vector_constrain(N,lp__); + else + alpha_pr = in__.vector_constrain(N); + + Eigen::Matrix beta_pr; + (void) beta_pr; // dummy to suppress unused var warning + if (jacobian__) + beta_pr = in__.vector_constrain(N,lp__); + else + beta_pr = in__.vector_constrain(N); + + Eigen::Matrix tau_pr; + (void) tau_pr; // dummy to suppress unused var warning + if (jacobian__) + tau_pr = 
in__.vector_constrain(N,lp__); + else + tau_pr = in__.vector_constrain(N); + + + // transformed parameters + current_statement_begin__ = 52; + validate_non_negative_index("alpha", "N", N); + vector alpha(N); + stan::math::initialize(alpha, DUMMY_VAR__); + stan::math::fill(alpha,DUMMY_VAR__); + current_statement_begin__ = 53; + validate_non_negative_index("beta", "N", N); + vector beta(N); + stan::math::initialize(beta, DUMMY_VAR__); + stan::math::fill(beta,DUMMY_VAR__); + current_statement_begin__ = 54; + validate_non_negative_index("tau", "N", N); + vector tau(N); + stan::math::initialize(tau, DUMMY_VAR__); + stan::math::fill(tau,DUMMY_VAR__); + + + current_statement_begin__ = 56; + for (int i = 1; i <= N; ++i) { + + current_statement_begin__ = 57; + stan::model::assign(alpha, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (Phi_approx((get_base1(mu_pr,1,"mu_pr",1) + (get_base1(sigma,1,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 20), + "assigning variable alpha"); + current_statement_begin__ = 58; + stan::model::assign(beta, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (Phi_approx((get_base1(mu_pr,2,"mu_pr",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 10), + "assigning variable beta"); + current_statement_begin__ = 59; + stan::model::assign(tau, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (Phi_approx((get_base1(mu_pr,3,"mu_pr",1) + (get_base1(sigma,3,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * 10), + "assigning variable tau"); + } + + // validate transformed parameters + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(alpha[i0__])) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(beta[i0__])) { + 
std::stringstream msg__; + msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(tau[i0__])) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + current_statement_begin__ = 52; + for (int k0__ = 0; k0__ < N; ++k0__) { + check_greater_or_equal(function__,"alpha[k0__]",alpha[k0__],0); + check_less_or_equal(function__,"alpha[k0__]",alpha[k0__],20); + } + current_statement_begin__ = 53; + for (int k0__ = 0; k0__ < N; ++k0__) { + check_greater_or_equal(function__,"beta[k0__]",beta[k0__],0); + check_less_or_equal(function__,"beta[k0__]",beta[k0__],10); + } + current_statement_begin__ = 54; + for (int k0__ = 0; k0__ < N; ++k0__) { + check_greater_or_equal(function__,"tau[k0__]",tau[k0__],0); + check_less_or_equal(function__,"tau[k0__]",tau[k0__],10); + } + + // model body + + current_statement_begin__ = 65; + lp_accum__.add(normal_log(mu_pr, 0, 1)); + current_statement_begin__ = 66; + lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); + current_statement_begin__ = 69; + lp_accum__.add(normal_log(alpha_pr, 0, 1.0)); + current_statement_begin__ = 70; + lp_accum__.add(normal_log(beta_pr, 0, 1.0)); + current_statement_begin__ = 71; + lp_accum__.add(normal_log(tau_pr, 0, 1.0)); + current_statement_begin__ = 73; + for (int i = 1; i <= N; ++i) { + { + current_statement_begin__ = 75; + local_scalar_t__ util; + (void) util; // dummy to suppress unused var warning + + stan::math::initialize(util, DUMMY_VAR__); + stan::math::fill(util,DUMMY_VAR__); + current_statement_begin__ = 76; + local_scalar_t__ mu_old; + (void) mu_old; // dummy to suppress unused var warning + + stan::math::initialize(mu_old, DUMMY_VAR__); + 
stan::math::fill(mu_old,DUMMY_VAR__); + current_statement_begin__ = 77; + local_scalar_t__ mu_new; + (void) mu_new; // dummy to suppress unused var warning + + stan::math::initialize(mu_new, DUMMY_VAR__); + stan::math::fill(mu_new,DUMMY_VAR__); + current_statement_begin__ = 78; + local_scalar_t__ k_old; + (void) k_old; // dummy to suppress unused var warning + + stan::math::initialize(k_old, DUMMY_VAR__); + stan::math::fill(k_old,DUMMY_VAR__); + current_statement_begin__ = 79; + local_scalar_t__ k_new; + (void) k_new; // dummy to suppress unused var warning + + stan::math::initialize(k_new, DUMMY_VAR__); + stan::math::fill(k_new,DUMMY_VAR__); + current_statement_begin__ = 80; + local_scalar_t__ sig2_old; + (void) sig2_old; // dummy to suppress unused var warning + + stan::math::initialize(sig2_old, DUMMY_VAR__); + stan::math::fill(sig2_old,DUMMY_VAR__); + current_statement_begin__ = 81; + local_scalar_t__ sig2_new; + (void) sig2_new; // dummy to suppress unused var warning + + stan::math::initialize(sig2_new, DUMMY_VAR__); + stan::math::fill(sig2_new,DUMMY_VAR__); + current_statement_begin__ = 82; + local_scalar_t__ nu_old; + (void) nu_old; // dummy to suppress unused var warning + + stan::math::initialize(nu_old, DUMMY_VAR__); + stan::math::fill(nu_old,DUMMY_VAR__); + current_statement_begin__ = 83; + local_scalar_t__ nu_new; + (void) nu_new; // dummy to suppress unused var warning + + stan::math::initialize(nu_new, DUMMY_VAR__); + stan::math::fill(nu_new,DUMMY_VAR__); + current_statement_begin__ = 84; + local_scalar_t__ PE; + (void) PE; // dummy to suppress unused var warning + + stan::math::initialize(PE, DUMMY_VAR__); + stan::math::fill(PE,DUMMY_VAR__); + + + current_statement_begin__ = 87; + stan::math::assign(mu_old, mu0); + current_statement_begin__ = 88; + stan::math::assign(k_old, k0); + current_statement_begin__ = 89; + stan::math::assign(sig2_old, sig20); + current_statement_begin__ = 90; + stan::math::assign(nu_old, nu0); + current_statement_begin__ = 
92; + for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { + + current_statement_begin__ = 93; + stan::math::assign(k_new, (k_old + 1)); + current_statement_begin__ = 94; + stan::math::assign(nu_new, (nu_old + 1)); + current_statement_begin__ = 95; + stan::math::assign(mu_new, (((k_old / k_new) * mu_old) + ((1 / k_new) * get_base1(get_base1(offer,i,"offer",1),t,"offer",2)))); + current_statement_begin__ = 96; + stan::math::assign(sig2_new, (((nu_old / nu_new) * sig2_old) + (((1 / nu_new) * (k_old / k_new)) * pow((get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - mu_old),2)))); + current_statement_begin__ = 98; + stan::math::assign(PE, (get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - mu_old)); + current_statement_begin__ = 99; + stan::math::assign(util, ((get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - (get_base1(alpha,i,"alpha",1) * stan::math::fmax((mu_new - get_base1(get_base1(offer,i,"offer",1),t,"offer",2)),0.0))) - (get_base1(beta,i,"beta",1) * stan::math::fmax((get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - mu_new),0.0)))); + current_statement_begin__ = 101; + lp_accum__.add(bernoulli_logit_log(get_base1(get_base1(accept,i,"accept",1),t,"accept",2), (util * get_base1(tau,i,"tau",1)))); + current_statement_begin__ = 104; + stan::math::assign(mu_old, mu_new); + current_statement_begin__ = 105; + stan::math::assign(sig2_old, sig2_new); + current_statement_begin__ = 106; + stan::math::assign(k_old, k_new); + current_statement_begin__ = 107; + stan::math::assign(nu_old, nu_new); + } + } + } + + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + + lp_accum__.add(lp__); + return lp_accum__.sum(); + + } // log_prob() + + template + T_ log_prob(Eigen::Matrix& params_r, + std::ostream* pstream = 0) const { + std::vector vec_params_r; + 
vec_params_r.reserve(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + vec_params_r.push_back(params_r(i)); + std::vector vec_params_i; + return log_prob(vec_params_r, vec_params_i, pstream); + } + + + void get_param_names(std::vector& names__) const { + names__.resize(0); + names__.push_back("mu_pr"); + names__.push_back("sigma"); + names__.push_back("alpha_pr"); + names__.push_back("beta_pr"); + names__.push_back("tau_pr"); + names__.push_back("alpha"); + names__.push_back("beta"); + names__.push_back("tau"); + names__.push_back("mu_alpha"); + names__.push_back("mu_beta"); + names__.push_back("mu_tau"); + names__.push_back("log_lik"); + names__.push_back("y_pred"); + } + + + void get_dims(std::vector >& dimss__) const { + dimss__.resize(0); + std::vector dims__; + dims__.resize(0); + dims__.push_back(3); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(3); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dims__.push_back(T); + dimss__.push_back(dims__); + } + + template + void write_array(RNG& base_rng__, + std::vector& params_r__, + std::vector& params_i__, + std::vector& vars__, + bool include_tparams__ = true, + bool include_gqs__ = true, + std::ostream* pstream__ = 0) const { + typedef double local_scalar_t__; + + vars__.resize(0); + stan::io::reader 
in__(params_r__,params_i__); + static const char* function__ = "model_ug_bayes_namespace::write_array"; + (void) function__; // dummy to suppress unused var warning + // read-transform, write parameters + vector_d mu_pr = in__.vector_constrain(3); + vector_d sigma = in__.vector_lb_constrain(0,3); + vector_d alpha_pr = in__.vector_constrain(N); + vector_d beta_pr = in__.vector_constrain(N); + vector_d tau_pr = in__.vector_constrain(N); + for (int k_0__ = 0; k_0__ < 3; ++k_0__) { + vars__.push_back(mu_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < 3; ++k_0__) { + vars__.push_back(sigma[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(alpha_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(beta_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(tau_pr[k_0__]); + } + + // declare and define transformed parameters + double lp__ = 0.0; + (void) lp__; // dummy to suppress unused var warning + stan::math::accumulator lp_accum__; + + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + + try { + current_statement_begin__ = 52; + validate_non_negative_index("alpha", "N", N); + vector alpha(N); + stan::math::initialize(alpha, DUMMY_VAR__); + stan::math::fill(alpha,DUMMY_VAR__); + current_statement_begin__ = 53; + validate_non_negative_index("beta", "N", N); + vector beta(N); + stan::math::initialize(beta, DUMMY_VAR__); + stan::math::fill(beta,DUMMY_VAR__); + current_statement_begin__ = 54; + validate_non_negative_index("tau", "N", N); + vector tau(N); + stan::math::initialize(tau, DUMMY_VAR__); + stan::math::fill(tau,DUMMY_VAR__); + + + current_statement_begin__ = 56; + for (int i = 1; i <= N; ++i) { + + current_statement_begin__ = 57; + stan::model::assign(alpha, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (Phi_approx((get_base1(mu_pr,1,"mu_pr",1) + (get_base1(sigma,1,"sigma",1) * 
get_base1(alpha_pr,i,"alpha_pr",1)))) * 20), + "assigning variable alpha"); + current_statement_begin__ = 58; + stan::model::assign(beta, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (Phi_approx((get_base1(mu_pr,2,"mu_pr",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 10), + "assigning variable beta"); + current_statement_begin__ = 59; + stan::model::assign(tau, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (Phi_approx((get_base1(mu_pr,3,"mu_pr",1) + (get_base1(sigma,3,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * 10), + "assigning variable tau"); + } + + // validate transformed parameters + current_statement_begin__ = 52; + for (int k0__ = 0; k0__ < N; ++k0__) { + check_greater_or_equal(function__,"alpha[k0__]",alpha[k0__],0); + check_less_or_equal(function__,"alpha[k0__]",alpha[k0__],20); + } + current_statement_begin__ = 53; + for (int k0__ = 0; k0__ < N; ++k0__) { + check_greater_or_equal(function__,"beta[k0__]",beta[k0__],0); + check_less_or_equal(function__,"beta[k0__]",beta[k0__],10); + } + current_statement_begin__ = 54; + for (int k0__ = 0; k0__ < N; ++k0__) { + check_greater_or_equal(function__,"tau[k0__]",tau[k0__],0); + check_less_or_equal(function__,"tau[k0__]",tau[k0__],10); + } + + // write transformed parameters + if (include_tparams__) { + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(alpha[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(beta[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(tau[k_0__]); + } + } + if (!include_gqs__) return; + // declare and define generated quantities + current_statement_begin__ = 114; + local_scalar_t__ mu_alpha; + (void) mu_alpha; // dummy to suppress unused var warning + + stan::math::initialize(mu_alpha, DUMMY_VAR__); + stan::math::fill(mu_alpha,DUMMY_VAR__); + current_statement_begin__ = 115; + local_scalar_t__ mu_beta; + (void) 
mu_beta; // dummy to suppress unused var warning + + stan::math::initialize(mu_beta, DUMMY_VAR__); + stan::math::fill(mu_beta,DUMMY_VAR__); + current_statement_begin__ = 116; + local_scalar_t__ mu_tau; + (void) mu_tau; // dummy to suppress unused var warning + + stan::math::initialize(mu_tau, DUMMY_VAR__); + stan::math::fill(mu_tau,DUMMY_VAR__); + current_statement_begin__ = 119; + validate_non_negative_index("log_lik", "N", N); + vector log_lik(N); + stan::math::initialize(log_lik, DUMMY_VAR__); + stan::math::fill(log_lik,DUMMY_VAR__); + current_statement_begin__ = 122; + validate_non_negative_index("y_pred", "N", N); + validate_non_negative_index("y_pred", "T", T); + vector > y_pred(N, (vector(T))); + stan::math::initialize(y_pred, DUMMY_VAR__); + stan::math::fill(y_pred,DUMMY_VAR__); + + + current_statement_begin__ = 125; + for (int i = 1; i <= N; ++i) { + + current_statement_begin__ = 126; + for (int t = 1; t <= T; ++t) { + + current_statement_begin__ = 127; + stan::model::assign(y_pred, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), + -(1), + "assigning variable y_pred"); + } + } + current_statement_begin__ = 131; + stan::math::assign(mu_alpha, (Phi_approx(get_base1(mu_pr,1,"mu_pr",1)) * 20)); + current_statement_begin__ = 132; + stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_pr,2,"mu_pr",1)) * 10)); + current_statement_begin__ = 133; + stan::math::assign(mu_tau, (Phi_approx(get_base1(mu_pr,3,"mu_pr",1)) * 10)); + + current_statement_begin__ = 136; + for (int i = 1; i <= N; ++i) { + { + current_statement_begin__ = 138; + local_scalar_t__ util; + (void) util; // dummy to suppress unused var warning + + stan::math::initialize(util, DUMMY_VAR__); + stan::math::fill(util,DUMMY_VAR__); + current_statement_begin__ = 139; + local_scalar_t__ mu_old; + (void) mu_old; // dummy to suppress unused var warning + + stan::math::initialize(mu_old, DUMMY_VAR__); + 
stan::math::fill(mu_old,DUMMY_VAR__); + current_statement_begin__ = 140; + local_scalar_t__ mu_new; + (void) mu_new; // dummy to suppress unused var warning + + stan::math::initialize(mu_new, DUMMY_VAR__); + stan::math::fill(mu_new,DUMMY_VAR__); + current_statement_begin__ = 141; + local_scalar_t__ k_old; + (void) k_old; // dummy to suppress unused var warning + + stan::math::initialize(k_old, DUMMY_VAR__); + stan::math::fill(k_old,DUMMY_VAR__); + current_statement_begin__ = 142; + local_scalar_t__ k_new; + (void) k_new; // dummy to suppress unused var warning + + stan::math::initialize(k_new, DUMMY_VAR__); + stan::math::fill(k_new,DUMMY_VAR__); + current_statement_begin__ = 143; + local_scalar_t__ sig2_old; + (void) sig2_old; // dummy to suppress unused var warning + + stan::math::initialize(sig2_old, DUMMY_VAR__); + stan::math::fill(sig2_old,DUMMY_VAR__); + current_statement_begin__ = 144; + local_scalar_t__ sig2_new; + (void) sig2_new; // dummy to suppress unused var warning + + stan::math::initialize(sig2_new, DUMMY_VAR__); + stan::math::fill(sig2_new,DUMMY_VAR__); + current_statement_begin__ = 145; + local_scalar_t__ nu_old; + (void) nu_old; // dummy to suppress unused var warning + + stan::math::initialize(nu_old, DUMMY_VAR__); + stan::math::fill(nu_old,DUMMY_VAR__); + current_statement_begin__ = 146; + local_scalar_t__ nu_new; + (void) nu_new; // dummy to suppress unused var warning + + stan::math::initialize(nu_new, DUMMY_VAR__); + stan::math::fill(nu_new,DUMMY_VAR__); + current_statement_begin__ = 147; + local_scalar_t__ PE; + (void) PE; // dummy to suppress unused var warning + + stan::math::initialize(PE, DUMMY_VAR__); + stan::math::fill(PE,DUMMY_VAR__); + + + current_statement_begin__ = 150; + stan::math::assign(mu_old, mu0); + current_statement_begin__ = 151; + stan::math::assign(k_old, k0); + current_statement_begin__ = 152; + stan::math::assign(sig2_old, sig20); + current_statement_begin__ = 153; + stan::math::assign(nu_old, nu0); + 
current_statement_begin__ = 155; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + 0, + "assigning variable log_lik"); + current_statement_begin__ = 157; + for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { + + current_statement_begin__ = 158; + stan::math::assign(k_new, (k_old + 1)); + current_statement_begin__ = 159; + stan::math::assign(nu_new, (nu_old + 1)); + current_statement_begin__ = 160; + stan::math::assign(mu_new, (((k_old / k_new) * mu_old) + ((1 / k_new) * get_base1(get_base1(offer,i,"offer",1),t,"offer",2)))); + current_statement_begin__ = 161; + stan::math::assign(sig2_new, (((nu_old / nu_new) * sig2_old) + (((1 / nu_new) * (k_old / k_new)) * pow((get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - mu_old),2)))); + current_statement_begin__ = 163; + stan::math::assign(PE, (get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - mu_old)); + current_statement_begin__ = 164; + stan::math::assign(util, ((get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - (get_base1(alpha,i,"alpha",1) * stan::math::fmax((mu_new - get_base1(get_base1(offer,i,"offer",1),t,"offer",2)),0.0))) - (get_base1(beta,i,"beta",1) * stan::math::fmax((get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - mu_new),0.0)))); + current_statement_begin__ = 166; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + bernoulli_logit_log(get_base1(get_base1(accept,i,"accept",1),t,"accept",2),(util * get_base1(tau,i,"tau",1)))), + "assigning variable log_lik"); + current_statement_begin__ = 169; + stan::model::assign(y_pred, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), + bernoulli_rng(inv_logit((util * get_base1(tau,i,"tau",1))), base_rng__), + 
"assigning variable y_pred"); + current_statement_begin__ = 172; + stan::math::assign(mu_old, mu_new); + current_statement_begin__ = 173; + stan::math::assign(sig2_old, sig2_new); + current_statement_begin__ = 174; + stan::math::assign(k_old, k_new); + current_statement_begin__ = 175; + stan::math::assign(nu_old, nu_new); + } + } + } + + // validate generated quantities + current_statement_begin__ = 114; + check_greater_or_equal(function__,"mu_alpha",mu_alpha,0); + check_less_or_equal(function__,"mu_alpha",mu_alpha,20); + current_statement_begin__ = 115; + check_greater_or_equal(function__,"mu_beta",mu_beta,0); + check_less_or_equal(function__,"mu_beta",mu_beta,10); + current_statement_begin__ = 116; + check_greater_or_equal(function__,"mu_tau",mu_tau,0); + check_less_or_equal(function__,"mu_tau",mu_tau,10); + current_statement_begin__ = 119; + current_statement_begin__ = 122; + + // write generated quantities + vars__.push_back(mu_alpha); + vars__.push_back(mu_beta); + vars__.push_back(mu_tau); + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(log_lik[k_0__]); + } + for (int k_1__ = 0; k_1__ < T; ++k_1__) { + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(y_pred[k_0__][k_1__]); + } + } + + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + + template + void write_array(RNG& base_rng, + Eigen::Matrix& params_r, + Eigen::Matrix& vars, + bool include_tparams = true, + bool include_gqs = true, + std::ostream* pstream = 0) const { + std::vector params_r_vec(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r_vec[i] = params_r(i); + std::vector vars_vec; + std::vector params_i_vec; + write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); + vars.resize(vars_vec.size()); + for (int i 
= 0; i < vars.size(); ++i) + vars(i) = vars_vec[i]; + } + + static std::string model_name() { + return "model_ug_bayes"; + } + + + void constrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "alpha_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "tau_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + + if (!include_gqs__ && !include_tparams__) return; + + if (include_tparams__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "alpha" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "tau" << '.' 
<< k_0__; + param_names__.push_back(param_name_stream__.str()); + } + } + + + if (!include_gqs__) return; + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_alpha"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_beta"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_tau"; + param_names__.push_back(param_name_stream__.str()); + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "log_lik" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_1__ = 1; k_1__ <= T; ++k_1__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; + param_names__.push_back(param_name_stream__.str()); + } + } + } + + + void unconstrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "alpha_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta_pr" << '.' 
<< k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "tau_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + + if (!include_gqs__ && !include_tparams__) return; + + if (include_tparams__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "alpha" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "beta" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "tau" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + } + + + if (!include_gqs__) return; + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_alpha"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_beta"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_tau"; + param_names__.push_back(param_name_stream__.str()); + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "log_lik" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_1__ = 1; k_1__ <= T; ++k_1__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; + param_names__.push_back(param_name_stream__.str()); + } + } + } + +}; // model + +} + +typedef model_ug_bayes_namespace::model_ug_bayes stan_model; + + +#endif diff --git a/inst/stan_files/ug_bayes.o b/inst/stan_files/ug_bayes.o new file mode 100644 index 00000000..3b30b638 Binary files /dev/null and b/inst/stan_files/ug_bayes.o differ diff --git a/exec/ug_bayes.stan b/inst/stan_files/ug_bayes.stan old mode 100755 new mode 100644 similarity index 82% rename from exec/ug_bayes.stan rename to inst/stan_files/ug_bayes.stan index b82b5c26..6136e708 --- a/exec/ug_bayes.stan +++ b/inst/stan_files/ug_bayes.stan @@ -1,3 +1,5 @@ +#include /pre/license.stan + data { int N; int T; @@ -23,36 +25,36 @@ transformed data { parameters { // Declare all parameters as vectors for vectorizing // Hyper(group)-parameters - vector[3] mu_p; + vector[3] mu_pr; vector[3] sigma; // Subject-level raw parameters (for Matt trick) vector[N] alpha_pr; // alpha: envy - vector[N] Beta_pr; // Beta: guilt. Use a capital letter B because of built-in 'beta' + vector[N] beta_pr; // beta: guilt vector[N] tau_pr; // tau: inverse temperature } transformed parameters { // Transform subject-level raw parameters real alpha[N]; - real Beta[N]; + real beta[N]; real tau[N]; for (i in 1:N) { - alpha[i] = Phi_approx(mu_p[1] + sigma[1] * alpha_pr[i]) * 20; - Beta[i] = Phi_approx(mu_p[2] + sigma[2] * Beta_pr[i]) * 10; - tau[i] = Phi_approx(mu_p[3] + sigma[3] * tau_pr[i]) * 10; + alpha[i] = Phi_approx(mu_pr[1] + sigma[1] * alpha_pr[i]) * 20; + beta[i] = Phi_approx(mu_pr[2] + sigma[2] * beta_pr[i]) * 10; + tau[i] = Phi_approx(mu_pr[3] + sigma[3] * tau_pr[i]) * 10; } } model { // Hyperparameters - mu_p ~ normal(0, 1); + mu_pr ~ normal(0, 1); sigma ~ normal(0, 0.2); // individual parameters alpha_pr ~ normal(0, 1.0); - Beta_pr ~ normal(0, 1.0); + beta_pr ~ normal(0, 1.0); tau_pr ~ normal(0, 1.0); for (i in 1:N) { @@ -81,7 +83,7 @@ model { sig2_new = (nu_old/nu_new) * sig2_old + (1/nu_new) * (k_old/k_new) * 
pow((offer[i, t] - mu_old), 2); PE = offer[i, t] - mu_old; - util = offer[i, t] - alpha[i] * fmax(mu_new - offer[i, t], 0.0) - Beta[i] * fmax(offer[i, t] - mu_new, 0.0); + util = offer[i, t] - alpha[i] * fmax(mu_new - offer[i, t], 0.0) - beta[i] * fmax(offer[i, t] - mu_new, 0.0); accept[i, t] ~ bernoulli_logit(util * tau[i]); @@ -97,7 +99,7 @@ model { generated quantities { // For group level parameters real mu_alpha; - real mu_Beta; + real mu_beta; real mu_tau; // For log likelihood calculation @@ -113,9 +115,9 @@ generated quantities { } } - mu_alpha = Phi_approx(mu_p[1]) * 20; - mu_Beta = Phi_approx(mu_p[2]) * 10; - mu_tau = Phi_approx(mu_p[3]) * 10; + mu_alpha = Phi_approx(mu_pr[1]) * 20; + mu_beta = Phi_approx(mu_pr[2]) * 10; + mu_tau = Phi_approx(mu_pr[3]) * 10; { // local section, this saves time and space for (i in 1:N) { @@ -146,9 +148,9 @@ generated quantities { sig2_new = (nu_old/nu_new) * sig2_old + (1/nu_new) * (k_old/k_new) * pow((offer[i, t] - mu_old), 2); PE = offer[i, t] - mu_old; - util = offer[i, t] - alpha[i] * fmax(mu_new - offer[i, t], 0.0) - Beta[i] * fmax(offer[i, t] - mu_new, 0.0); + util = offer[i, t] - alpha[i] * fmax(mu_new - offer[i, t], 0.0) - beta[i] * fmax(offer[i, t] - mu_new, 0.0); - log_lik[i] = log_lik[i] + bernoulli_logit_lpmf(accept[i, t] | util * tau[i]); + log_lik[i] += bernoulli_logit_lpmf(accept[i, t] | util * tau[i]); // generate posterior prediction for current trial y_pred[i, t] = bernoulli_rng(inv_logit(util * tau[i])); diff --git a/inst/stan_files/ug_delta.hpp b/inst/stan_files/ug_delta.hpp new file mode 100644 index 00000000..eb48d796 --- /dev/null +++ b/inst/stan_files/ug_delta.hpp @@ -0,0 +1,997 @@ +/* + hBayesDM is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. 
+ + hBayesDM is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with hBayesDM. If not, see . +*/ +#ifndef MODELS_HPP +#define MODELS_HPP +#define STAN__SERVICES__COMMAND_HPP +#include +// Code generated by Stan version 2.18.0 + +#include + +namespace model_ug_delta_namespace { + +using std::istream; +using std::string; +using std::stringstream; +using std::vector; +using stan::io::dump; +using stan::math::lgamma; +using stan::model::prob_grad; +using namespace stan::math; + +static int current_statement_begin__; + +stan::io::program_reader prog_reader__() { + stan::io::program_reader reader; + reader.add_event(0, 0, "start", "model_ug_delta"); + reader.add_event(0, 0, "include", "/pre/license.stan"); + reader.add_event(0, 0, "start", "/pre/license.stan"); + reader.add_event(14, 14, "end", "/pre/license.stan"); + reader.add_event(14, 1, "restart", "model_ug_delta"); + reader.add_event(143, 128, "end", "model_ug_delta"); + return reader; +} + +#include + class model_ug_delta : public prob_grad { +private: + int N; + int T; + vector Tsubj; + vector > offer; + vector > accept; +public: + model_ug_delta(stan::io::var_context& context__, + std::ostream* pstream__ = 0) + : prob_grad(0) { + ctor_body(context__, 0, pstream__); + } + + model_ug_delta(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__ = 0) + : prob_grad(0) { + ctor_body(context__, random_seed__, pstream__); + } + + void ctor_body(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__) { + typedef double local_scalar_t__; + + boost::ecuyer1988 base_rng__ = + stan::services::util::create_rng(random_seed__, 0); + (void) base_rng__; // suppress unused var warning + + current_statement_begin__ 
= -1; + + static const char* function__ = "model_ug_delta_namespace::model_ug_delta"; + (void) function__; // dummy to suppress unused var warning + size_t pos__; + (void) pos__; // dummy to suppress unused var warning + std::vector vals_i__; + std::vector vals_r__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + + // initialize member variables + try { + current_statement_begin__ = 17; + context__.validate_dims("data initialization", "N", "int", context__.to_vec()); + N = int(0); + vals_i__ = context__.vals_i("N"); + pos__ = 0; + N = vals_i__[pos__++]; + current_statement_begin__ = 18; + context__.validate_dims("data initialization", "T", "int", context__.to_vec()); + T = int(0); + vals_i__ = context__.vals_i("T"); + pos__ = 0; + T = vals_i__[pos__++]; + current_statement_begin__ = 19; + validate_non_negative_index("Tsubj", "N", N); + context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); + validate_non_negative_index("Tsubj", "N", N); + Tsubj = std::vector(N,int(0)); + vals_i__ = context__.vals_i("Tsubj"); + pos__ = 0; + size_t Tsubj_limit_0__ = N; + for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { + Tsubj[i_0__] = vals_i__[pos__++]; + } + current_statement_begin__ = 20; + validate_non_negative_index("offer", "N", N); + validate_non_negative_index("offer", "T", T); + context__.validate_dims("data initialization", "offer", "double", context__.to_vec(N,T)); + validate_non_negative_index("offer", "N", N); + validate_non_negative_index("offer", "T", T); + offer = std::vector >(N,std::vector(T,double(0))); + vals_r__ = context__.vals_r("offer"); + pos__ = 0; + size_t offer_limit_1__ = T; + for (size_t i_1__ = 0; i_1__ < offer_limit_1__; ++i_1__) { + size_t offer_limit_0__ = N; + for (size_t i_0__ = 0; i_0__ < offer_limit_0__; ++i_0__) { + offer[i_0__][i_1__] = vals_r__[pos__++]; + } + } + current_statement_begin__ = 21; + 
validate_non_negative_index("accept", "N", N); + validate_non_negative_index("accept", "T", T); + context__.validate_dims("data initialization", "accept", "int", context__.to_vec(N,T)); + validate_non_negative_index("accept", "N", N); + validate_non_negative_index("accept", "T", T); + accept = std::vector >(N,std::vector(T,int(0))); + vals_i__ = context__.vals_i("accept"); + pos__ = 0; + size_t accept_limit_1__ = T; + for (size_t i_1__ = 0; i_1__ < accept_limit_1__; ++i_1__) { + size_t accept_limit_0__ = N; + for (size_t i_0__ = 0; i_0__ < accept_limit_0__; ++i_0__) { + accept[i_0__][i_1__] = vals_i__[pos__++]; + } + } + + // validate, data variables + current_statement_begin__ = 17; + check_greater_or_equal(function__,"N",N,1); + current_statement_begin__ = 18; + check_greater_or_equal(function__,"T",T,1); + current_statement_begin__ = 19; + for (int k0__ = 0; k0__ < N; ++k0__) { + check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); + check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); + } + current_statement_begin__ = 20; + current_statement_begin__ = 21; + for (int k0__ = 0; k0__ < N; ++k0__) { + for (int k1__ = 0; k1__ < T; ++k1__) { + check_greater_or_equal(function__,"accept[k0__][k1__]",accept[k0__][k1__],-(1)); + check_less_or_equal(function__,"accept[k0__][k1__]",accept[k0__][k1__],1); + } + } + // initialize data variables + + + // validate transformed data + + // validate, set parameter ranges + num_params_r__ = 0U; + param_ranges_i__.clear(); + current_statement_begin__ = 30; + validate_non_negative_index("mu_pr", "3", 3); + num_params_r__ += 3; + current_statement_begin__ = 31; + validate_non_negative_index("sigma", "3", 3); + num_params_r__ += 3; + current_statement_begin__ = 34; + validate_non_negative_index("alpha_pr", "N", N); + num_params_r__ += N; + current_statement_begin__ = 35; + validate_non_negative_index("tau_pr", "N", N); + num_params_r__ += N; + current_statement_begin__ = 36; + validate_non_negative_index("ep_pr", 
"N", N); + num_params_r__ += N; + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + + ~model_ug_delta() { } + + + void transform_inits(const stan::io::var_context& context__, + std::vector& params_i__, + std::vector& params_r__, + std::ostream* pstream__) const { + stan::io::writer writer__(params_r__,params_i__); + size_t pos__; + (void) pos__; // dummy call to supress warning + std::vector vals_r__; + std::vector vals_i__; + + if (!(context__.contains_r("mu_pr"))) + throw std::runtime_error("variable mu_pr missing"); + vals_r__ = context__.vals_r("mu_pr"); + pos__ = 0U; + validate_non_negative_index("mu_pr", "3", 3); + context__.validate_dims("initialization", "mu_pr", "vector_d", context__.to_vec(3)); + vector_d mu_pr(static_cast(3)); + for (int j1__ = 0U; j1__ < 3; ++j1__) + mu_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(mu_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable mu_pr: ") + e.what()); + } + + if (!(context__.contains_r("sigma"))) + throw std::runtime_error("variable sigma missing"); + vals_r__ = context__.vals_r("sigma"); + pos__ = 0U; + validate_non_negative_index("sigma", "3", 3); + context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); + vector_d sigma(static_cast(3)); + for (int j1__ = 0U; j1__ < 3; ++j1__) + sigma(j1__) = vals_r__[pos__++]; + try { + writer__.vector_lb_unconstrain(0,sigma); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); + } + + if (!(context__.contains_r("alpha_pr"))) + throw std::runtime_error("variable alpha_pr missing"); + vals_r__ = context__.vals_r("alpha_pr"); + pos__ = 0U; + validate_non_negative_index("alpha_pr", "N", N); + 
context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); + vector_d alpha_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + alpha_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(alpha_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); + } + + if (!(context__.contains_r("tau_pr"))) + throw std::runtime_error("variable tau_pr missing"); + vals_r__ = context__.vals_r("tau_pr"); + pos__ = 0U; + validate_non_negative_index("tau_pr", "N", N); + context__.validate_dims("initialization", "tau_pr", "vector_d", context__.to_vec(N)); + vector_d tau_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + tau_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(tau_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable tau_pr: ") + e.what()); + } + + if (!(context__.contains_r("ep_pr"))) + throw std::runtime_error("variable ep_pr missing"); + vals_r__ = context__.vals_r("ep_pr"); + pos__ = 0U; + validate_non_negative_index("ep_pr", "N", N); + context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); + vector_d ep_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + ep_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(ep_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); + } + + params_r__ = writer__.data_r(); + params_i__ = writer__.data_i(); + } + + void transform_inits(const stan::io::var_context& context, + Eigen::Matrix& params_r, + std::ostream* pstream__) const { + std::vector params_r_vec; + std::vector params_i_vec; + transform_inits(context, params_i_vec, params_r_vec, pstream__); + params_r.resize(params_r_vec.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r(i) = params_r_vec[i]; + } + + + 
template + T__ log_prob(vector& params_r__, + vector& params_i__, + std::ostream* pstream__ = 0) const { + + typedef T__ local_scalar_t__; + + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + + T__ lp__(0.0); + stan::math::accumulator lp_accum__; + + try { + // model parameters + stan::io::reader in__(params_r__,params_i__); + + Eigen::Matrix mu_pr; + (void) mu_pr; // dummy to suppress unused var warning + if (jacobian__) + mu_pr = in__.vector_constrain(3,lp__); + else + mu_pr = in__.vector_constrain(3); + + Eigen::Matrix sigma; + (void) sigma; // dummy to suppress unused var warning + if (jacobian__) + sigma = in__.vector_lb_constrain(0,3,lp__); + else + sigma = in__.vector_lb_constrain(0,3); + + Eigen::Matrix alpha_pr; + (void) alpha_pr; // dummy to suppress unused var warning + if (jacobian__) + alpha_pr = in__.vector_constrain(N,lp__); + else + alpha_pr = in__.vector_constrain(N); + + Eigen::Matrix tau_pr; + (void) tau_pr; // dummy to suppress unused var warning + if (jacobian__) + tau_pr = in__.vector_constrain(N,lp__); + else + tau_pr = in__.vector_constrain(N); + + Eigen::Matrix ep_pr; + (void) ep_pr; // dummy to suppress unused var warning + if (jacobian__) + ep_pr = in__.vector_constrain(N,lp__); + else + ep_pr = in__.vector_constrain(N); + + + // transformed parameters + current_statement_begin__ = 41; + validate_non_negative_index("alpha", "N", N); + vector alpha(N); + stan::math::initialize(alpha, DUMMY_VAR__); + stan::math::fill(alpha,DUMMY_VAR__); + current_statement_begin__ = 42; + validate_non_negative_index("tau", "N", N); + vector tau(N); + stan::math::initialize(tau, DUMMY_VAR__); + stan::math::fill(tau,DUMMY_VAR__); + current_statement_begin__ = 43; + validate_non_negative_index("ep", "N", N); + vector ep(N); + stan::math::initialize(ep, DUMMY_VAR__); + stan::math::fill(ep,DUMMY_VAR__); + + + current_statement_begin__ = 45; + for (int i = 1; i <= N; ++i) { + + 
current_statement_begin__ = 46; + stan::model::assign(alpha, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (Phi_approx((get_base1(mu_pr,1,"mu_pr",1) + (get_base1(sigma,1,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 20), + "assigning variable alpha"); + current_statement_begin__ = 47; + stan::model::assign(tau, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (Phi_approx((get_base1(mu_pr,2,"mu_pr",1) + (get_base1(sigma,2,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * 10), + "assigning variable tau"); + current_statement_begin__ = 48; + stan::model::assign(ep, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,3,"mu_pr",1) + (get_base1(sigma,3,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), + "assigning variable ep"); + } + + // validate transformed parameters + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(alpha[i0__])) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(tau[i0__])) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(ep[i0__])) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: ep" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + current_statement_begin__ = 41; + for (int k0__ = 0; k0__ < N; ++k0__) { + check_greater_or_equal(function__,"alpha[k0__]",alpha[k0__],0); + check_less_or_equal(function__,"alpha[k0__]",alpha[k0__],20); + } + current_statement_begin__ = 42; 
+ for (int k0__ = 0; k0__ < N; ++k0__) { + check_greater_or_equal(function__,"tau[k0__]",tau[k0__],0); + check_less_or_equal(function__,"tau[k0__]",tau[k0__],10); + } + current_statement_begin__ = 43; + for (int k0__ = 0; k0__ < N; ++k0__) { + check_greater_or_equal(function__,"ep[k0__]",ep[k0__],0); + check_less_or_equal(function__,"ep[k0__]",ep[k0__],1); + } + + // model body + + current_statement_begin__ = 54; + lp_accum__.add(normal_log(mu_pr, 0, 1)); + current_statement_begin__ = 55; + lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); + current_statement_begin__ = 58; + lp_accum__.add(normal_log(alpha_pr, 0, 1.0)); + current_statement_begin__ = 59; + lp_accum__.add(normal_log(tau_pr, 0, 1.0)); + current_statement_begin__ = 60; + lp_accum__.add(normal_log(ep_pr, 0, 1.0)); + current_statement_begin__ = 62; + for (int i = 1; i <= N; ++i) { + { + current_statement_begin__ = 64; + local_scalar_t__ f; + (void) f; // dummy to suppress unused var warning + + stan::math::initialize(f, DUMMY_VAR__); + stan::math::fill(f,DUMMY_VAR__); + current_statement_begin__ = 65; + local_scalar_t__ PE; + (void) PE; // dummy to suppress unused var warning + + stan::math::initialize(PE, DUMMY_VAR__); + stan::math::fill(PE,DUMMY_VAR__); + current_statement_begin__ = 66; + local_scalar_t__ util; + (void) util; // dummy to suppress unused var warning + + stan::math::initialize(util, DUMMY_VAR__); + stan::math::fill(util,DUMMY_VAR__); + + + current_statement_begin__ = 69; + stan::math::assign(f, 10.0); + current_statement_begin__ = 71; + for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { + + current_statement_begin__ = 73; + stan::math::assign(PE, (get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - f)); + current_statement_begin__ = 76; + stan::math::assign(util, (get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - (get_base1(alpha,i,"alpha",1) * stan::math::fmax((f - get_base1(get_base1(offer,i,"offer",1),t,"offer",2)),0.0)))); + current_statement_begin__ = 79; + 
lp_accum__.add(bernoulli_logit_log(get_base1(get_base1(accept,i,"accept",1),t,"accept",2), (util * get_base1(tau,i,"tau",1)))); + current_statement_begin__ = 82; + stan::math::assign(f, (f + (get_base1(ep,i,"ep",1) * PE))); + } + } + } + + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + + lp_accum__.add(lp__); + return lp_accum__.sum(); + + } // log_prob() + + template + T_ log_prob(Eigen::Matrix& params_r, + std::ostream* pstream = 0) const { + std::vector vec_params_r; + vec_params_r.reserve(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + vec_params_r.push_back(params_r(i)); + std::vector vec_params_i; + return log_prob(vec_params_r, vec_params_i, pstream); + } + + + void get_param_names(std::vector& names__) const { + names__.resize(0); + names__.push_back("mu_pr"); + names__.push_back("sigma"); + names__.push_back("alpha_pr"); + names__.push_back("tau_pr"); + names__.push_back("ep_pr"); + names__.push_back("alpha"); + names__.push_back("tau"); + names__.push_back("ep"); + names__.push_back("mu_alpha"); + names__.push_back("mu_tau"); + names__.push_back("mu_ep"); + names__.push_back("log_lik"); + names__.push_back("y_pred"); + } + + + void get_dims(std::vector >& dimss__) const { + dimss__.resize(0); + std::vector dims__; + dims__.resize(0); + dims__.push_back(3); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(3); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + 
dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dims__.push_back(T); + dimss__.push_back(dims__); + } + + template + void write_array(RNG& base_rng__, + std::vector& params_r__, + std::vector& params_i__, + std::vector& vars__, + bool include_tparams__ = true, + bool include_gqs__ = true, + std::ostream* pstream__ = 0) const { + typedef double local_scalar_t__; + + vars__.resize(0); + stan::io::reader in__(params_r__,params_i__); + static const char* function__ = "model_ug_delta_namespace::write_array"; + (void) function__; // dummy to suppress unused var warning + // read-transform, write parameters + vector_d mu_pr = in__.vector_constrain(3); + vector_d sigma = in__.vector_lb_constrain(0,3); + vector_d alpha_pr = in__.vector_constrain(N); + vector_d tau_pr = in__.vector_constrain(N); + vector_d ep_pr = in__.vector_constrain(N); + for (int k_0__ = 0; k_0__ < 3; ++k_0__) { + vars__.push_back(mu_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < 3; ++k_0__) { + vars__.push_back(sigma[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(alpha_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(tau_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(ep_pr[k_0__]); + } + + // declare and define transformed parameters + double lp__ = 0.0; + (void) lp__; // dummy to suppress unused var warning + stan::math::accumulator lp_accum__; + + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + + try { + current_statement_begin__ = 41; + validate_non_negative_index("alpha", "N", N); + vector alpha(N); + stan::math::initialize(alpha, DUMMY_VAR__); + 
stan::math::fill(alpha,DUMMY_VAR__); + current_statement_begin__ = 42; + validate_non_negative_index("tau", "N", N); + vector tau(N); + stan::math::initialize(tau, DUMMY_VAR__); + stan::math::fill(tau,DUMMY_VAR__); + current_statement_begin__ = 43; + validate_non_negative_index("ep", "N", N); + vector ep(N); + stan::math::initialize(ep, DUMMY_VAR__); + stan::math::fill(ep,DUMMY_VAR__); + + + current_statement_begin__ = 45; + for (int i = 1; i <= N; ++i) { + + current_statement_begin__ = 46; + stan::model::assign(alpha, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (Phi_approx((get_base1(mu_pr,1,"mu_pr",1) + (get_base1(sigma,1,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 20), + "assigning variable alpha"); + current_statement_begin__ = 47; + stan::model::assign(tau, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (Phi_approx((get_base1(mu_pr,2,"mu_pr",1) + (get_base1(sigma,2,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * 10), + "assigning variable tau"); + current_statement_begin__ = 48; + stan::model::assign(ep, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,3,"mu_pr",1) + (get_base1(sigma,3,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), + "assigning variable ep"); + } + + // validate transformed parameters + current_statement_begin__ = 41; + for (int k0__ = 0; k0__ < N; ++k0__) { + check_greater_or_equal(function__,"alpha[k0__]",alpha[k0__],0); + check_less_or_equal(function__,"alpha[k0__]",alpha[k0__],20); + } + current_statement_begin__ = 42; + for (int k0__ = 0; k0__ < N; ++k0__) { + check_greater_or_equal(function__,"tau[k0__]",tau[k0__],0); + check_less_or_equal(function__,"tau[k0__]",tau[k0__],10); + } + current_statement_begin__ = 43; + for (int k0__ = 0; k0__ < N; ++k0__) { + check_greater_or_equal(function__,"ep[k0__]",ep[k0__],0); + check_less_or_equal(function__,"ep[k0__]",ep[k0__],1); + } + + // write 
transformed parameters + if (include_tparams__) { + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(alpha[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(tau[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(ep[k_0__]); + } + } + if (!include_gqs__) return; + // declare and define generated quantities + current_statement_begin__ = 90; + local_scalar_t__ mu_alpha; + (void) mu_alpha; // dummy to suppress unused var warning + + stan::math::initialize(mu_alpha, DUMMY_VAR__); + stan::math::fill(mu_alpha,DUMMY_VAR__); + current_statement_begin__ = 91; + local_scalar_t__ mu_tau; + (void) mu_tau; // dummy to suppress unused var warning + + stan::math::initialize(mu_tau, DUMMY_VAR__); + stan::math::fill(mu_tau,DUMMY_VAR__); + current_statement_begin__ = 92; + local_scalar_t__ mu_ep; + (void) mu_ep; // dummy to suppress unused var warning + + stan::math::initialize(mu_ep, DUMMY_VAR__); + stan::math::fill(mu_ep,DUMMY_VAR__); + current_statement_begin__ = 95; + validate_non_negative_index("log_lik", "N", N); + vector log_lik(N); + stan::math::initialize(log_lik, DUMMY_VAR__); + stan::math::fill(log_lik,DUMMY_VAR__); + current_statement_begin__ = 98; + validate_non_negative_index("y_pred", "N", N); + validate_non_negative_index("y_pred", "T", T); + vector > y_pred(N, (vector(T))); + stan::math::initialize(y_pred, DUMMY_VAR__); + stan::math::fill(y_pred,DUMMY_VAR__); + + + current_statement_begin__ = 101; + for (int i = 1; i <= N; ++i) { + + current_statement_begin__ = 102; + for (int t = 1; t <= T; ++t) { + + current_statement_begin__ = 103; + stan::model::assign(y_pred, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), + -(1), + "assigning variable y_pred"); + } + } + current_statement_begin__ = 107; + stan::math::assign(mu_alpha, (Phi_approx(get_base1(mu_pr,1,"mu_pr",1)) * 20)); + current_statement_begin__ = 108; + 
stan::math::assign(mu_tau, (Phi_approx(get_base1(mu_pr,2,"mu_pr",1)) * 10)); + current_statement_begin__ = 109; + stan::math::assign(mu_ep, Phi_approx(get_base1(mu_pr,3,"mu_pr",1))); + + current_statement_begin__ = 112; + for (int i = 1; i <= N; ++i) { + { + current_statement_begin__ = 114; + local_scalar_t__ f; + (void) f; // dummy to suppress unused var warning + + stan::math::initialize(f, DUMMY_VAR__); + stan::math::fill(f,DUMMY_VAR__); + current_statement_begin__ = 115; + local_scalar_t__ PE; + (void) PE; // dummy to suppress unused var warning + + stan::math::initialize(PE, DUMMY_VAR__); + stan::math::fill(PE,DUMMY_VAR__); + current_statement_begin__ = 116; + local_scalar_t__ util; + (void) util; // dummy to suppress unused var warning + + stan::math::initialize(util, DUMMY_VAR__); + stan::math::fill(util,DUMMY_VAR__); + + + current_statement_begin__ = 119; + stan::math::assign(f, 10.0); + current_statement_begin__ = 120; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + 0.0, + "assigning variable log_lik"); + current_statement_begin__ = 122; + for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { + + current_statement_begin__ = 124; + stan::math::assign(PE, (get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - f)); + current_statement_begin__ = 127; + stan::math::assign(util, (get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - (get_base1(alpha,i,"alpha",1) * stan::math::fmax((f - get_base1(get_base1(offer,i,"offer",1),t,"offer",2)),0.0)))); + current_statement_begin__ = 130; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + bernoulli_logit_log(get_base1(get_base1(accept,i,"accept",1),t,"accept",2),(util * get_base1(tau,i,"tau",1)))), + "assigning variable log_lik"); + current_statement_begin__ = 
133; + stan::model::assign(y_pred, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), + bernoulli_rng(inv_logit((util * get_base1(tau,i,"tau",1))), base_rng__), + "assigning variable y_pred"); + current_statement_begin__ = 136; + stan::math::assign(f, (f + (get_base1(ep,i,"ep",1) * PE))); + } + } + } + + // validate generated quantities + current_statement_begin__ = 90; + check_greater_or_equal(function__,"mu_alpha",mu_alpha,0); + check_less_or_equal(function__,"mu_alpha",mu_alpha,20); + current_statement_begin__ = 91; + check_greater_or_equal(function__,"mu_tau",mu_tau,0); + check_less_or_equal(function__,"mu_tau",mu_tau,10); + current_statement_begin__ = 92; + check_greater_or_equal(function__,"mu_ep",mu_ep,0); + check_less_or_equal(function__,"mu_ep",mu_ep,1); + current_statement_begin__ = 95; + current_statement_begin__ = 98; + + // write generated quantities + vars__.push_back(mu_alpha); + vars__.push_back(mu_tau); + vars__.push_back(mu_ep); + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(log_lik[k_0__]); + } + for (int k_1__ = 0; k_1__ < T; ++k_1__) { + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(y_pred[k_0__][k_1__]); + } + } + + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + + template + void write_array(RNG& base_rng, + Eigen::Matrix& params_r, + Eigen::Matrix& vars, + bool include_tparams = true, + bool include_gqs = true, + std::ostream* pstream = 0) const { + std::vector params_r_vec(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r_vec[i] = params_r(i); + std::vector vars_vec; + std::vector params_i_vec; + write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); + 
vars.resize(vars_vec.size()); + for (int i = 0; i < vars.size(); ++i) + vars(i) = vars_vec[i]; + } + + static std::string model_name() { + return "model_ug_delta"; + } + + + void constrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "alpha_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "tau_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "ep_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + + if (!include_gqs__ && !include_tparams__) return; + + if (include_tparams__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "alpha" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "tau" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "ep" << '.' 
<< k_0__; + param_names__.push_back(param_name_stream__.str()); + } + } + + + if (!include_gqs__) return; + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_alpha"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_tau"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_ep"; + param_names__.push_back(param_name_stream__.str()); + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "log_lik" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_1__ = 1; k_1__ <= T; ++k_1__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; + param_names__.push_back(param_name_stream__.str()); + } + } + } + + + void unconstrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "alpha_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "tau_pr" << '.' 
<< k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "ep_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + + if (!include_gqs__ && !include_tparams__) return; + + if (include_tparams__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "alpha" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "tau" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "ep" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + } + + + if (!include_gqs__) return; + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_alpha"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_tau"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_ep"; + param_names__.push_back(param_name_stream__.str()); + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "log_lik" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_1__ = 1; k_1__ <= T; ++k_1__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; + param_names__.push_back(param_name_stream__.str()); + } + } + } + +}; // model + +} + +typedef model_ug_delta_namespace::model_ug_delta stan_model; + + +#endif diff --git a/inst/stan_files/ug_delta.o b/inst/stan_files/ug_delta.o new file mode 100644 index 00000000..7c4b5ac7 Binary files /dev/null and b/inst/stan_files/ug_delta.o differ diff --git a/exec/ug_delta.stan b/inst/stan_files/ug_delta.stan old mode 100755 new mode 100644 similarity index 83% rename from exec/ug_delta.stan rename to inst/stan_files/ug_delta.stan index b2e1ace2..9bb70e0a --- a/exec/ug_delta.stan +++ b/inst/stan_files/ug_delta.stan @@ -1,3 +1,5 @@ +#include /pre/license.stan + data { int N; int T; @@ -12,37 +14,37 @@ transformed data { parameters { // Declare all parameters as vectors for vectorizing // Hyper(group)-parameters - vector[3] mu_p; + vector[3] mu_pr; vector[3] sigma; // Subject-level raw parameters (for Matt trick) - vector[N] ep_pr; // ep: Norm adaptation rate vector[N] alpha_pr; // alpha: Envy (sensitivity to norm prediction error) vector[N] tau_pr; // tau: Inverse temperature + vector[N] ep_pr; // ep: Norm adaptation rate } transformed parameters { // Transform subject-level raw parameters - real ep[N]; real alpha[N]; real tau[N]; + real ep[N]; for (i in 1:N) { - ep[i] = Phi_approx(mu_p[1] + sigma[1] * ep_pr[i]); - tau[i] = Phi_approx(mu_p[2] + sigma[2] * tau_pr[i]) * 10; - alpha[i] = Phi_approx(mu_p[3] + sigma[3] * alpha_pr[i]) * 20; + alpha[i] = Phi_approx(mu_pr[1] + sigma[1] * alpha_pr[i]) * 20; + tau[i] = Phi_approx(mu_pr[2] + sigma[2] * tau_pr[i]) * 10; + ep[i] = Phi_approx(mu_pr[3] + sigma[3] * ep_pr[i]); } } model { // Hyperparameters - mu_p ~ normal(0, 1); + mu_pr ~ normal(0, 1); sigma ~ normal(0, 0.2); // individual parameters - ep_pr ~ normal(0, 1.0); alpha_pr ~ normal(0, 1.0); tau_pr ~ normal(0, 1.0); + ep_pr ~ normal(0, 1.0); for (i in 1:N) { // Define values @@ -64,7 +66,7 @@ model { accept[i, t] ~ bernoulli_logit(util * tau[i]); // Update internal 
norm - f = f + ep[i] * PE; + f += ep[i] * PE; } // end of t loop } // end of i loop @@ -72,9 +74,9 @@ model { generated quantities { // For group level parameters - real mu_ep; - real mu_tau; real mu_alpha; + real mu_tau; + real mu_ep; // For log likelihood calculation real log_lik[N]; @@ -89,9 +91,9 @@ generated quantities { } } - mu_ep = Phi_approx(mu_p[1]); - mu_tau = Phi_approx(mu_p[2]) * 10; - mu_alpha = Phi_approx(mu_p[3]) * 20; + mu_alpha = Phi_approx(mu_pr[1]) * 20; + mu_tau = Phi_approx(mu_pr[2]) * 10; + mu_ep = Phi_approx(mu_pr[3]); { // local section, this saves time and space for (i in 1:N) { @@ -112,13 +114,13 @@ generated quantities { util = offer[i, t] - alpha[i] * fmax(f - offer[i, t], 0.0); // Calculate log likelihood - log_lik[i] = log_lik[i] + bernoulli_logit_lpmf(accept[i, t] | util * tau[i]); + log_lik[i] += bernoulli_logit_lpmf(accept[i, t] | util * tau[i]); // generate posterior prediction for current trial y_pred[i, t] = bernoulli_rng(inv_logit(util * tau[i])); // Update internal norm - f = f + ep[i] * PE; + f += ep[i] * PE; } // end of t loop } // end of i loop diff --git a/inst/stan_files/wcs_sql.hpp b/inst/stan_files/wcs_sql.hpp new file mode 100644 index 00000000..d880478f --- /dev/null +++ b/inst/stan_files/wcs_sql.hpp @@ -0,0 +1,1238 @@ +/* + hBayesDM is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + hBayesDM is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with hBayesDM. If not, see . 
+*/ +#ifndef MODELS_HPP +#define MODELS_HPP +#define STAN__SERVICES__COMMAND_HPP +#include +// Code generated by Stan version 2.18.0 + +#include + +namespace model_wcs_sql_namespace { + +using std::istream; +using std::string; +using std::stringstream; +using std::vector; +using stan::io::dump; +using stan::math::lgamma; +using stan::model::prob_grad; +using namespace stan::math; + +static int current_statement_begin__; + +stan::io::program_reader prog_reader__() { + stan::io::program_reader reader; + reader.add_event(0, 0, "start", "model_wcs_sql"); + reader.add_event(0, 0, "include", "/pre/license.stan"); + reader.add_event(0, 0, "start", "/pre/license.stan"); + reader.add_event(14, 14, "end", "/pre/license.stan"); + reader.add_event(14, 1, "restart", "model_wcs_sql"); + reader.add_event(190, 175, "end", "model_wcs_sql"); + return reader; +} + +#include + class model_wcs_sql : public prob_grad { +private: + int N; + int T; + vector Tsubj; + vector > > choice; + vector > outcome; + vector > choice_match_att; + vector deck_match_rule; + matrix_d initAtt; + matrix_d unit; +public: + model_wcs_sql(stan::io::var_context& context__, + std::ostream* pstream__ = 0) + : prob_grad(0) { + ctor_body(context__, 0, pstream__); + } + + model_wcs_sql(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__ = 0) + : prob_grad(0) { + ctor_body(context__, random_seed__, pstream__); + } + + void ctor_body(stan::io::var_context& context__, + unsigned int random_seed__, + std::ostream* pstream__) { + typedef double local_scalar_t__; + + boost::ecuyer1988 base_rng__ = + stan::services::util::create_rng(random_seed__, 0); + (void) base_rng__; // suppress unused var warning + + current_statement_begin__ = -1; + + static const char* function__ = "model_wcs_sql_namespace::model_wcs_sql"; + (void) function__; // dummy to suppress unused var warning + size_t pos__; + (void) pos__; // dummy to suppress unused var warning + std::vector vals_i__; + std::vector 
vals_r__; + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + + // initialize member variables + try { + current_statement_begin__ = 17; + context__.validate_dims("data initialization", "N", "int", context__.to_vec()); + N = int(0); + vals_i__ = context__.vals_i("N"); + pos__ = 0; + N = vals_i__[pos__++]; + current_statement_begin__ = 18; + context__.validate_dims("data initialization", "T", "int", context__.to_vec()); + T = int(0); + vals_i__ = context__.vals_i("T"); + pos__ = 0; + T = vals_i__[pos__++]; + current_statement_begin__ = 19; + validate_non_negative_index("Tsubj", "N", N); + context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); + validate_non_negative_index("Tsubj", "N", N); + Tsubj = std::vector(N,int(0)); + vals_i__ = context__.vals_i("Tsubj"); + pos__ = 0; + size_t Tsubj_limit_0__ = N; + for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { + Tsubj[i_0__] = vals_i__[pos__++]; + } + current_statement_begin__ = 21; + validate_non_negative_index("choice", "N", N); + validate_non_negative_index("choice", "4", 4); + validate_non_negative_index("choice", "T", T); + context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,4,T)); + validate_non_negative_index("choice", "N", N); + validate_non_negative_index("choice", "4", 4); + validate_non_negative_index("choice", "T", T); + choice = std::vector > >(N,std::vector >(4,std::vector(T,int(0)))); + vals_i__ = context__.vals_i("choice"); + pos__ = 0; + size_t choice_limit_2__ = T; + for (size_t i_2__ = 0; i_2__ < choice_limit_2__; ++i_2__) { + size_t choice_limit_1__ = 4; + for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { + size_t choice_limit_0__ = N; + for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { + choice[i_0__][i_1__][i_2__] = vals_i__[pos__++]; + } + } + } + current_statement_begin__ = 22; + validate_non_negative_index("outcome", "N", N); + 
validate_non_negative_index("outcome", "T", T); + context__.validate_dims("data initialization", "outcome", "int", context__.to_vec(N,T)); + validate_non_negative_index("outcome", "N", N); + validate_non_negative_index("outcome", "T", T); + outcome = std::vector >(N,std::vector(T,int(0))); + vals_i__ = context__.vals_i("outcome"); + pos__ = 0; + size_t outcome_limit_1__ = T; + for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { + size_t outcome_limit_0__ = N; + for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { + outcome[i_0__][i_1__] = vals_i__[pos__++]; + } + } + current_statement_begin__ = 23; + validate_non_negative_index("choice_match_att", "N", N); + validate_non_negative_index("choice_match_att", "T", T); + validate_non_negative_index("choice_match_att", "1", 1); + validate_non_negative_index("choice_match_att", "3", 3); + context__.validate_dims("data initialization", "choice_match_att", "matrix_d", context__.to_vec(N,T,1,3)); + validate_non_negative_index("choice_match_att", "N", N); + validate_non_negative_index("choice_match_att", "T", T); + validate_non_negative_index("choice_match_att", "1", 1); + validate_non_negative_index("choice_match_att", "3", 3); + choice_match_att = std::vector >(N,std::vector(T,matrix_d(static_cast(1),static_cast(3)))); + vals_r__ = context__.vals_r("choice_match_att"); + pos__ = 0; + size_t choice_match_att_m_mat_lim__ = 1; + size_t choice_match_att_n_mat_lim__ = 3; + for (size_t n_mat__ = 0; n_mat__ < choice_match_att_n_mat_lim__; ++n_mat__) { + for (size_t m_mat__ = 0; m_mat__ < choice_match_att_m_mat_lim__; ++m_mat__) { + size_t choice_match_att_limit_1__ = T; + for (size_t i_1__ = 0; i_1__ < choice_match_att_limit_1__; ++i_1__) { + size_t choice_match_att_limit_0__ = N; + for (size_t i_0__ = 0; i_0__ < choice_match_att_limit_0__; ++i_0__) { + choice_match_att[i_0__][i_1__](m_mat__,n_mat__) = vals_r__[pos__++]; + } + } + } + } + current_statement_begin__ = 24; + 
validate_non_negative_index("deck_match_rule", "T", T); + validate_non_negative_index("deck_match_rule", "3", 3); + validate_non_negative_index("deck_match_rule", "4", 4); + context__.validate_dims("data initialization", "deck_match_rule", "matrix_d", context__.to_vec(T,3,4)); + validate_non_negative_index("deck_match_rule", "T", T); + validate_non_negative_index("deck_match_rule", "3", 3); + validate_non_negative_index("deck_match_rule", "4", 4); + deck_match_rule = std::vector(T,matrix_d(static_cast(3),static_cast(4))); + vals_r__ = context__.vals_r("deck_match_rule"); + pos__ = 0; + size_t deck_match_rule_m_mat_lim__ = 3; + size_t deck_match_rule_n_mat_lim__ = 4; + for (size_t n_mat__ = 0; n_mat__ < deck_match_rule_n_mat_lim__; ++n_mat__) { + for (size_t m_mat__ = 0; m_mat__ < deck_match_rule_m_mat_lim__; ++m_mat__) { + size_t deck_match_rule_limit_0__ = T; + for (size_t i_0__ = 0; i_0__ < deck_match_rule_limit_0__; ++i_0__) { + deck_match_rule[i_0__](m_mat__,n_mat__) = vals_r__[pos__++]; + } + } + } + + // validate, data variables + current_statement_begin__ = 17; + check_greater_or_equal(function__,"N",N,1); + current_statement_begin__ = 18; + check_greater_or_equal(function__,"T",T,1); + current_statement_begin__ = 19; + for (int k0__ = 0; k0__ < N; ++k0__) { + check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],40); + check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); + } + current_statement_begin__ = 21; + for (int k0__ = 0; k0__ < N; ++k0__) { + for (int k1__ = 0; k1__ < 4; ++k1__) { + for (int k2__ = 0; k2__ < T; ++k2__) { + check_greater_or_equal(function__,"choice[k0__][k1__][k2__]",choice[k0__][k1__][k2__],0); + check_less_or_equal(function__,"choice[k0__][k1__][k2__]",choice[k0__][k1__][k2__],4); + } + } + } + current_statement_begin__ = 22; + for (int k0__ = 0; k0__ < N; ++k0__) { + for (int k1__ = 0; k1__ < T; ++k1__) { + check_greater_or_equal(function__,"outcome[k0__][k1__]",outcome[k0__][k1__],-(1)); + 
check_less_or_equal(function__,"outcome[k0__][k1__]",outcome[k0__][k1__],1); + } + } + current_statement_begin__ = 23; + for (int k0__ = 0; k0__ < N; ++k0__) { + for (int k1__ = 0; k1__ < T; ++k1__) { + check_greater_or_equal(function__,"choice_match_att[k0__][k1__]",choice_match_att[k0__][k1__],0); + check_less_or_equal(function__,"choice_match_att[k0__][k1__]",choice_match_att[k0__][k1__],1); + } + } + current_statement_begin__ = 24; + for (int k0__ = 0; k0__ < T; ++k0__) { + check_greater_or_equal(function__,"deck_match_rule[k0__]",deck_match_rule[k0__],0); + check_less_or_equal(function__,"deck_match_rule[k0__]",deck_match_rule[k0__],1); + } + // initialize data variables + current_statement_begin__ = 28; + validate_non_negative_index("initAtt", "1", 1); + validate_non_negative_index("initAtt", "3", 3); + initAtt = matrix_d(static_cast(1),static_cast(3)); + stan::math::fill(initAtt,DUMMY_VAR__); + current_statement_begin__ = 29; + validate_non_negative_index("unit", "1", 1); + validate_non_negative_index("unit", "3", 3); + unit = matrix_d(static_cast(1),static_cast(3)); + stan::math::fill(unit,DUMMY_VAR__); + + current_statement_begin__ = 31; + stan::math::assign(initAtt, rep_matrix((1.0 / 3.0),1,3)); + current_statement_begin__ = 32; + stan::math::assign(unit, rep_matrix(1.0,1,3)); + + // validate transformed data + current_statement_begin__ = 28; + current_statement_begin__ = 29; + + // validate, set parameter ranges + num_params_r__ = 0U; + param_ranges_i__.clear(); + current_statement_begin__ = 37; + validate_non_negative_index("mu_pr", "3", 3); + num_params_r__ += 3; + current_statement_begin__ = 38; + validate_non_negative_index("sigma", "3", 3); + num_params_r__ += 3; + current_statement_begin__ = 41; + validate_non_negative_index("r_pr", "N", N); + num_params_r__ += N; + current_statement_begin__ = 42; + validate_non_negative_index("p_pr", "N", N); + num_params_r__ += N; + current_statement_begin__ = 43; + validate_non_negative_index("d_pr", "N", N); + 
num_params_r__ += N; + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + + ~model_wcs_sql() { } + + + void transform_inits(const stan::io::var_context& context__, + std::vector& params_i__, + std::vector& params_r__, + std::ostream* pstream__) const { + stan::io::writer writer__(params_r__,params_i__); + size_t pos__; + (void) pos__; // dummy call to supress warning + std::vector vals_r__; + std::vector vals_i__; + + if (!(context__.contains_r("mu_pr"))) + throw std::runtime_error("variable mu_pr missing"); + vals_r__ = context__.vals_r("mu_pr"); + pos__ = 0U; + validate_non_negative_index("mu_pr", "3", 3); + context__.validate_dims("initialization", "mu_pr", "vector_d", context__.to_vec(3)); + vector_d mu_pr(static_cast(3)); + for (int j1__ = 0U; j1__ < 3; ++j1__) + mu_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(mu_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable mu_pr: ") + e.what()); + } + + if (!(context__.contains_r("sigma"))) + throw std::runtime_error("variable sigma missing"); + vals_r__ = context__.vals_r("sigma"); + pos__ = 0U; + validate_non_negative_index("sigma", "3", 3); + context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); + vector_d sigma(static_cast(3)); + for (int j1__ = 0U; j1__ < 3; ++j1__) + sigma(j1__) = vals_r__[pos__++]; + try { + writer__.vector_lb_unconstrain(0,sigma); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); + } + + if (!(context__.contains_r("r_pr"))) + throw std::runtime_error("variable r_pr missing"); + vals_r__ = context__.vals_r("r_pr"); + pos__ = 0U; + validate_non_negative_index("r_pr", "N", N); + 
context__.validate_dims("initialization", "r_pr", "vector_d", context__.to_vec(N)); + vector_d r_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + r_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(r_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable r_pr: ") + e.what()); + } + + if (!(context__.contains_r("p_pr"))) + throw std::runtime_error("variable p_pr missing"); + vals_r__ = context__.vals_r("p_pr"); + pos__ = 0U; + validate_non_negative_index("p_pr", "N", N); + context__.validate_dims("initialization", "p_pr", "vector_d", context__.to_vec(N)); + vector_d p_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + p_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(p_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable p_pr: ") + e.what()); + } + + if (!(context__.contains_r("d_pr"))) + throw std::runtime_error("variable d_pr missing"); + vals_r__ = context__.vals_r("d_pr"); + pos__ = 0U; + validate_non_negative_index("d_pr", "N", N); + context__.validate_dims("initialization", "d_pr", "vector_d", context__.to_vec(N)); + vector_d d_pr(static_cast(N)); + for (int j1__ = 0U; j1__ < N; ++j1__) + d_pr(j1__) = vals_r__[pos__++]; + try { + writer__.vector_unconstrain(d_pr); + } catch (const std::exception& e) { + throw std::runtime_error(std::string("Error transforming variable d_pr: ") + e.what()); + } + + params_r__ = writer__.data_r(); + params_i__ = writer__.data_i(); + } + + void transform_inits(const stan::io::var_context& context, + Eigen::Matrix& params_r, + std::ostream* pstream__) const { + std::vector params_r_vec; + std::vector params_i_vec; + transform_inits(context, params_i_vec, params_r_vec, pstream__); + params_r.resize(params_r_vec.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r(i) = params_r_vec[i]; + } + + + template + T__ log_prob(vector& params_r__, + vector& 
params_i__, + std::ostream* pstream__ = 0) const { + + typedef T__ local_scalar_t__; + + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + + T__ lp__(0.0); + stan::math::accumulator lp_accum__; + + try { + // model parameters + stan::io::reader in__(params_r__,params_i__); + + Eigen::Matrix mu_pr; + (void) mu_pr; // dummy to suppress unused var warning + if (jacobian__) + mu_pr = in__.vector_constrain(3,lp__); + else + mu_pr = in__.vector_constrain(3); + + Eigen::Matrix sigma; + (void) sigma; // dummy to suppress unused var warning + if (jacobian__) + sigma = in__.vector_lb_constrain(0,3,lp__); + else + sigma = in__.vector_lb_constrain(0,3); + + Eigen::Matrix r_pr; + (void) r_pr; // dummy to suppress unused var warning + if (jacobian__) + r_pr = in__.vector_constrain(N,lp__); + else + r_pr = in__.vector_constrain(N); + + Eigen::Matrix p_pr; + (void) p_pr; // dummy to suppress unused var warning + if (jacobian__) + p_pr = in__.vector_constrain(N,lp__); + else + p_pr = in__.vector_constrain(N); + + Eigen::Matrix d_pr; + (void) d_pr; // dummy to suppress unused var warning + if (jacobian__) + d_pr = in__.vector_constrain(N,lp__); + else + d_pr = in__.vector_constrain(N); + + + // transformed parameters + current_statement_begin__ = 48; + validate_non_negative_index("r", "N", N); + Eigen::Matrix r(static_cast(N)); + (void) r; // dummy to suppress unused var warning + + stan::math::initialize(r, DUMMY_VAR__); + stan::math::fill(r,DUMMY_VAR__); + current_statement_begin__ = 49; + validate_non_negative_index("p", "N", N); + Eigen::Matrix p(static_cast(N)); + (void) p; // dummy to suppress unused var warning + + stan::math::initialize(p, DUMMY_VAR__); + stan::math::fill(p,DUMMY_VAR__); + current_statement_begin__ = 50; + validate_non_negative_index("d", "N", N); + Eigen::Matrix d(static_cast(N)); + (void) d; // dummy to suppress unused var warning + + stan::math::initialize(d, DUMMY_VAR__); + 
stan::math::fill(d,DUMMY_VAR__); + + + current_statement_begin__ = 52; + for (int i = 1; i <= N; ++i) { + + current_statement_begin__ = 53; + stan::model::assign(r, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,1,"mu_pr",1) + (get_base1(sigma,1,"sigma",1) * get_base1(r_pr,i,"r_pr",1)))), + "assigning variable r"); + current_statement_begin__ = 54; + stan::model::assign(p, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,2,"mu_pr",1) + (get_base1(sigma,2,"sigma",1) * get_base1(p_pr,i,"p_pr",1)))), + "assigning variable p"); + current_statement_begin__ = 55; + stan::model::assign(d, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (Phi_approx((get_base1(mu_pr,3,"mu_pr",1) + (get_base1(sigma,3,"sigma",1) * get_base1(d_pr,i,"d_pr",1)))) * 5), + "assigning variable d"); + } + + // validate transformed parameters + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(r(i0__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: r" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(p(i0__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: p" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + for (int i0__ = 0; i0__ < N; ++i0__) { + if (stan::math::is_uninitialized(d(i0__))) { + std::stringstream msg__; + msg__ << "Undefined transformed parameter: d" << '[' << i0__ << ']'; + throw std::runtime_error(msg__.str()); + } + } + + const char* function__ = "validate transformed params"; + (void) function__; // dummy to suppress unused var warning + current_statement_begin__ = 48; + check_greater_or_equal(function__,"r",r,0); + check_less_or_equal(function__,"r",r,1); + current_statement_begin__ = 49; + 
check_greater_or_equal(function__,"p",p,0); + check_less_or_equal(function__,"p",p,1); + current_statement_begin__ = 50; + check_greater_or_equal(function__,"d",d,0); + check_less_or_equal(function__,"d",d,5); + + // model body + + current_statement_begin__ = 61; + lp_accum__.add(normal_log(mu_pr, 0, 1)); + current_statement_begin__ = 62; + lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); + current_statement_begin__ = 65; + lp_accum__.add(normal_log(r_pr, 0, 1)); + current_statement_begin__ = 66; + lp_accum__.add(normal_log(p_pr, 0, 1)); + current_statement_begin__ = 67; + lp_accum__.add(normal_log(d_pr, 0, 1)); + current_statement_begin__ = 69; + for (int i = 1; i <= N; ++i) { + { + current_statement_begin__ = 71; + validate_non_negative_index("pred_prob_mat", "4", 4); + Eigen::Matrix pred_prob_mat(static_cast(4)); + (void) pred_prob_mat; // dummy to suppress unused var warning + + stan::math::initialize(pred_prob_mat, DUMMY_VAR__); + stan::math::fill(pred_prob_mat,DUMMY_VAR__); + current_statement_begin__ = 72; + validate_non_negative_index("subj_att", "1", 1); + validate_non_negative_index("subj_att", "3", 3); + Eigen::Matrix subj_att(static_cast(1),static_cast(3)); + (void) subj_att; // dummy to suppress unused var warning + + stan::math::initialize(subj_att, DUMMY_VAR__); + stan::math::fill(subj_att,DUMMY_VAR__); + current_statement_begin__ = 73; + validate_non_negative_index("att_signal", "1", 1); + validate_non_negative_index("att_signal", "3", 3); + Eigen::Matrix att_signal(static_cast(1),static_cast(3)); + (void) att_signal; // dummy to suppress unused var warning + + stan::math::initialize(att_signal, DUMMY_VAR__); + stan::math::fill(att_signal,DUMMY_VAR__); + current_statement_begin__ = 74; + local_scalar_t__ sum_att_signal; + (void) sum_att_signal; // dummy to suppress unused var warning + + stan::math::initialize(sum_att_signal, DUMMY_VAR__); + stan::math::fill(sum_att_signal,DUMMY_VAR__); + current_statement_begin__ = 75; + 
validate_non_negative_index("tmpatt", "1", 1); + validate_non_negative_index("tmpatt", "3", 3); + Eigen::Matrix tmpatt(static_cast(1),static_cast(3)); + (void) tmpatt; // dummy to suppress unused var warning + + stan::math::initialize(tmpatt, DUMMY_VAR__); + stan::math::fill(tmpatt,DUMMY_VAR__); + current_statement_begin__ = 76; + validate_non_negative_index("tmpp", "4", 4); + Eigen::Matrix tmpp(static_cast(4)); + (void) tmpp; // dummy to suppress unused var warning + + stan::math::initialize(tmpp, DUMMY_VAR__); + stan::math::fill(tmpp,DUMMY_VAR__); + + + current_statement_begin__ = 79; + stan::math::assign(subj_att, initAtt); + current_statement_begin__ = 80; + stan::math::assign(pred_prob_mat, to_vector(multiply(subj_att,stan::model::rvalue(deck_match_rule, stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list()))), "deck_match_rule")))); + current_statement_begin__ = 82; + for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { + + current_statement_begin__ = 84; + lp_accum__.add(multinomial_log(stan::model::rvalue(choice, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), "choice"), pred_prob_mat)); + current_statement_begin__ = 87; + if (as_bool(logical_eq(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),1))) { + + current_statement_begin__ = 88; + stan::math::assign(att_signal, elt_multiply(subj_att,get_base1(get_base1(choice_match_att,i,"choice_match_att",1),t,"choice_match_att",2))); + current_statement_begin__ = 89; + stan::math::assign(sum_att_signal, sum(att_signal)); + current_statement_begin__ = 90; + stan::math::assign(att_signal, divide(att_signal, sum_att_signal)); + current_statement_begin__ = 91; + stan::math::assign(tmpatt, add(multiply((1.0 - 
get_base1(r,i,"r",1)),subj_att),multiply(get_base1(r,i,"r",1),att_signal))); + } else { + + current_statement_begin__ = 93; + stan::math::assign(att_signal, elt_multiply(subj_att,subtract(unit,get_base1(get_base1(choice_match_att,i,"choice_match_att",1),t,"choice_match_att",2)))); + current_statement_begin__ = 94; + stan::math::assign(sum_att_signal, sum(att_signal)); + current_statement_begin__ = 95; + stan::math::assign(att_signal, divide(att_signal, sum_att_signal)); + current_statement_begin__ = 96; + stan::math::assign(tmpatt, add(multiply((1.0 - get_base1(p,i,"p",1)),subj_att),multiply(get_base1(p,i,"p",1),att_signal))); + } + current_statement_begin__ = 100; + stan::math::assign(subj_att, add(multiply(divide(tmpatt,sum(tmpatt)),0.99980000000000002),0.0001)); + current_statement_begin__ = 102; + stan::model::assign(tmpatt, + stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list())), + pow(get_base1(subj_att,1,1,"subj_att",1),get_base1(d,i,"d",1)), + "assigning variable tmpatt"); + current_statement_begin__ = 103; + stan::model::assign(tmpatt, + stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list())), + pow(get_base1(subj_att,1,2,"subj_att",1),get_base1(d,i,"d",1)), + "assigning variable tmpatt"); + current_statement_begin__ = 104; + stan::model::assign(tmpatt, + stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_uni(3), stan::model::nil_index_list())), + pow(get_base1(subj_att,1,3,"subj_att",1),get_base1(d,i,"d",1)), + "assigning variable tmpatt"); + current_statement_begin__ = 107; + if (as_bool(logical_lt(t,get_base1(Tsubj,i,"Tsubj",1)))) { + + current_statement_begin__ = 108; + stan::math::assign(tmpp, add(multiply(to_vector(multiply(tmpatt,stan::model::rvalue(deck_match_rule, stan::model::cons_list(stan::model::index_uni((t + 1)), 
stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list()))), "deck_match_rule"))),0.99980000000000002),0.0001)); + current_statement_begin__ = 109; + stan::math::assign(pred_prob_mat, divide(tmpp,sum(tmpp))); + } + } + } + } + + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + + lp_accum__.add(lp__); + return lp_accum__.sum(); + + } // log_prob() + + template + T_ log_prob(Eigen::Matrix& params_r, + std::ostream* pstream = 0) const { + std::vector vec_params_r; + vec_params_r.reserve(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + vec_params_r.push_back(params_r(i)); + std::vector vec_params_i; + return log_prob(vec_params_r, vec_params_i, pstream); + } + + + void get_param_names(std::vector& names__) const { + names__.resize(0); + names__.push_back("mu_pr"); + names__.push_back("sigma"); + names__.push_back("r_pr"); + names__.push_back("p_pr"); + names__.push_back("d_pr"); + names__.push_back("r"); + names__.push_back("p"); + names__.push_back("d"); + names__.push_back("mu_r"); + names__.push_back("mu_p"); + names__.push_back("mu_d"); + names__.push_back("log_lik"); + names__.push_back("y_pred"); + } + + + void get_dims(std::vector >& dimss__) const { + dimss__.resize(0); + std::vector dims__; + dims__.resize(0); + dims__.push_back(3); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(3); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + 
dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dimss__.push_back(dims__); + dims__.resize(0); + dims__.push_back(N); + dims__.push_back(4); + dims__.push_back(T); + dimss__.push_back(dims__); + } + + template + void write_array(RNG& base_rng__, + std::vector& params_r__, + std::vector& params_i__, + std::vector& vars__, + bool include_tparams__ = true, + bool include_gqs__ = true, + std::ostream* pstream__ = 0) const { + typedef double local_scalar_t__; + + vars__.resize(0); + stan::io::reader in__(params_r__,params_i__); + static const char* function__ = "model_wcs_sql_namespace::write_array"; + (void) function__; // dummy to suppress unused var warning + // read-transform, write parameters + vector_d mu_pr = in__.vector_constrain(3); + vector_d sigma = in__.vector_lb_constrain(0,3); + vector_d r_pr = in__.vector_constrain(N); + vector_d p_pr = in__.vector_constrain(N); + vector_d d_pr = in__.vector_constrain(N); + for (int k_0__ = 0; k_0__ < 3; ++k_0__) { + vars__.push_back(mu_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < 3; ++k_0__) { + vars__.push_back(sigma[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(r_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(p_pr[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(d_pr[k_0__]); + } + + // declare and define transformed parameters + double lp__ = 0.0; + (void) lp__; // dummy to suppress unused var warning + stan::math::accumulator lp_accum__; + + local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); + (void) DUMMY_VAR__; // suppress unused var warning + + try { + current_statement_begin__ = 48; + validate_non_negative_index("r", "N", N); + Eigen::Matrix r(static_cast(N)); + (void) r; 
// dummy to suppress unused var warning + + stan::math::initialize(r, DUMMY_VAR__); + stan::math::fill(r,DUMMY_VAR__); + current_statement_begin__ = 49; + validate_non_negative_index("p", "N", N); + Eigen::Matrix p(static_cast(N)); + (void) p; // dummy to suppress unused var warning + + stan::math::initialize(p, DUMMY_VAR__); + stan::math::fill(p,DUMMY_VAR__); + current_statement_begin__ = 50; + validate_non_negative_index("d", "N", N); + Eigen::Matrix d(static_cast(N)); + (void) d; // dummy to suppress unused var warning + + stan::math::initialize(d, DUMMY_VAR__); + stan::math::fill(d,DUMMY_VAR__); + + + current_statement_begin__ = 52; + for (int i = 1; i <= N; ++i) { + + current_statement_begin__ = 53; + stan::model::assign(r, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,1,"mu_pr",1) + (get_base1(sigma,1,"sigma",1) * get_base1(r_pr,i,"r_pr",1)))), + "assigning variable r"); + current_statement_begin__ = 54; + stan::model::assign(p, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + Phi_approx((get_base1(mu_pr,2,"mu_pr",1) + (get_base1(sigma,2,"sigma",1) * get_base1(p_pr,i,"p_pr",1)))), + "assigning variable p"); + current_statement_begin__ = 55; + stan::model::assign(d, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (Phi_approx((get_base1(mu_pr,3,"mu_pr",1) + (get_base1(sigma,3,"sigma",1) * get_base1(d_pr,i,"d_pr",1)))) * 5), + "assigning variable d"); + } + + // validate transformed parameters + current_statement_begin__ = 48; + check_greater_or_equal(function__,"r",r,0); + check_less_or_equal(function__,"r",r,1); + current_statement_begin__ = 49; + check_greater_or_equal(function__,"p",p,0); + check_less_or_equal(function__,"p",p,1); + current_statement_begin__ = 50; + check_greater_or_equal(function__,"d",d,0); + check_less_or_equal(function__,"d",d,5); + + // write transformed parameters + if (include_tparams__) { + 
for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(r[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(p[k_0__]); + } + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(d[k_0__]); + } + } + if (!include_gqs__) return; + // declare and define generated quantities + current_statement_begin__ = 117; + local_scalar_t__ mu_r; + (void) mu_r; // dummy to suppress unused var warning + + stan::math::initialize(mu_r, DUMMY_VAR__); + stan::math::fill(mu_r,DUMMY_VAR__); + current_statement_begin__ = 118; + local_scalar_t__ mu_p; + (void) mu_p; // dummy to suppress unused var warning + + stan::math::initialize(mu_p, DUMMY_VAR__); + stan::math::fill(mu_p,DUMMY_VAR__); + current_statement_begin__ = 119; + local_scalar_t__ mu_d; + (void) mu_d; // dummy to suppress unused var warning + + stan::math::initialize(mu_d, DUMMY_VAR__); + stan::math::fill(mu_d,DUMMY_VAR__); + current_statement_begin__ = 122; + validate_non_negative_index("log_lik", "N", N); + vector log_lik(N); + stan::math::initialize(log_lik, DUMMY_VAR__); + stan::math::fill(log_lik,DUMMY_VAR__); + current_statement_begin__ = 125; + validate_non_negative_index("y_pred", "N", N); + validate_non_negative_index("y_pred", "4", 4); + validate_non_negative_index("y_pred", "T", T); + vector > > y_pred(N, (vector >(4, (vector(T, 0))))); + stan::math::fill(y_pred, std::numeric_limits::min()); + + + current_statement_begin__ = 128; + for (int i = 1; i <= N; ++i) { + + current_statement_begin__ = 129; + for (int t = 1; t <= T; ++t) { + + current_statement_begin__ = 130; + for (int deck = 1; deck <= 4; ++deck) { + + current_statement_begin__ = 131; + stan::model::assign(y_pred, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(deck), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), + -(1), + "assigning variable y_pred"); + } + } + } + current_statement_begin__ = 136; + stan::math::assign(mu_r, 
Phi_approx(get_base1(mu_pr,1,"mu_pr",1))); + current_statement_begin__ = 137; + stan::math::assign(mu_p, Phi_approx(get_base1(mu_pr,2,"mu_pr",1))); + current_statement_begin__ = 138; + stan::math::assign(mu_d, (Phi_approx(get_base1(mu_pr,3,"mu_pr",1)) * 5)); + + current_statement_begin__ = 141; + for (int i = 1; i <= N; ++i) { + { + current_statement_begin__ = 142; + validate_non_negative_index("subj_att", "1", 1); + validate_non_negative_index("subj_att", "3", 3); + Eigen::Matrix subj_att(static_cast(1),static_cast(3)); + (void) subj_att; // dummy to suppress unused var warning + + stan::math::initialize(subj_att, DUMMY_VAR__); + stan::math::fill(subj_att,DUMMY_VAR__); + current_statement_begin__ = 143; + validate_non_negative_index("att_signal", "1", 1); + validate_non_negative_index("att_signal", "3", 3); + Eigen::Matrix att_signal(static_cast(1),static_cast(3)); + (void) att_signal; // dummy to suppress unused var warning + + stan::math::initialize(att_signal, DUMMY_VAR__); + stan::math::fill(att_signal,DUMMY_VAR__); + current_statement_begin__ = 144; + validate_non_negative_index("pred_prob_mat", "4", 4); + Eigen::Matrix pred_prob_mat(static_cast(4)); + (void) pred_prob_mat; // dummy to suppress unused var warning + + stan::math::initialize(pred_prob_mat, DUMMY_VAR__); + stan::math::fill(pred_prob_mat,DUMMY_VAR__); + current_statement_begin__ = 146; + validate_non_negative_index("tmpatt", "1", 1); + validate_non_negative_index("tmpatt", "3", 3); + Eigen::Matrix tmpatt(static_cast(1),static_cast(3)); + (void) tmpatt; // dummy to suppress unused var warning + + stan::math::initialize(tmpatt, DUMMY_VAR__); + stan::math::fill(tmpatt,DUMMY_VAR__); + current_statement_begin__ = 147; + validate_non_negative_index("tmpp", "4", 4); + Eigen::Matrix tmpp(static_cast(4)); + (void) tmpp; // dummy to suppress unused var warning + + stan::math::initialize(tmpp, DUMMY_VAR__); + stan::math::fill(tmpp,DUMMY_VAR__); + current_statement_begin__ = 149; + local_scalar_t__ 
sum_att_signal; + (void) sum_att_signal; // dummy to suppress unused var warning + + stan::math::initialize(sum_att_signal, DUMMY_VAR__); + stan::math::fill(sum_att_signal,DUMMY_VAR__); + + + current_statement_begin__ = 151; + stan::math::assign(subj_att, initAtt); + current_statement_begin__ = 152; + stan::math::assign(pred_prob_mat, to_vector(multiply(subj_att,stan::model::rvalue(deck_match_rule, stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list()))), "deck_match_rule")))); + current_statement_begin__ = 154; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + 0, + "assigning variable log_lik"); + current_statement_begin__ = 156; + for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { + + current_statement_begin__ = 158; + stan::model::assign(log_lik, + stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), + (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + multinomial_log(stan::model::rvalue(choice, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), "choice"),pred_prob_mat)), + "assigning variable log_lik"); + current_statement_begin__ = 160; + stan::model::assign(y_pred, + stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), + multinomial_rng(pred_prob_mat,1, base_rng__), + "assigning variable y_pred"); + current_statement_begin__ = 162; + if (as_bool(logical_eq(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),1))) { + + current_statement_begin__ = 163; + stan::math::assign(att_signal, 
elt_multiply(subj_att,get_base1(get_base1(choice_match_att,i,"choice_match_att",1),t,"choice_match_att",2))); + current_statement_begin__ = 164; + stan::math::assign(sum_att_signal, sum(att_signal)); + current_statement_begin__ = 165; + stan::math::assign(att_signal, divide(att_signal, sum_att_signal)); + current_statement_begin__ = 166; + stan::math::assign(tmpatt, add(multiply((1.0 - get_base1(r,i,"r",1)),subj_att),multiply(get_base1(r,i,"r",1),att_signal))); + } else { + + current_statement_begin__ = 168; + stan::math::assign(att_signal, elt_multiply(subj_att,subtract(unit,get_base1(get_base1(choice_match_att,i,"choice_match_att",1),t,"choice_match_att",2)))); + current_statement_begin__ = 169; + stan::math::assign(sum_att_signal, sum(att_signal)); + current_statement_begin__ = 170; + stan::math::assign(att_signal, divide(att_signal, sum_att_signal)); + current_statement_begin__ = 171; + stan::math::assign(tmpatt, add(multiply((1.0 - get_base1(p,i,"p",1)),subj_att),multiply(get_base1(p,i,"p",1),att_signal))); + } + current_statement_begin__ = 174; + stan::math::assign(subj_att, add(multiply(divide(tmpatt,sum(tmpatt)),0.99980000000000002),0.0001)); + current_statement_begin__ = 176; + stan::model::assign(tmpatt, + stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list())), + pow(get_base1(subj_att,1,1,"subj_att",1),get_base1(d,i,"d",1)), + "assigning variable tmpatt"); + current_statement_begin__ = 177; + stan::model::assign(tmpatt, + stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list())), + pow(get_base1(subj_att,1,2,"subj_att",1),get_base1(d,i,"d",1)), + "assigning variable tmpatt"); + current_statement_begin__ = 178; + stan::model::assign(tmpatt, + stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_uni(3), stan::model::nil_index_list())), + 
pow(get_base1(subj_att,1,3,"subj_att",1),get_base1(d,i,"d",1)), + "assigning variable tmpatt"); + current_statement_begin__ = 180; + if (as_bool(logical_lt(t,get_base1(Tsubj,i,"Tsubj",1)))) { + + current_statement_begin__ = 181; + stan::math::assign(tmpp, add(multiply(to_vector(multiply(tmpatt,stan::model::rvalue(deck_match_rule, stan::model::cons_list(stan::model::index_uni((t + 1)), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list()))), "deck_match_rule"))),0.99980000000000002),0.0001)); + current_statement_begin__ = 182; + stan::math::assign(pred_prob_mat, divide(tmpp,sum(tmpp))); + } + } + } + } + + // validate generated quantities + current_statement_begin__ = 117; + check_greater_or_equal(function__,"mu_r",mu_r,0); + check_less_or_equal(function__,"mu_r",mu_r,1); + current_statement_begin__ = 118; + check_greater_or_equal(function__,"mu_p",mu_p,0); + check_less_or_equal(function__,"mu_p",mu_p,1); + current_statement_begin__ = 119; + check_greater_or_equal(function__,"mu_d",mu_d,0); + check_less_or_equal(function__,"mu_d",mu_d,5); + current_statement_begin__ = 122; + current_statement_begin__ = 125; + + // write generated quantities + vars__.push_back(mu_r); + vars__.push_back(mu_p); + vars__.push_back(mu_d); + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(log_lik[k_0__]); + } + for (int k_2__ = 0; k_2__ < T; ++k_2__) { + for (int k_1__ = 0; k_1__ < 4; ++k_1__) { + for (int k_0__ = 0; k_0__ < N; ++k_0__) { + vars__.push_back(y_pred[k_0__][k_1__][k_2__]); + } + } + } + + } catch (const std::exception& e) { + stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); + // Next line prevents compiler griping about no return + throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); + } + } + + template + void write_array(RNG& base_rng, + Eigen::Matrix& params_r, + Eigen::Matrix& vars, + bool include_tparams = true, + bool include_gqs = 
true, + std::ostream* pstream = 0) const { + std::vector params_r_vec(params_r.size()); + for (int i = 0; i < params_r.size(); ++i) + params_r_vec[i] = params_r(i); + std::vector vars_vec; + std::vector params_i_vec; + write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); + vars.resize(vars_vec.size()); + for (int i = 0; i < vars.size(); ++i) + vars(i) = vars_vec[i]; + } + + static std::string model_name() { + return "model_wcs_sql"; + } + + + void constrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "r_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "p_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "d_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + + if (!include_gqs__ && !include_tparams__) return; + + if (include_tparams__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "r" << '.' 
<< k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "p" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "d" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + } + + + if (!include_gqs__) return; + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_r"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_p"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_d"; + param_names__.push_back(param_name_stream__.str()); + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "log_lik" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_2__ = 1; k_2__ <= T; ++k_2__) { + for (int k_1__ = 1; k_1__ <= 4; ++k_1__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; + param_names__.push_back(param_name_stream__.str()); + } + } + } + } + + + void unconstrained_param_names(std::vector& param_names__, + bool include_tparams__ = true, + bool include_gqs__ = true) const { + std::stringstream param_name_stream__; + for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "sigma" << '.' 
<< k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "r_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "p_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "d_pr" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + + if (!include_gqs__ && !include_tparams__) return; + + if (include_tparams__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "r" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "p" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "d" << '.' << k_0__; + param_names__.push_back(param_name_stream__.str()); + } + } + + + if (!include_gqs__) return; + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_r"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_p"; + param_names__.push_back(param_name_stream__.str()); + param_name_stream__.str(std::string()); + param_name_stream__ << "mu_d"; + param_names__.push_back(param_name_stream__.str()); + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "log_lik" << '.' 
<< k_0__; + param_names__.push_back(param_name_stream__.str()); + } + for (int k_2__ = 1; k_2__ <= T; ++k_2__) { + for (int k_1__ = 1; k_1__ <= 4; ++k_1__) { + for (int k_0__ = 1; k_0__ <= N; ++k_0__) { + param_name_stream__.str(std::string()); + param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; + param_names__.push_back(param_name_stream__.str()); + } + } + } + } + +}; // model + +} + +typedef model_wcs_sql_namespace::model_wcs_sql stan_model; + + +#endif diff --git a/inst/stan_files/wcs_sql.o b/inst/stan_files/wcs_sql.o new file mode 100644 index 00000000..505aacab Binary files /dev/null and b/inst/stan_files/wcs_sql.o differ diff --git a/exec/wcs_sql.stan b/inst/stan_files/wcs_sql.stan old mode 100755 new mode 100644 similarity index 84% rename from exec/wcs_sql.stan rename to inst/stan_files/wcs_sql.stan index c7a050f0..81b8ce17 --- a/exec/wcs_sql.stan +++ b/inst/stan_files/wcs_sql.stan @@ -1,3 +1,5 @@ +#include /pre/license.stan + data { int N; // number of subjects int T; // max trial @@ -11,7 +13,7 @@ data { transformed data { matrix[1, 3] initAtt; // each subject start with an even attention to each dimension - matrix[1, 3] unit; // used to flip attention after punishing feedback insde the model + matrix[1, 3] unit; // used to flip attention after punishing feedback inside the model initAtt = rep_matrix(1.0/3.0, 1, 3); unit = rep_matrix(1.0, 1, 3); @@ -32,7 +34,7 @@ transformed parameters { // transform subject-level raw parameters vector[N] r; vector[N] p; - vector[N] d; + vector[N] d; for (i in 1:N) { r[i] = Phi_approx( mu_pr[1] + sigma[1] * r_pr[i] ); @@ -54,9 +56,10 @@ model { for (i in 1:N) { // define values vector[4] pred_prob_mat; // predicted probability of choosing a deck in each trial based on attention - matrix[1, 3] subj_att; // subject's attention to each dimension - matrix[1, 3] att_signal; // signal where a subject has to pay attention after reward/punishment - matrix[1, 3] tmpatt; // temporary variable to 
calculate subj_att + matrix[1, 3] subj_att; // subject's attention to each dimension + matrix[1, 3] att_signal; // signal where a subject has to pay attention after reward/punishment + real sum_att_signal; // temporary variable to calculate sum(att_signal) + matrix[1, 3] tmpatt; // temporary variable to calculate subj_att vector[4] tmpp; // temporary variable to calculate pred_prob_mat // initiate values @@ -70,15 +73,17 @@ model { // re-distribute attention after getting a feedback if (outcome[i,t] == 1) { att_signal = subj_att .* choice_match_att[i,t]; - att_signal = att_signal/sum(att_signal); + sum_att_signal = sum(att_signal); + att_signal /= sum_att_signal; tmpatt = (1.0 - r[i])*subj_att + r[i]*att_signal; } else { att_signal = subj_att .* (unit - choice_match_att[i,t]); - att_signal = att_signal/sum(att_signal); + sum_att_signal = sum(att_signal); + att_signal /= sum_att_signal; tmpatt = (1.0 - p[i])*subj_att + p[i]*att_signal; } - // scaling to avoide log(0) + // scaling to avoid log(0) subj_att = (tmpatt/sum(tmpatt))*.9998+.0001; tmpatt[1, 1] = pow(subj_att[1, 1],d[i]); @@ -97,7 +102,7 @@ model { generated quantities { // for group level parameters real mu_r; - real mu_p; + real mu_p; real mu_d; // for log-likelihood calculation @@ -128,6 +133,8 @@ generated quantities { matrix[1, 3] tmpatt; vector[4] tmpp; + real sum_att_signal; + subj_att = initAtt; pred_prob_mat = to_vector(subj_att*deck_match_rule[1,,]); @@ -135,17 +142,19 @@ generated quantities { for (t in 1:Tsubj[i]) { - log_lik[i] = log_lik[i] + multinomial_lpmf(choice[i,,t] | pred_prob_mat); + log_lik[i] += multinomial_lpmf(choice[i,,t] | pred_prob_mat); y_pred[i,,t] = multinomial_rng(pred_prob_mat, 1); if(outcome[i,t] == 1) { att_signal = subj_att .* choice_match_att[i,t]; - att_signal = att_signal/sum(att_signal); + sum_att_signal = sum(att_signal); + att_signal /= sum_att_signal; tmpatt = (1.0 - r[i])*subj_att + r[i]*att_signal; } else { att_signal = subj_att .* (unit - choice_match_att[i,t]); 
- att_signal = att_signal/sum(att_signal); + sum_att_signal = sum(att_signal); + att_signal /= sum_att_signal; tmpatt = (1.0 - p[i])*subj_att + p[i]*att_signal; } @@ -164,3 +173,4 @@ generated quantities { } // end of subject loop } // end of local section } + diff --git a/man-roxygen/ModelFunctionInfo.schema.json b/man-roxygen/ModelFunctionInfo.schema.json new file mode 100644 index 00000000..e95dd7c2 --- /dev/null +++ b/man-roxygen/ModelFunctionInfo.schema.json @@ -0,0 +1,42 @@ +{ + "title": "Model Function Info", + "type": "object", + "required": ["model_function", "data_columns", "data_list", "parameters", "gen_init"], + "properties": { + "model_function": { + "type": "string" + }, + "data_columns": { + "type": "array", + "items": { + "type": "string" + } + }, + "data_list": { + "type": "array", + "items": { + "type": "string" + } + }, + "parameters": { + "type": "array", + "items": { + "type": "string" + } + }, + "gen_init": { + "type": "array", + "items": { + "type": "array", + "minItems": 3, + "maxItems": 3 + } + }, + "regressors": { + "type": "array", + "items": { + "type": "string" + } + } + } +} diff --git a/man-roxygen/ModelFunctionInfo.schema.md b/man-roxygen/ModelFunctionInfo.schema.md new file mode 100644 index 00000000..40a6de3f --- /dev/null +++ b/man-roxygen/ModelFunctionInfo.schema.md @@ -0,0 +1,11 @@ +### Information currently kept track of by JSON Schema: +Property | Required | Explanation +-|-|- +"model_function" | o | Name of the model function. +"data_columns" | o | Necessary data columns for the user data. +"data_list" | o | List of preprocessed user data that gets passed to Stan. +"parameters" | o | Parameters of this model. +"gen_init" | o | Initial value & bounds of the parameters **used in the R file**.
*\* Note that these bounds are just for setting the initial values; these bounds may differ from the boundary constraints given to the parameters in the Stan file.* +"regressors" | x | Regressors of this model. + +#### Written by Jethro Lee. diff --git a/man-roxygen/data/bandit2arm_delta.json b/man-roxygen/data/bandit2arm_delta.json new file mode 100644 index 00000000..1136c434 --- /dev/null +++ b/man-roxygen/data/bandit2arm_delta.json @@ -0,0 +1,7 @@ +{ + "model_function": "bandit2arm_delta", + "data_columns": ["subjID", "choice", "outcome"], + "data_list": ["N", "T", "Tsubj", "choice", "outcome"], + "parameters": ["A", "tau"], + "gen_init": [[0, 0.5, 1], [0, 1, 5]] +} diff --git a/man-roxygen/data/bandit4arm2_kalman_filter.json b/man-roxygen/data/bandit4arm2_kalman_filter.json new file mode 100644 index 00000000..91203c3b --- /dev/null +++ b/man-roxygen/data/bandit4arm2_kalman_filter.json @@ -0,0 +1,7 @@ +{ + "model_function": "bandit4arm2_kalman_filter", + "data_columns": ["subjID", "choice", "outcome"], + "data_list": ["N", "T", "Tsubj", "choice", "outcome"], + "parameters": ["lambda", "theta", "beta", "mu0", "sigma0", "sigmaD"], + "gen_init": [[0, 0.9, 1], [0, 50, 100], [0, 0.1, 1], [0, 85, 100], [0, 6, 15], [0, 3, 15]] +} diff --git a/man-roxygen/data/bandit4arm_4par.json b/man-roxygen/data/bandit4arm_4par.json new file mode 100644 index 00000000..75d2fde9 --- /dev/null +++ b/man-roxygen/data/bandit4arm_4par.json @@ -0,0 +1,7 @@ +{ + "model_function": "bandit4arm_4par", + "data_columns": ["subjID", "choice", "gain", "loss"], + "data_list": ["N", "T", "Tsubj", "rew", "los", "choice"], + "parameters": ["Arew", "Apun", "R", "P"], + "gen_init": [[0, 0.1, 1], [0, 0.1, 1], [0, 1, 30], [0, 1, 30]] +} diff --git a/man-roxygen/data/bandit4arm_lapse.json b/man-roxygen/data/bandit4arm_lapse.json new file mode 100644 index 00000000..684a506a --- /dev/null +++ b/man-roxygen/data/bandit4arm_lapse.json @@ -0,0 +1,7 @@ +{ + "model_function": "bandit4arm_lapse", + 
"data_columns": ["subjID", "choice", "gain", "loss"], + "data_list": ["N", "T", "Tsubj", "rew", "los", "choice"], + "parameters": ["Arew", "Apun", "R", "P", "xi"], + "gen_init": [[0, 0.1, 1], [0, 0.1, 1], [0, 1, 30], [0, 1, 30], [0, 0.1, 1]] +} diff --git a/man-roxygen/data/bart_par4.json b/man-roxygen/data/bart_par4.json new file mode 100644 index 00000000..f302d90f --- /dev/null +++ b/man-roxygen/data/bart_par4.json @@ -0,0 +1,7 @@ +{ + "model_function": "bart_par4", + "data_columns": ["subjID", "pumps", "explosion"], + "data_list": ["N", "T", "Tsubj", "P", "pumps", "explosion"], + "parameters": ["phi", "eta", "gam", "tau"], + "gen_init": [[0, 0.5, 1], [0, 1, "inf"], [0, 1, "inf"], [0, 1, "inf"]] +} diff --git a/man-roxygen/data/choiceRT_ddm.json b/man-roxygen/data/choiceRT_ddm.json new file mode 100644 index 00000000..d68d1560 --- /dev/null +++ b/man-roxygen/data/choiceRT_ddm.json @@ -0,0 +1,7 @@ +{ + "model_function": "choiceRT_ddm", + "data_columns": ["subjID", "choice", "RT"], + "data_list": ["N", "Nu_max", "Nl_max", "Nu", "Nl", "RTu", "RTl", "minRT", "RTbound"], + "parameters": ["alpha", "beta", "delta", "tau"], + "gen_init": [[0, 0.5, "inf"], [0, 0.5, 1], [0, 0.5, "inf"], [0, 0.15, 1]] +} diff --git a/man-roxygen/data/choiceRT_ddm_single.json b/man-roxygen/data/choiceRT_ddm_single.json new file mode 100644 index 00000000..f3a3f651 --- /dev/null +++ b/man-roxygen/data/choiceRT_ddm_single.json @@ -0,0 +1,7 @@ +{ + "model_function": "choiceRT_ddm_single", + "data_columns": ["subjID", "choice", "RT"], + "data_list": ["Nu", "Nl", "RTu", "RTl", "minRT", "RTbound"], + "parameters": ["alpha", "beta", "delta", "tau"], + "gen_init": [["None", 0.5, "None"], ["None", 0.5, "None"], ["None", 0.5, "None"], ["None", 0.15, "None"]] +} diff --git a/man-roxygen/data/choiceRT_lba_single.json b/man-roxygen/data/choiceRT_lba_single.json new file mode 100644 index 00000000..8874e8bb --- /dev/null +++ b/man-roxygen/data/choiceRT_lba_single.json @@ -0,0 +1,7 @@ +{ + 
"model_function": "choiceRT_lba_single", + "data_columns": ["subjID", "choice", "RT", "condition"], + "data_list": ["N_choice", "N_cond", "tr_cond", "max_tr", "RT"], + "parameters": ["d", "A", "v", "tau"], + "gen_init": [["None", 0.25, "None"], ["None", 0.75, "None"], ["None", 2, "None"], ["None", 0.2, "None"]] +} diff --git a/man-roxygen/data/cra_exp.json b/man-roxygen/data/cra_exp.json new file mode 100644 index 00000000..0290526c --- /dev/null +++ b/man-roxygen/data/cra_exp.json @@ -0,0 +1,8 @@ +{ + "model_function": "cra_exp", + "data_columns": ["subjID", "prob", "ambig", "reward_var", "reward_fix", "choice"], + "data_list": ["N", "T", "Tsubj", "choice", "prob", "ambig", "reward_var", "reward_fix"], + "parameters": ["alpha", "beta", "gamma"], + "gen_init": [[0, 1, 2], ["-inf", 0, "inf"], [0, 1, "inf"]], + "regressors": ["sv", "sv_fix", "sv_var", "p_var"] +} diff --git a/man-roxygen/data/cra_linear.json b/man-roxygen/data/cra_linear.json new file mode 100644 index 00000000..99d53c4f --- /dev/null +++ b/man-roxygen/data/cra_linear.json @@ -0,0 +1,8 @@ +{ + "model_function": "cra_linear", + "data_columns": ["subjID", "prob", "ambig", "reward_var", "reward_fix", "choice"], + "data_list": ["N", "T", "Tsubj", "choice", "prob", "ambig", "reward_var", "reward_fix"], + "parameters": ["alpha", "beta", "gamma"], + "gen_init": [[0, 1, 2], ["-inf", 0, "inf"], [0, 1, "inf"]], + "regressors": ["sv", "sv_fix", "sv_var", "p_var"] +} diff --git a/man-roxygen/data/dbdm_prob_weight.json b/man-roxygen/data/dbdm_prob_weight.json new file mode 100644 index 00000000..f46dbce5 --- /dev/null +++ b/man-roxygen/data/dbdm_prob_weight.json @@ -0,0 +1,7 @@ +{ + "model_function": "dbdm_prob_weight", + "data_columns": ["subjID", "opt1hprob", "opt2hprob", "opt1hval", "opt1lval", "opt2hval", "opt2lval", "choice"], + "data_list": ["N", "T", "Tsubj", "opt1hprob", "opt2hprob", "opt1hval", "opt1lval", "opt2hval", "opt2lval", "choice"], + "parameters": ["tau", "rho", "lambda", "beta"], + "gen_init": 
[[0, 0.8, 1], [0, 0.7, 2], [0, 2.5, 5], [0, 0.2, 1]] +} diff --git a/man-roxygen/data/dd_cs.json b/man-roxygen/data/dd_cs.json new file mode 100644 index 00000000..197077dd --- /dev/null +++ b/man-roxygen/data/dd_cs.json @@ -0,0 +1,7 @@ +{ + "model_function": "dd_cs", + "data_columns": ["subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice"], + "data_list": ["N", "T", "Tsubj", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice"], + "parameters": ["r", "s", "beta"], + "gen_init": [[0, 0.1, 1], [0, 1, 10], [0, 1, 5]] +} diff --git a/man-roxygen/data/dd_cs_single.json b/man-roxygen/data/dd_cs_single.json new file mode 100644 index 00000000..34cdf06f --- /dev/null +++ b/man-roxygen/data/dd_cs_single.json @@ -0,0 +1,7 @@ +{ + "model_function": "dd_cs_single", + "data_columns": ["subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice"], + "data_list": ["Tsubj", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice"], + "parameters": ["r", "s", "beta"], + "gen_init": [["None", 0.1, "None"], ["None", 1, "None"], ["None", 1, "None"]] +} diff --git a/man-roxygen/data/dd_exp.json b/man-roxygen/data/dd_exp.json new file mode 100644 index 00000000..31edb8b4 --- /dev/null +++ b/man-roxygen/data/dd_exp.json @@ -0,0 +1,7 @@ +{ + "model_function": "dd_exp", + "data_columns": ["subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice"], + "data_list": ["N", "T", "Tsubj", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice"], + "parameters": ["r", "beta"], + "gen_init": [[0, 0.1, 1], [0, 1, 5]] +} diff --git a/man-roxygen/data/dd_hyperbolic.json b/man-roxygen/data/dd_hyperbolic.json new file mode 100644 index 00000000..2f4cff4a --- /dev/null +++ b/man-roxygen/data/dd_hyperbolic.json @@ -0,0 +1,7 @@ +{ + "model_function": "dd_hyperbolic", + "data_columns": ["subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice"], + 
"data_list": ["N", "T", "Tsubj", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice"], + "parameters": ["k", "beta"], + "gen_init": [[0, 0.1, 1], [0, 1, 5]] +} diff --git a/man-roxygen/data/dd_hyperbolic_single.json b/man-roxygen/data/dd_hyperbolic_single.json new file mode 100644 index 00000000..05f2fc61 --- /dev/null +++ b/man-roxygen/data/dd_hyperbolic_single.json @@ -0,0 +1,7 @@ +{ + "model_function": "dd_hyperbolic_single", + "data_columns": ["subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice"], + "data_list": ["Tsubj", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice"], + "parameters": ["k", "beta"], + "gen_init": [["None", 0.1, "None"], ["None", 1, "None"]] +} diff --git a/man-roxygen/data/gng_m1.json b/man-roxygen/data/gng_m1.json new file mode 100644 index 00000000..e0c699c9 --- /dev/null +++ b/man-roxygen/data/gng_m1.json @@ -0,0 +1,8 @@ +{ + "model_function": "gng_m1", + "data_columns": ["subjID", "cue", "keyPressed", "outcome"], + "data_list": ["N", "T", "Tsubj", "cue", "pressed", "outcome"], + "parameters": ["xi", "ep", "rho"], + "gen_init": [[0, 0.1, 1], [0, 0.2, 1], [0, "exp(2)", "inf"]], + "regressors": ["Qgo", "Qnogo", "Wgo", "Wnogo"] +} diff --git a/man-roxygen/data/gng_m2.json b/man-roxygen/data/gng_m2.json new file mode 100644 index 00000000..1d2369ae --- /dev/null +++ b/man-roxygen/data/gng_m2.json @@ -0,0 +1,8 @@ +{ + "model_function": "gng_m2", + "data_columns": ["subjID", "cue", "keyPressed", "outcome"], + "data_list": ["N", "T", "Tsubj", "cue", "pressed", "outcome"], + "parameters": ["xi", "ep", "b", "rho"], + "gen_init": [[0, 0.1, 1], [0, 0.2, 1], ["-inf", 0, "inf"], [0, "exp(2)", "inf"]], + "regressors": ["Qgo", "Qnogo", "Wgo", "Wnogo"] +} diff --git a/man-roxygen/data/gng_m3.json b/man-roxygen/data/gng_m3.json new file mode 100644 index 00000000..a24ac8e8 --- /dev/null +++ b/man-roxygen/data/gng_m3.json @@ -0,0 +1,8 @@ +{ + "model_function": "gng_m3", + 
"data_columns": ["subjID", "cue", "keyPressed", "outcome"], + "data_list": ["N", "T", "Tsubj", "cue", "pressed", "outcome"], + "parameters": ["xi", "ep", "b", "pi", "rho"], + "gen_init": [[0, 0.1, 1], [0, 0.2, 1], ["-inf", 0, "inf"], ["-inf", 0, "inf"], [0, "exp(2)", "inf"]], + "regressors": ["Qgo", "Qnogo", "Wgo", "Wnogo", "SV"] +} diff --git a/man-roxygen/data/gng_m4.json b/man-roxygen/data/gng_m4.json new file mode 100644 index 00000000..5432dc59 --- /dev/null +++ b/man-roxygen/data/gng_m4.json @@ -0,0 +1,8 @@ +{ + "model_function": "gng_m4", + "data_columns": ["subjID", "cue", "keyPressed", "outcome"], + "data_list": ["N", "T", "Tsubj", "cue", "pressed", "outcome"], + "parameters": ["xi", "ep", "b", "pi", "rhoRew", "rhoPun"], + "gen_init": [[0, 0.1, 1], [0, 0.2, 1], ["-inf", 0, "inf"], ["-inf", 0, "inf"], [0, "exp(2)", "inf"], [0, "exp(2)", "inf"]], + "regressors": ["Qgo", "Qnogo", "Wgo", "Wnogo", "SV"] +} diff --git a/man-roxygen/data/igt_orl.json b/man-roxygen/data/igt_orl.json new file mode 100644 index 00000000..68e66ce2 --- /dev/null +++ b/man-roxygen/data/igt_orl.json @@ -0,0 +1,7 @@ +{ + "model_function": "igt_orl", + "data_columns": ["subjID", "choice", "gain", "loss"], + "data_list": ["N", "T", "Tsubj", "choice", "outcome", "sign_out"], + "parameters": ["Arew", "Apun", "K", "betaF", "betaP"], + "gen_init": [[0, 0.1, 1], [0, 0.1, 1], [0, 0.1, 5], ["-inf", 0.1, "inf"], ["-inf", 1, "inf"]] +} diff --git a/man-roxygen/data/igt_pvl_decay.json b/man-roxygen/data/igt_pvl_decay.json new file mode 100644 index 00000000..56327ee1 --- /dev/null +++ b/man-roxygen/data/igt_pvl_decay.json @@ -0,0 +1,7 @@ +{ + "model_function": "igt_pvl_decay", + "data_columns": ["subjID", "choice", "gain", "loss"], + "data_list": ["N", "T", "Tsubj", "choice", "outcome"], + "parameters": ["A", "alpha", "cons", "lambda"], + "gen_init": [[0, 0.5, 1], [0, 0.5, 2], [0, 1, 5], [0, 1, 10]] +} diff --git a/man-roxygen/data/igt_pvl_delta.json b/man-roxygen/data/igt_pvl_delta.json new file 
mode 100644 index 00000000..cfe67409 --- /dev/null +++ b/man-roxygen/data/igt_pvl_delta.json @@ -0,0 +1,7 @@ +{ + "model_function": "igt_pvl_delta", + "data_columns": ["subjID", "choice", "gain", "loss"], + "data_list": ["N", "T", "Tsubj", "choice", "outcome"], + "parameters": ["A", "alpha", "cons", "lambda"], + "gen_init": [[0, 0.5, 1], [0, 0.5, 2], [0, 1, 5], [0, 1, 10]] +} diff --git a/man-roxygen/data/igt_vpp.json b/man-roxygen/data/igt_vpp.json new file mode 100644 index 00000000..b8ae48be --- /dev/null +++ b/man-roxygen/data/igt_vpp.json @@ -0,0 +1,7 @@ +{ + "model_function": "igt_vpp", + "data_columns": ["subjID", "choice", "gain", "loss"], + "data_list": ["N", "T", "Tsubj", "choice", "outcome"], + "parameters": ["A", "alpha", "cons", "lambda", "epP", "epN", "K", "w"], + "gen_init": [[0, 0.5, 1], [0, 0.5, 2], [0, 1, 5], [0, 1, 10], ["-inf", 0, "inf"], ["-inf", 0, "inf"], [0, 0.5, 1], [0, 0.5, 1]] +} diff --git a/man-roxygen/data/peer_ocu.json b/man-roxygen/data/peer_ocu.json new file mode 100644 index 00000000..ef0fb04f --- /dev/null +++ b/man-roxygen/data/peer_ocu.json @@ -0,0 +1,7 @@ +{ + "model_function": "peer_ocu", + "data_columns": ["subjID", "condition", "p_gamble", "safe_Hpayoff", "safe_Lpayoff", "risky_Hpayoff", "risky_Lpayoff", "choice"], + "data_list": ["N", "T", "Tsubj", "condition", "p_gamble", "safe_Hpayoff", "safe_Lpayoff", "risky_Hpayoff", "risky_Lpayoff", "choice"], + "parameters": ["rho", "tau", "ocu"], + "gen_init": [[0, 1, 2], [0, 1, "inf"], ["-inf", 0, "inf"]] +} diff --git a/man-roxygen/data/prl_ewa.json b/man-roxygen/data/prl_ewa.json new file mode 100644 index 00000000..321479e6 --- /dev/null +++ b/man-roxygen/data/prl_ewa.json @@ -0,0 +1,8 @@ +{ + "model_function": "prl_ewa", + "data_columns": ["subjID", "choice", "outcome"], + "data_list": ["N", "T", "Tsubj", "choice", "outcome"], + "parameters": ["phi", "rho", "beta"], + "gen_init": [[0, 0.5, 1], [0, 0.1, 1], [0, 1, 10]], + "regressors": ["ev_c", "ev_nc", "ew_c", "ew_nc"] +} diff 
--git a/man-roxygen/data/prl_fictitious.json b/man-roxygen/data/prl_fictitious.json new file mode 100644 index 00000000..ce3307d6 --- /dev/null +++ b/man-roxygen/data/prl_fictitious.json @@ -0,0 +1,8 @@ +{ + "model_function": "prl_fictitious", + "data_columns": ["subjID", "choice", "outcome"], + "data_list": ["N", "T", "Tsubj", "choice", "outcome"], + "parameters": ["eta", "alpha", "beta"], + "gen_init": [[0, 0.5, 1], ["-inf", 0, "inf"], [0, 1, 10]], + "regressors": ["ev_c", "ev_nc", "pe_c", "pe_nc", "dv"] +} diff --git a/man-roxygen/data/prl_fictitious_multipleB.json b/man-roxygen/data/prl_fictitious_multipleB.json new file mode 100644 index 00000000..fef7baa0 --- /dev/null +++ b/man-roxygen/data/prl_fictitious_multipleB.json @@ -0,0 +1,8 @@ +{ + "model_function": "prl_fictitious_multipleB", + "data_columns": ["subjID", "block", "choice", "outcome"], + "data_list": ["N", "B", "Bsubj", "T", "Tsubj", "choice", "outcome"], + "parameters": ["eta", "alpha", "beta"], + "gen_init": [[0, 0.5, 1], ["-inf", 0, "inf"], [0, 1, 10]], + "regressors": ["ev_c", "ev_nc", "pe_c", "pe_nc", "dv"] +} diff --git a/man-roxygen/data/prl_fictitious_rp.json b/man-roxygen/data/prl_fictitious_rp.json new file mode 100644 index 00000000..39050f7f --- /dev/null +++ b/man-roxygen/data/prl_fictitious_rp.json @@ -0,0 +1,8 @@ +{ + "model_function": "prl_fictitious_rp", + "data_columns": ["subjID", "choice", "outcome"], + "data_list": ["N", "T", "Tsubj", "choice", "outcome"], + "parameters": ["eta_pos", "eta_neg", "alpha", "beta"], + "gen_init": [[0, 0.5, 1], [0, 0.5, 1], ["-inf", 0, "inf"], [0, 1, 10]], + "regressors": ["ev_c", "ev_nc", "pe_c", "pe_nc", "dv"] +} diff --git a/man-roxygen/data/prl_fictitious_rp_woa.json b/man-roxygen/data/prl_fictitious_rp_woa.json new file mode 100644 index 00000000..3781565d --- /dev/null +++ b/man-roxygen/data/prl_fictitious_rp_woa.json @@ -0,0 +1,8 @@ +{ + "model_function": "prl_fictitious_rp_woa", + "data_columns": ["subjID", "choice", "outcome"], + 
"data_list": ["N", "T", "Tsubj", "choice", "outcome"], + "parameters": ["eta_pos", "eta_neg", "beta"], + "gen_init": [[0, 0.5, 1], [0, 0.5, 1], [0, 1, 10]], + "regressors": ["ev_c", "ev_nc", "pe_c", "pe_nc", "dv"] +} diff --git a/man-roxygen/data/prl_fictitious_woa.json b/man-roxygen/data/prl_fictitious_woa.json new file mode 100644 index 00000000..dac178bd --- /dev/null +++ b/man-roxygen/data/prl_fictitious_woa.json @@ -0,0 +1,8 @@ +{ + "model_function": "prl_fictitious_woa", + "data_columns": ["subjID", "choice", "outcome"], + "data_list": ["N", "T", "Tsubj", "choice", "outcome"], + "parameters": ["eta", "beta"], + "gen_init": [[0, 0.5, 1], [0, 1, 10]], + "regressors": ["ev_c", "ev_nc", "pe_c", "pe_nc", "dv"] +} diff --git a/man-roxygen/data/prl_rp.json b/man-roxygen/data/prl_rp.json new file mode 100644 index 00000000..e570964f --- /dev/null +++ b/man-roxygen/data/prl_rp.json @@ -0,0 +1,8 @@ +{ + "model_function": "prl_rp", + "data_columns": ["subjID", "choice", "outcome"], + "data_list": ["N", "T", "Tsubj", "choice", "outcome"], + "parameters": ["Apun", "Arew", "beta"], + "gen_init": [[0, 0.1, 1], [0, 0.1, 1], [0, 1, 10]], + "regressors": ["ev_c", "ev_nc", "pe"] +} diff --git a/man-roxygen/data/prl_rp_multipleB.json b/man-roxygen/data/prl_rp_multipleB.json new file mode 100644 index 00000000..53106df4 --- /dev/null +++ b/man-roxygen/data/prl_rp_multipleB.json @@ -0,0 +1,8 @@ +{ + "model_function": "prl_rp_multipleB", + "data_columns": ["subjID", "block", "choice", "outcome"], + "data_list": ["N", "B", "Bsubj", "T", "Tsubj", "choice", "outcome"], + "parameters": ["Apun", "Arew", "beta"], + "gen_init": [[0, 0.1, 1], [0, 0.1, 1], [0, 1, 10]], + "regressors": ["ev_c", "ev_nc", "pe"] +} diff --git a/man-roxygen/data/pst_gainloss_Q.json b/man-roxygen/data/pst_gainloss_Q.json new file mode 100644 index 00000000..b7c25374 --- /dev/null +++ b/man-roxygen/data/pst_gainloss_Q.json @@ -0,0 +1,7 @@ +{ + "model_function": "pst_gainloss_Q", + "data_columns": ["subjID", 
"type", "choice", "reward"], + "data_list": ["N", "T", "Tsubj", "option1", "option2", "choice", "reward"], + "parameters": ["alpha_pos", "alpha_neg", "beta"], + "gen_init": [[0, 0.5, 1], [0, 0.5, 1], [0, 1, 10]] +} diff --git a/man-roxygen/data/ra_noLA.json b/man-roxygen/data/ra_noLA.json new file mode 100644 index 00000000..f44ce55b --- /dev/null +++ b/man-roxygen/data/ra_noLA.json @@ -0,0 +1,7 @@ +{ + "model_function": "ra_noLA", + "data_columns": ["subjID", "gain", "loss", "cert", "gamble"], + "data_list": ["N", "T", "Tsubj", "gain", "loss", "cert", "gamble"], + "parameters": ["rho", "tau"], + "gen_init": [[0, 1, 2], [0, 1, 5]] +} diff --git a/man-roxygen/data/ra_noRA.json b/man-roxygen/data/ra_noRA.json new file mode 100644 index 00000000..4d12a9cf --- /dev/null +++ b/man-roxygen/data/ra_noRA.json @@ -0,0 +1,7 @@ +{ + "model_function": "ra_noRA", + "data_columns": ["subjID", "gain", "loss", "cert", "gamble"], + "data_list": ["N", "T", "Tsubj", "gain", "loss", "cert", "gamble"], + "parameters": ["lambda", "tau"], + "gen_init": [[0, 1, 5], [0, 1, 5]] +} diff --git a/man-roxygen/data/ra_prospect.json b/man-roxygen/data/ra_prospect.json new file mode 100644 index 00000000..a5036746 --- /dev/null +++ b/man-roxygen/data/ra_prospect.json @@ -0,0 +1,7 @@ +{ + "model_function": "ra_prospect", + "data_columns": ["subjID", "gain", "loss", "cert", "gamble"], + "data_list": ["N", "T", "Tsubj", "gain", "loss", "cert", "gamble"], + "parameters": ["rho", "lambda", "tau"], + "gen_init": [[0, 1, 2], [0, 1, 5], [0, 1, 5]] +} diff --git a/man-roxygen/data/rdt_happiness.json b/man-roxygen/data/rdt_happiness.json new file mode 100644 index 00000000..bd0c5bbb --- /dev/null +++ b/man-roxygen/data/rdt_happiness.json @@ -0,0 +1,7 @@ +{ + "model_function": "rdt_happiness", + "data_columns": ["subjID", "gain", "loss", "cert", "type", "gamble", "outcome", "happy", "RT_happy"], + "data_list": ["N", "T", "Tsubj", "gain", "loss", "cert", "type", "gamble", "outcome", "happy", "RT_happy"], + 
"parameters": ["w0", "w1", "w2", "w3", "gam", "sig"], + "gen_init": [["-inf", 1, "inf"], ["-inf", 1, "inf"], ["-inf", 1, "inf"], ["-inf", 1, "inf"], [0, 0.5, 1], [0, 1, "inf"]] +} diff --git a/man-roxygen/data/ts_par4.json b/man-roxygen/data/ts_par4.json new file mode 100644 index 00000000..ea0fee8c --- /dev/null +++ b/man-roxygen/data/ts_par4.json @@ -0,0 +1,7 @@ +{ + "model_function": "ts_par4", + "data_columns": ["subjID", "level1_choice", "level2_choice", "reward"], + "data_list": ["N", "T", "Tsubj", "level1_choice", "level2_choice", "reward", "trans_prob"], + "parameters": ["a", "beta", "pi", "w"], + "gen_init": [[0, 0.5, 1], [0, 1, "inf"], [0, 1, 5], [0, 0.5, 1]] +} diff --git a/man-roxygen/data/ts_par6.json b/man-roxygen/data/ts_par6.json new file mode 100644 index 00000000..c209eb73 --- /dev/null +++ b/man-roxygen/data/ts_par6.json @@ -0,0 +1,7 @@ +{ + "model_function": "ts_par6", + "data_columns": ["subjID", "level1_choice", "level2_choice", "reward"], + "data_list": ["N", "T", "Tsubj", "level1_choice", "level2_choice", "reward", "trans_prob"], + "parameters": ["a1", "beta1", "a2", "beta2", "pi", "w"], + "gen_init": [[0, 0.5, 1], [0, 1, "inf"], [0, 0.5, 1], [0, 1, "inf"], [0, 1, 5], [0, 0.5, 1]] +} diff --git a/man-roxygen/data/ts_par7.json b/man-roxygen/data/ts_par7.json new file mode 100644 index 00000000..13e51eb5 --- /dev/null +++ b/man-roxygen/data/ts_par7.json @@ -0,0 +1,7 @@ +{ + "model_function": "ts_par7", + "data_columns": ["subjID", "level1_choice", "level2_choice", "reward"], + "data_list": ["N", "T", "Tsubj", "level1_choice", "level2_choice", "reward", "trans_prob"], + "parameters": ["a1", "beta1", "a2", "beta2", "pi", "w", "lambda"], + "gen_init": [[0, 0.5, 1], [0, 1, "inf"], [0, 0.5, 1], [0, 1, "inf"], [0, 1, 5], [0, 0.5, 1], [0, 0.5, 1]] +} diff --git a/man-roxygen/data/ug_bayes.json b/man-roxygen/data/ug_bayes.json new file mode 100644 index 00000000..4c2d8a97 --- /dev/null +++ b/man-roxygen/data/ug_bayes.json @@ -0,0 +1,7 @@ +{ + 
"model_function": "ug_bayes", + "data_columns": ["subjID", "offer", "accept"], + "data_list": ["N", "T", "Tsubj", "offer", "accept"], + "parameters": ["alpha", "beta", "tau"], + "gen_init": [[0, 1, 20], [0, 0.5, 10], [0, 1, 10]] +} diff --git a/man-roxygen/data/ug_delta.json b/man-roxygen/data/ug_delta.json new file mode 100644 index 00000000..a25d14b4 --- /dev/null +++ b/man-roxygen/data/ug_delta.json @@ -0,0 +1,7 @@ +{ + "model_function": "ug_delta", + "data_columns": ["subjID", "offer", "accept"], + "data_list": ["N", "T", "Tsubj", "offer", "accept"], + "parameters": ["alpha", "tau", "ep"], + "gen_init": [[0, 1, 20], [0, 1, 10], [0, 0.5, 1]] +} diff --git a/man-roxygen/data/wcs_sql.json b/man-roxygen/data/wcs_sql.json new file mode 100644 index 00000000..ed839ebd --- /dev/null +++ b/man-roxygen/data/wcs_sql.json @@ -0,0 +1,7 @@ +{ + "model_function": "wcs_sql", + "data_columns": ["subjID", "choice", "outcome"], + "data_list": ["N", "T", "Tsubj", "choice", "outcome", "choice_match_att", "deck_match_rule"], + "parameters": ["r", "p", "d"], + "gen_init": [[0, 0.1, 1], [0, 0.1, 1], [0, 1, 5]] +} diff --git a/man-roxygen/model-documentation.R b/man-roxygen/model-documentation.R new file mode 100644 index 00000000..19776ee3 --- /dev/null +++ b/man-roxygen/model-documentation.R @@ -0,0 +1,138 @@ +#' @title <%= TASK_NAME %> <%= get0("TASK_CITE") %> +#' +#' @description +#' <%= MODEL_TYPE %> Bayesian Modeling of the <%= TASK_NAME %> with the following parameters: +#' <%= PARAMETERS %>. +#' +#' <%= ifelse(exists("CONTRIBUTOR"), paste0("@description Contributor: ", CONTRIBUTOR), "") %> +#' +#' @description +#' \strong{MODEL:} <%= MODEL_NAME %> <%= get0("MODEL_CITE") %> +#' +#' @param data A .txt file containing the data to be modeled. Data columns should be labeled as: +#' <%= DATA_COLUMNS %>. See \bold{Details} below for more information. +#' @param niter Number of iterations, including warm-up. Defaults to 4000. 
+#' @param nwarmup Number of iterations used for warm-up only. Defaults to 1000. +#' @param nchain Number of Markov chains to run. Defaults to 4. +#' @param ncore Number of CPUs to be used for running. Defaults to 1. +#' @param nthin Every \code{i == nthin} sample will be used to generate the posterior distribution. +#' Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +#' high. +#' @param inits Character value specifying how the initial values should be generated. Options are +#' "fixed" or "random", or your own initial values. +#' @param indPars Character value specifying how to summarize individual parameters. Current options +#' are: "mean", "median", or "mode". +#' @param modelRegressor +#' <% EXISTS_REGRESSORS <- paste0("For this model they are: ", get0("REGRESSORS"), ".") %> +#' <% NOT_EXISTS_REGRESSORS <- "Currently not available for this model." %> +#' Export model-based regressors? TRUE or FALSE. +#' <%= ifelse(exists("REGRESSORS"), EXISTS_REGRESSORS, NOT_EXISTS_REGRESSORS) %> +#' @param vb Use variational inference to approximately draw from a posterior distribution. Defaults +#' to FALSE. +#' @param inc_postpred +#' <% POSTPREDS_NULL <- exists("IS_NULL_POSTPREDS") && (IS_NULL_POSTPREDS == "TRUE") %> +#' <%= ifelse(POSTPREDS_NULL, "\\strong{(Currently not available.)}", "") %> +#' Include trial-level posterior predictive simulations in model output (may greatly increase file +#' size). Defaults to FALSE. +#' @param adapt_delta Floating point value representing the target acceptance probability of a new +#' sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below. +#' @param stepsize Integer value specifying the size of each leapfrog step that the MCMC sampler can +#' take on each new iteration. See \bold{Details} below. +#' @param max_treedepth Integer value specifying how many leapfrog steps the MCMC sampler can take +#' on each new iteration. See \bold{Details} below. +#' @param ... 
+#' <% AA_EXP_1 <- "For this model, it's possible to set the following \\strong{model-specific " %> +#' <% AA_EXP_2 <- "argument} to a value that you may prefer. \\cr" %> +#' <%= ifelse(exists("ADDITIONAL_ARG"), paste0(AA_EXP_1, AA_EXP_2), "Not used for this model.") %> +#' <%= ifelse(exists("ADDITIONAL_ARG"), ADDITIONAL_ARG, "") %> +#' +#' @details +#' This section describes some of the function arguments in greater detail. +#' +#' \strong{data} should be assigned a character value specifying the full path and name (including +#' extension information, e.g. ".txt") of the file that contains the behavioral data-set of all +#' subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text +#' file, whose rows represent trial-by-trial observations and columns represent variables.\cr +#' For the <%= TASK_NAME %>, there should be <%= LENGTH_DATA_COLUMNS %> columns of data with the +#' labels <%= DATA_COLUMNS %>. It is not necessary for the columns to be in this particular order, +#' however it is necessary that they be labeled correctly and contain the information below: +#' \describe{ +#' <%= DETAILS_DATA_1 %> +#' <%= get0("DETAILS_DATA_2") %> +#' <%= get0("DETAILS_DATA_3") %> +#' <%= get0("DETAILS_DATA_4") %> +#' <%= get0("DETAILS_DATA_5") %> +#' <%= get0("DETAILS_DATA_6") %> +#' <%= get0("DETAILS_DATA_7") %> +#' <%= get0("DETAILS_DATA_8") %> +#' <%= get0("DETAILS_DATA_9") %> +#' } +#' \strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", +#' etc.), but only the data within the column names listed above will be used during the modeling. +#' As long as the necessary columns mentioned above are present and labeled correctly, there is no +#' need to remove other miscellaneous data columns. +#' +#' \strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored +#' upon the beginning of each chain. 
For those familiar with Bayesian methods, this is equivalent +#' to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the +#' sampling chains begin) can have a heavy influence on the generated posterior distributions. The +#' \code{nwarmup} argument can be set to a high number in order to curb the effects that initial +#' values have on the resulting posteriors. +#' +#' \strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling +#' sequences) should be used to draw samples from the posterior distribution. Since the posteriors +#' are generated from a sampling process, it is good practice to run multiple chains to ensure +#' that a reasonably representative posterior is attained. When the sampling is complete, it is +#' possible to check the multiple chains for convergence by running the following line of code: +#' \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". +#' +#' \strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, +#' using only every \code{i == nthin} samples to generate posterior distributions. By default, +#' \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. +#' +#' \strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are +#' advanced options that give the user more control over Stan's MCMC sampler. It is recommended +#' that only advanced users change the default values, as alterations can profoundly change the +#' sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in +#' Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for +#' more information on the sampler control parameters. One can also refer to 'Section 34.2. 
HMC +#' Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide +#' and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical +#' description of these arguments. +#' +#' @return A class "hBayesDM" object \code{modelData} with the following components: +#' \describe{ +#' \item{\code{model}}{Character value that is the name of the model ("<%= MODEL_FUNCTION %>").} +#' \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by +#' \code{indPars}) for each subject.} +#' \item{\code{parVals}}{List object containing the posterior samples over different parameters.} +#' \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan +#' model.} +#' \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by +#' the user.} +#' <% RETURN_REGRESSORS <- "\\item{\\code{modelRegressor}}{List object containing the " %> +#' <% RETURN_REGRESSORS <- paste0(RETURN_REGRESSORS, "extracted model-based regressors.}") %> +#' <%= ifelse(exists("REGRESSORS"), RETURN_REGRESSORS, "") %> +#' } +#' +#' @seealso +#' We refer users to our in-depth tutorial for an example of using hBayesDM: +#' \url{https://rpubs.com/CCSL/hBayesDM} +#' +#' @examples +#' \dontrun{ +#' # Run the model and store results in "output" +#' output <- <%= MODEL_FUNCTION %>("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) +#' +#' # Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +#' plot(output, type = "trace") +#' +#' # Check Rhat values (all Rhat values should be less than or equal to 1.1) +#' rhat(output) +#' +#' # Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) +#' plot(output) +#' +#' # Show the WAIC and LOOIC model fit estimates +#' printFit(output) +#' } diff --git a/man-roxygen/model-documentation.md b/man-roxygen/model-documentation.md new 
file mode 100644 index 00000000..60887ede --- /dev/null +++ b/man-roxygen/model-documentation.md @@ -0,0 +1,60 @@ +# How to document model functions (by Jethro Lee) + +Template Variable | Required? | Format +-|-|- +`MODEL_FUNCTION` | Y | +`CONTRIBUTOR` | *optional* | \href{   }{   }, ... +`TASK_NAME` | Y | +`TASK_CITE` | *optional* | (   ) +`MODEL_NAME` | Y | +`MODEL_CITE` | *optional* | (   ) +`MODEL_TYPE` | Y | `Hierarchical`
*or*
`Individual`
*or*
`Multiple-Block Hierarchical` +`DATA_COLUMNS` | Y | "   ", ... +`PARAMETERS` | Y | "   " (   ), ... +`REGRESSORS` | *optional* | "   ", ... +`IS_NULL_POSTPREDS` | *optional* | `TRUE` +`ADDITIONAL_ARG` | *optional* | \code{   }: *explanation here* +`LENGTH_DATA_COLUMNS` | Y | # +`DETAILS_DATA_1` | Y | `\item{"subjID"}{A unique identifier for each subject in the data-set.}` +`DETAILS_DATA_2` | *optional* | \item{"   "}{   } +`DETAILS_DATA_3` | *optional* | \item{"   "}{   } +`DETAILS_DATA_4` | *optional* | \item{"   "}{   } +`DETAILS_DATA_5` | *optional* | \item{"   "}{   } +`DETAILS_DATA_6` | *optional* | \item{"   "}{   } +`DETAILS_DATA_7` | *optional* | \item{"   "}{   } +`DETAILS_DATA_8` | *optional* | \item{"   "}{   } +`DETAILS_DATA_9` | *optional* | \item{"   "}{   } + +## Example: `igt_pvl_decay.R` +```R +#' @templateVar MODEL_FUNCTION igt_pvl_decay +#' @templateVar TASK_NAME Iowa Gambling Task +#' @templateVar MODEL_NAME Prospect Valence Learning (PVL) Decay-RI +#' @templateVar MODEL_CITE (Ahn et al., 2014, Frontiers in Psychology) +#' @templateVar MODEL_TYPE Hierarchical +#' @templateVar DATA_COLUMNS "subjID", "choice", "gain", "loss" +#' @templateVar PARAMETERS "A" (decay rate), "alpha" (outcome sensitivity), "cons" (response consistency), "lambda" (loss aversion) +#' @templateVar ADDITIONAL_ARG \code{payscale}: Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100. +#' @templateVar LENGTH_DATA_COLUMNS 4 +#' @templateVar DETAILS_DATA_1 \item{"subjID"}{A unique identifier for each subject in the data-set.} +#' @templateVar DETAILS_DATA_2 \item{"choice"}{Integer indicating which deck was chosen on that trial (where A==1, B==2, C==3, and D==4).} +#' @templateVar DETAILS_DATA_3 \item{"gain"}{Floating point value representing the amount of currency won on that trial (e.g. 50, 100).} +#' @templateVar DETAILS_DATA_4 \item{"loss"}{Floating point value representing the amount of currency lost on that trial (e.g. 
0, -50).} +#' +#' @template model-documentation +#' +#' @export +#' @include hBayesDM_model.R +#' +#' @references +#' Ahn, W.-Y., Vasilev, G., Lee, S.-H., Busemeyer, J. R., Kruschke, J. K., Bechara, A., & Vassileva, +#' J. (2014). Decision-making in stimulant and opiate addicts in protracted abstinence: evidence +#' from computational modeling with pure users. Frontiers in Psychology, 5, 1376. +#' http://doi.org/10.3389/fpsyg.2014.00849 +``` + +## How to work with the template: `model-documentation.R` +- R expressions between `<%` and `%>` are **executed** in-place. +- The value of the R expression between `<%=` and `%>` is **printed**. +- All text outside of that is printed *as-is*. +#### See more: roxygen2 uses [brew](https://www.rdocumentation.org/packages/brew/versions/1.0-6/topics/brew) to preprocess the template. diff --git a/man/bandit2arm_delta.Rd b/man/bandit2arm_delta.Rd index 54e103ff..a8a33501 100644 --- a/man/bandit2arm_delta.Rd +++ b/man/bandit2arm_delta.Rd @@ -2,110 +2,139 @@ % Please edit documentation in R/bandit2arm_delta.R \name{bandit2arm_delta} \alias{bandit2arm_delta} -\title{Two-Arm Bandit Task} +\title{2-Armed Bandit Task (Erev et al., 2010; Hertwig et al., 2004)} \usage{ -bandit2arm_delta(data = "choose", niter = 3000, nwarmup = 1000, +bandit2arm_delta(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", - indPars = "mean", saveDir = NULL, modelRegressor = FALSE, - vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, - stepsize = 1, max_treedepth = 10) + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", and "outcome". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. 
Data columns should be labeled as: +"subjID", "choice", "outcome". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. 
Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. 
See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"bandit2arm_delta"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters.} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("bandit2arm_delta").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Two-Arm Bandit Task (e.g., Erev et al., 2010; Hertwig et al., 2004) using the following parameters: "A" (learning rate), "tau" (inverse temperature). +Hierarchical Bayesian Modeling of the 2-Armed Bandit Task with the following parameters: + "A" (learning rate), "tau" (inverse temperature). -\strong{MODEL:} -Rescorla-Wagner (delta) model +\strong{MODEL:} Rescorla-Wagner (Delta) Model } \details{ This section describes some of the function arguments in greater detail. 
-\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Two-Arm Bandit Task, there should be three columns of data -with the labels "subjID", "choice", and "outcome". It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the 2-Armed Bandit Task, there should be 3 columns of data with the + labels "subjID", "choice", "outcome". 
It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{Should contain a unique identifier for each subject within data-set to be analyzed.} - \item{\code{"choice"}}{Should contain a integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in 2-arm bandit task).} - \item{\code{"outcome"}}{Should contain outcomes within each given trial (e.g., 1 = reward, -1 = loss).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"choice"}{Integer value representing the option chosen on the given trial: 1 or 2.} + \item{"outcome"}{Integer value representing the outcome of the given trial (where reward == 1, and loss == -1).} + + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. 
Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. 
The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
} \examples{ \dontrun{ # Run the model and store results in "output" -output <- bandit2arm_delta(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- bandit2arm_delta("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -118,15 +147,15 @@ printFit(output) } } \references{ -Erev, I., Ert, E., Roth, A. E., Haruvy, E., Herzog, S. M., Hau, R., et al. (2010). A choice prediction competition: Choices -from experience and from description. Journal of Behavioral Decision Making, 23(1), 15-47. http://doi.org/10.1002/bdm.683 +Erev, I., Ert, E., Roth, A. E., Haruvy, E., Herzog, S. M., Hau, R., et al. (2010). A choice + prediction competition: Choices from experience and from description. Journal of Behavioral + Decision Making, 23(1), 15-47. http://doi.org/10.1002/bdm.683 -Hertwig, R., Barron, G., Weber, E. U., & Erev, I. (2004). Decisions From Experience and the Effect of Rare Events in Risky -Choice. Psychological Science, 15(8), 534-539. http://doi.org/10.1111/j.0956-7976.2004.00715.x - -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. +Hertwig, R., Barron, G., Weber, E. U., & Erev, I. (2004). Decisions From Experience and the + Effect of Rare Events in Risky Choice. Psychological Science, 15(8), 534-539. 
+ http://doi.org/10.1111/j.0956-7976.2004.00715.x } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/bandit4arm2_kalman_filter.Rd b/man/bandit4arm2_kalman_filter.Rd new file mode 100644 index 00000000..b27fab00 --- /dev/null +++ b/man/bandit4arm2_kalman_filter.Rd @@ -0,0 +1,158 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/bandit4arm2_kalman_filter.R +\name{bandit4arm2_kalman_filter} +\alias{bandit4arm2_kalman_filter} +\title{4-Armed Bandit Task (2)} +\usage{ +bandit4arm2_kalman_filter(data = "choose", niter = 4000, + nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, + inits = "random", indPars = "mean", modelRegressor = FALSE, + vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, + stepsize = 1, max_treedepth = 10, ...) +} +\arguments{ +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "choice", "outcome". See \bold{Details} below for more information.} + +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} + +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} + +\item{nchain}{Number of Markov chains to run. Defaults to 4.} + +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} + +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} + +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} + +\item{indPars}{Character value specifying how to summarize individual parameters. 
Current options +are: "mean", "median", or "mode".} + +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} + +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} + +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} + +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} + +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} + +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} + +\item{...}{Not used for this model.} +} +\value{ +A class "hBayesDM" object \code{modelData} with the following components: +\describe{ + \item{\code{model}}{Character value that is the name of the model ("bandit4arm2_kalman_filter").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + +} +} +\description{ +Hierarchical Bayesian Modeling of the 4-Armed Bandit Task (2) with the following parameters: + "lambda" (decay factor), "theta" (decay center), "beta" (inverse softmax temperature), "mu0" (anticipated initial mean of all 4 options), "sigma0" (anticipated initial sd (uncertainty factor) of all 4 options), "sigmaD" (sd of diffusion noise). 
+ +Contributor: \href{https://zohyos7.github.io}{Yoonseo Zoh}, \href{https://lei-zhang.net/}{Lei Zhang} + +\strong{MODEL:} Kalman Filter (Daw et al., 2006, Nature) +} +\details{ +This section describes some of the function arguments in greater detail. + +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the 4-Armed Bandit Task (2), there should be 3 columns of data with the + labels "subjID", "choice", "outcome". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: +\describe{ + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"choice"}{Integer value representing the option chosen on the given trial: 1, 2, 3, or 4.} + \item{"outcome"}{Integer value representing the outcome of the given trial (where reward == 1, and loss == -1).} + + + + + + +} +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. 
The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
+} +\examples{ +\dontrun{ +# Run the model and store results in "output" +output <- bandit4arm2_kalman_filter("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) + +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") + +# Check Rhat values (all Rhat values should be less than or equal to 1.1) +rhat(output) + +# Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) +plot(output) + +# Show the WAIC and LOOIC model fit estimates +printFit(output) +} +} +\references{ +Daw, N. D., O'Doherty, J. P., Dayan, P., Seymour, B., & Dolan, R. J. (2006). Cortical substrates + for exploratory decisions in humans. Nature, 441(7095), 876-879. +} +\seealso{ +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} +} diff --git a/man/bandit4arm_4par.Rd b/man/bandit4arm_4par.Rd index c9e0a814..9119d44b 100644 --- a/man/bandit4arm_4par.Rd +++ b/man/bandit4arm_4par.Rd @@ -2,111 +2,139 @@ % Please edit documentation in R/bandit4arm_4par.R \name{bandit4arm_4par} \alias{bandit4arm_4par} -\title{4-armed bandit task} +\title{4-Armed Bandit Task} \usage{ -bandit4arm_4par(data = "choose", niter = 4000, nwarmup = 2000, +bandit4arm_4par(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", - indPars = "mean", saveDir = NULL, modelRegressor = FALSE, - vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, - stepsize = 1, max_treedepth = 10) + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", "gain", and "loss". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. 
Data columns should be labeled as: +"subjID", "choice", "gain", "loss". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. 
Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. 
See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"bandit4arm_4par"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. } - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("bandit4arm_4par").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the 4-armed bandit task with the following parameters: "Arew" (Reward learning rate), "Apun" (Punishment learning rate), "R" (Reward sensitivity), and "P" (Punishment sensitivity). +Hierarchical Bayesian Modeling of the 4-Armed Bandit Task with the following parameters: + "Arew" (reward learning rate), "Apun" (punishment learning rate), "R" (reward sensitivity), "P" (punishment sensitivity). 
-\strong{MODEL:} -4 parameter model without C (choice perseveration) (Seymour et al 2012, J Neuro) +\strong{MODEL:} 4 Parameter Model, without C (choice perseveration) (Seymour et al., 2012, J Neuro) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the 4-armed bandit task, there should be four columns of data with the labels -"subjID", "choice", "gain", and "loss". It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the 4-Armed Bandit Task, there should be 4 columns of data with the + labels "subjID", "choice", "gain", "loss". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"choice"}}{A nominal integer representing which choice was chosen within the given trial (e.g. 1, 2, 3, or 4).} - \item{\code{"gain"}}{A floating number representing the amount of currency won on the given trial (e.g. 
50, 50, 100).} - \item{\code{"loss"}}{A floating number representing the amount of currency lost on the given trial (e.g. 0, -50).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"choice"}{Integer value representing the option chosen on the given trial: 1, 2, 3, or 4.} + \item{"gain"}{Floating point value representing the amount of currency won on the given trial (e.g. 50, 100).} + \item{"loss"}{Floating point value representing the amount of currency lost on the given trial (e.g. 0, -50).} + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". 
- -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. 
Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
} \examples{ \dontrun{ # Run the model and store results in "output" -output <- bandit4arm_4par("example", 3000, 1000, 4, 4) # 4 chains, 4 cores (parallel processing) +output <- bandit4arm_4par("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -117,11 +145,12 @@ plot(output) # Show the WAIC and LOOIC model fit estimates printFit(output) } - } \references{ -Seymour, Daw, Roiser, Dayan, & Dolan (2012) Serotonin Selectively Modulates Reward Value in Human Decision-Making. J Neuro, 32(17), 5833-5842. +Seymour, Daw, Roiser, Dayan, & Dolan (2012). Serotonin Selectively Modulates Reward Value in + Human Decision-Making. J Neuro, 32(17), 5833-5842. } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/bandit4arm_lapse.Rd b/man/bandit4arm_lapse.Rd index ed9d7bd2..8733203d 100644 --- a/man/bandit4arm_lapse.Rd +++ b/man/bandit4arm_lapse.Rd @@ -2,111 +2,139 @@ % Please edit documentation in R/bandit4arm_lapse.R \name{bandit4arm_lapse} \alias{bandit4arm_lapse} -\title{4-armed bandit task} +\title{4-Armed Bandit Task} \usage{ -bandit4arm_lapse(data = "choose", niter = 4000, nwarmup = 2000, +bandit4arm_lapse(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", - indPars = "mean", saveDir = NULL, modelRegressor = FALSE, - vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, - stepsize = 1, max_treedepth = 10) + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, 
adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", "gain", and "loss". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "choice", "gain", "loss". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. 
Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. 
See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"bandit4arm_lapse"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. } - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("bandit4arm_lapse").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the 4-armed bandit task with the following parameters: "Arew" (Reward learning rate), "Apun" (Punishment learning rate), "R" (Reward sensitivity), "P" (Punishment sensitivity), and "xi" (Noise). +Hierarchical Bayesian Modeling of the 4-Armed Bandit Task with the following parameters: + "Arew" (reward learning rate), "Apun" (punishment learning rate), "R" (reward sensitivity), "P" (punishment sensitivity), "xi" (noise). 
-\strong{MODEL:} -5 parameter model without C (choice perseveration) but with xi (noise) (Seymour et al 2012, J Neuro) +\strong{MODEL:} 5 Parameter Model, without C (choice perseveration) but with xi (noise) (Seymour et al., 2012, J Neuro) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the 4-armed bandit task, there should be four columns of data with the labels -"subjID", "choice", "gain", and "loss". It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the 4-Armed Bandit Task, there should be 4 columns of data with the + labels "subjID", "choice", "gain", "loss". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"choice"}}{A nominal integer representing which choice was chosen within the given trial (e.g. 1, 2, 3, or 4).} - \item{\code{"gain"}}{A floating number representing the amount of currency won on the given trial (e.g. 
50, 50, 100).} - \item{\code{"loss"}}{A floating number representing the amount of currency lost on the given trial (e.g. 0, -50).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"choice"}{Integer value representing the option chosen on the given trial: 1, 2, 3, or 4.} + \item{"gain"}{Floating point value representing the amount of currency won on the given trial (e.g. 50, 100).} + \item{"loss"}{Floating point value representing the amount of currency lost on the given trial (e.g. 0, -50).} + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". 
- -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. 
Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
} \examples{ \dontrun{ # Run the model and store results in "output" -output <- bandit4arm_lapse("example", 3000, 1000, 4, 4) # 4 chains, 4 cores (parallel processing) +output <- bandit4arm_lapse("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -117,11 +145,12 @@ plot(output) # Show the WAIC and LOOIC model fit estimates printFit(output) } - } \references{ -Seymour, Daw, Roiser, Dayan, & Dolan (2012) Serotonin Selectively Modulates Reward Value in Human Decision-Making. J Neuro, 32(17), 5833-5842. +Seymour, Daw, Roiser, Dayan, & Dolan (2012). Serotonin Selectively Modulates Reward Value in + Human Decision-Making. J Neuro, 32(17), 5833-5842. } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/bart_par4.Rd b/man/bart_par4.Rd index d9436f98..50dba68b 100644 --- a/man/bart_par4.Rd +++ b/man/bart_par4.Rd @@ -5,109 +5,138 @@ \title{Balloon Analogue Risk Task (Ravenzwaaij et al., 2011, Journal of Mathematical Psychology)} \usage{ bart_par4(data = "choose", niter = 4000, nwarmup = 1000, - nchain = 4, ncore = 1, nthin = 1, inits = "fixed", - indPars = "mean", saveDir = NULL, modelRegressor = FALSE, - vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, - stepsize = 1, max_treedepth = 10) + nchain = 4, ncore = 1, nthin = 1, inits = "random", + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) 
} \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "pumps", and "explosion". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "pumps", "explosion". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. 
Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. 
See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"bart_par4"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. } - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("bart_par4").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Balloon Analogue Risk Task with the following 4 parameters: "phi" (prior belief of the balloon not going to be burst), "eta" (updating rate), "gam" (risk-taking parameter), and "tau" (inverse temperature).\cr\cr +Hierarchical Bayesian Modeling of the Balloon Analogue Risk Task with the following parameters: + "phi" (prior belief of balloon not bursting), "eta" (updating rate), "gam" (risk-taking parameter), "tau" (inverse temperature). 
Contributor: \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park}, \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang}, \href{https://ccs-lab.github.io/team/ayoung-lee/}{Ayoung Lee}, \href{https://ccs-lab.github.io/team/jeongbin-oh/}{Jeongbin Oh}, \href{https://ccs-lab.github.io/team/jiyoon-lee/}{Jiyoon Lee}, \href{https://ccs-lab.github.io/team/junha-jang/}{Junha Jang} -\strong{MODEL:} -Reparameterized version (by Harhim Park & Jaeyeong Yang) of Balloon Analogue Risk Task model (Ravenzwaaij et al., 2011) with four parameters +\strong{MODEL:} Re-parameterized version (by Harhim Park & Jaeyeong Yang) of BART Model (Ravenzwaaij et al., 2011) with 4 parameters } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Balloon Analogue Risk Task, there should be three columns of data with the labels -"subjID", "pumps", "explosion". It is not necessary for the columns to be in this -particular order, however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Balloon Analogue Risk Task, there should be 3 columns of data with the + labels "subjID", "pumps", "explosion". 
It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"pumps"}}{The number of pumps} - \item{\code{"explosion"}}{0: intact, 1: burst} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"pumps"}{The number of pumps.} + \item{"explosion"}{0: intact, 1: burst} + + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". 
- -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. 
Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
} \examples{ \dontrun{ # Run the model and store results in "output" -output <- bart_par4(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- bart_par4("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -117,14 +146,13 @@ plot(output) # Show the WAIC and LOOIC model fit estimates printFit(output) - - } } \references{ -van Ravenzwaaij, D., Dutilh, G., & Wagenmakers, E. J. (2011). Cognitive model decomposition of the BART: Assessment and application. -Journal of Mathematical Psychology, 55(1), 94-105. +van Ravenzwaaij, D., Dutilh, G., & Wagenmakers, E. J. (2011). Cognitive model decomposition of the + BART: Assessment and application. Journal of Mathematical Psychology, 55(1), 94-105. 
} \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/choiceRT_ddm.Rd b/man/choiceRT_ddm.Rd index 4dfe0980..4e6cceee 100644 --- a/man/choiceRT_ddm.Rd +++ b/man/choiceRT_ddm.Rd @@ -2,115 +2,149 @@ % Please edit documentation in R/choiceRT_ddm.R \name{choiceRT_ddm} \alias{choiceRT_ddm} -\title{Choice Reaction Time task, drift diffusion modeling} +\title{Choice Reaction Time Task} \usage{ -choiceRT_ddm(data = "choose", niter = 3000, nwarmup = 1000, - nchain = 4, ncore = 1, nthin = 1, inits = "fixed", - indPars = "mean", saveDir = NULL, RTbound = 0.1, - modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, - adapt_delta = 0.95, stepsize = 1, max_treedepth = 10) +choiceRT_ddm(data = "choose", niter = 4000, nwarmup = 1000, + nchain = 4, ncore = 1, nthin = 1, inits = "random", + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID, ""choice", and "RT". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "choice", "RT". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. 
Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{RTbound}{A floating point number representing the lower bound (i.e. minimum allowed) reaction time. Defaults to 0.1 (100 milliseconds).} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{inc_postpred}{\strong{(Currently not available.)} +Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. 
Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{inc_postpred}{(\strong{Not currently available for DDM models}) Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{For this model, it's possible to set the following \strong{model-specific argument} to a value that you may prefer. \cr +\code{RTbound}: Floating point value representing the lower bound (i.e., minimum allowed) reaction time. Defaults to 0.1 (100 milliseconds).} } \value{ -\code{modelData} A class \code{'hBayesDM'} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"choiceRT_ddm"}).} - \item{\code{allIndPars}}{\code{'data.frame'} containing the summarized parameter - values (as specified by \code{'indPars'}) for each subject.} - \item{\code{parVals}}{A \code{'list'} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{'stanfit'} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("choiceRT_ddm").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of choice/reaction time data with the following parameters: "alpha" (boundary separation), "beta" (bias), "delta" (drift rate), "tau" (non-decision time). -The code is based on codes/comments by Guido Biele, Joseph Burling, Andrew Ellis, and potentially others @ Stan mailing +Hierarchical Bayesian Modeling of the Choice Reaction Time Task with the following parameters: + "alpha" (boundary separation), "beta" (bias), "delta" (drift rate), "tau" (non-decision time). + +\strong{MODEL:} Drift Diffusion Model (Ratcliff, 1978, Psychological Review)\cr *Note that this implementation is \strong{not} the full Drift Diffusion Model as described in Ratcliff (1978). This implementation estimates the drift rate, boundary separation, starting point, and non-decision time; but not the between- and within-trial variances in these parameters. -\strong{MODEL:} -Ratcliff drift diffusion model - multiple subjects. Note that this implementation is \strong{not} the full drift diffusion model as described in -Ratcliff (1978). This implementation estimates the drift rate, boundary separation, starting point, and non-decision time, but not the between- -and within-trial variances in these parameters. 
+Code for this model is based on codes/comments by Guido Biele, Joseph Burling, Andrew Ellis, and potentially others @ Stan mailing + +Parameters of the DDM (parameter names in Ratcliff), from \url{https://github.com/gbiele/stan_wiener_test/blob/master/stan_wiener_test.R} +\cr - alpha (a): Boundary separation or Speed-accuracy trade-off (high alpha means high accuracy). 0 < alpha +\cr - beta (b): Initial bias, for either response (beta > 0.5 means bias towards "upper" response 'A'). 0 < beta < 1 +\cr - delta (v): Drift rate; Quality of the stimulus (delta close to 0 means ambiguous stimulus or weak ability). 0 < delta +\cr - tau (ter): Non-decision time + Motor response time + encoding time (high means slow encoding, execution). 0 < tau (in seconds) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For choice/reaction-time tasks, there should be three columns of data with the labels "subjID", "choice", and "RT". -It is not necessary for the columns to be in this particular order, however it is necessary that they be labelled -correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Choice Reaction Time Task, there should be 3 columns of data with the + labels "subjID", "choice", "RT". 
It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"choice"}}{An integer representing the choice made on the current trial. Lower/upper boundary or left/right choices should be coded as 1/2 (e.g., 1 1 1 2 1 2).} - \item{\code{"RT"}}{A floating number the choice reaction time in \strong{seconds}. (e.g., 0.435 0.383 0.314 0.309, etc.).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"choice"}{Choice made for the current trial, coded as \code{1}/\code{2} to indicate lower/upper boundary or left/right choices (e.g., 1 1 1 2 1 2).} + \item{"RT"}{Choice reaction time for the current trial, in \strong{seconds} (e.g., 0.435 0.383 0.314 0.309, etc.).} + + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "stimulus_name", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \strong{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. 
Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \strong{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. 
The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
} \examples{ \dontrun{ # Run the model and store results in "output" -output <- choiceRT_ddm(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- choiceRT_ddm("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -124,10 +158,8 @@ printFit(output) } \references{ Ratcliff, R. (1978). A theory of memory retrieval. Psychological Review, 85(2), 59-108. http://doi.org/10.1037/0033-295X.85.2.59 - -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/choiceRT_ddm_single.Rd b/man/choiceRT_ddm_single.Rd index 7f189e0a..48d6fd5e 100644 --- a/man/choiceRT_ddm_single.Rd +++ b/man/choiceRT_ddm_single.Rd @@ -2,115 +2,149 @@ % Please edit documentation in R/choiceRT_ddm_single.R \name{choiceRT_ddm_single} \alias{choiceRT_ddm_single} -\title{Choice Reaction Time task, drift diffusion modeling} +\title{Choice Reaction Time Task} \usage{ -choiceRT_ddm_single(data = "choose", niter = 3000, nwarmup = 1000, - nchain = 4, ncore = 1, nthin = 1, inits = "fixed", - indPars = "mean", saveDir = NULL, RTbound = 0.1, - modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, - adapt_delta = 0.95, stepsize = 1, max_treedepth = 10) +choiceRT_ddm_single(data = "choose", niter = 4000, nwarmup = 1000, + nchain = 4, ncore = 1, nthin = 1, inits = 
"random", + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID, ""choice", and "RT". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "choice", "RT". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. 
Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{RTbound}{A floating point number representing the lower bound (i.e. minimum allowed) reaction time. Defaults to 0.1 (100 milliseconds).} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{inc_postpred}{\strong{(Currently not available.)} +Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{inc_postpred}{(\strong{Not currently available for DDM models}) Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. 
See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{For this model, it's possible to set the following \strong{model-specific argument} to a value that you may prefer. \cr +\code{RTbound}: Floating point value representing the lower bound (i.e., minimum allowed) reaction time. Defaults to 0.1 (100 milliseconds).} } \value{ -\code{modelData} A class \code{'hBayesDM'} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"choiceRT_ddm_single"}).} - \item{\code{allIndPars}}{\code{'data.frame'} containing the summarized parameter - values (as specified by \code{'indPars'}) for the single subject.} - \item{\code{parVals}}{A \code{'list'} where each element contains posterior samples - over different model parameters. } - \item{\code{fit}}{A class \code{'stanfit'} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("choiceRT_ddm_single").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Individual Bayesian Modeling of choice/reaction time data with the following parameters: "alpha" (boundary separation), "beta" (bias), "delta" (drift rate), "tau" (non-decision time). 
-The code is based on codes/comments by Guido Biele, Joseph Burling, Andrew Ellis, and potentially others @ Stan mailing +Individual Bayesian Modeling of the Choice Reaction Time Task with the following parameters: + "alpha" (boundary separation), "beta" (bias), "delta" (drift rate), "tau" (non-decision time). + +\strong{MODEL:} Drift Diffusion Model (Ratcliff, 1978, Psychological Review)\cr *Note that this implementation is \strong{not} the full Drift Diffusion Model as described in Ratcliff (1978). This implementation estimates the drift rate, boundary separation, starting point, and non-decision time; but not the between- and within-trial variances in these parameters. -\strong{MODEL:} -Ratcliff drift diffusion model - single subject. Note that this implementation is \strong{not} the full drift diffusion model as described in -Ratcliff (1978). This implementation estimates the drift rate, boundary separation, starting point, and non-decision time, but not the between- -and within-trial variances in these parameters. +Code for this model is based on codes/comments by Guido Biele, Joseph Burling, Andrew Ellis, and potentially others @ the Stan mailing list. + +Parameters of the DDM (parameter names in Ratcliff), from \url{https://github.com/gbiele/stan_wiener_test/blob/master/stan_wiener_test.R} +\cr - alpha (a): Boundary separation or Speed-accuracy trade-off (high alpha means high accuracy). 0 < alpha +\cr - beta (b): Initial bias, for either response (beta > 0.5 means bias towards "upper" response 'A'). 0 < beta < 1 +\cr - delta (v): Drift rate; Quality of the stimulus (delta close to 0 means ambiguous stimulus or weak ability). 0 < delta +\cr - tau (ter): Non-decision time + Motor response time + encoding time (high means slow encoding, execution). 0 < tau (in seconds) } \details{ This section describes some of the function arguments in greater detail. 
-\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of the subject of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For choice/reaction-time tasks, there should be two columns of data with the labels "subjID", "choice", and "RT". -It is not necessary for the columns to be in this particular order, however it is necessary that they be labelled -correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Choice Reaction Time Task, there should be 3 columns of data with the + labels "subjID", "choice", "RT". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"choice"}}{An integer representing the choice made on the current trial. Lower/upper boundary or left/right choices should be coded as 1/2 (e.g., 1 1 1 2 1 2).} - \item{\code{"RT"}}{A floating number the choice reaction time in \strong{seconds}. 
(e.g., 0.435 0.383 0.314 0.309, etc.).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"choice"}{Choice made for the current trial, coded as \code{1}/\code{2} to indicate lower/upper boundary or left/right choices (e.g., 1 1 1 2 1 2).} + \item{"RT"}{Choice reaction time for the current trial, in \strong{seconds} (e.g., 0.435 0.383 0.314 0.309, etc.).} + + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "subjID", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \strong{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. 
By default, \strong{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. 
When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- choiceRT_ddm_single(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- choiceRT_ddm_single("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -124,10 +158,8 @@ printFit(output) } \references{ Ratcliff, R. (1978). 
A theory of memory retrieval. Psychological Review, 85(2), 59-108. http://doi.org/10.1037/0033-295X.85.2.59 - -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/cra_exp.Rd b/man/cra_exp.Rd index 9ec4af51..7fc6786e 100644 --- a/man/cra_exp.Rd +++ b/man/cra_exp.Rd @@ -2,121 +2,140 @@ % Please edit documentation in R/cra_exp.R \name{cra_exp} \alias{cra_exp} -\title{Choice under Risk and Ambiguity Task} +\title{Choice Under Risk and Ambiguity Task} \usage{ -cra_exp(data = "choose", niter = 2000, nwarmup = 1000, nchain = 1, +cra_exp(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", indPars = "mean", - saveDir = NULL, modelRegressor = FALSE, vb = FALSE, - inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, - max_treedepth = 10) + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: -"subjID", "prob", "ambig", "reward_var", "reward_fix", and "choice". -See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "prob", "ambig", "reward_var", "reward_fix", "choice". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. 
Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +For this model they are: "sv", "sv_fix", "sv_var", "p_var".} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). 
Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"cra_exp"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("cra_exp").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + \item{\code{modelRegressor}}{List object containing the extracted model-based regressors.} } } \description{ -Hierarchical Bayesian Modeling of the Choice under Risk and Ambiguity Task -with the following parameters: -"alpha" (risk attitude), -"beta" (ambiguity attitude), and -"gamma" (inverse temperature). +Hierarchical Bayesian Modeling of the Choice Under Risk and Ambiguity Task with the following parameters: + "alpha" (risk attitude), "beta" (ambiguity attitude), "gamma" (inverse temperature). Contributor: \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} -\strong{MODEL:} -Exponential subjective value model (Hsu et al., 2005, Science) +\strong{MODEL:} Exponential Subjective Value Model (Hsu et al., 2005, Science) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. 
For the Choice under Risk and Ambiguity Task, there should be five columns of data with the labels -"subjID", "prob", "ambig", "reward_var", "reward_fix" and "choice". It is not necessary for the columns to be in this -particular order, however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Choice Under Risk and Ambiguity Task, there should be 6 columns of data with the + labels "subjID", "prob", "ambig", "reward_var", "reward_fix", "choice". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"prob"}}{Objective probability of a variable lottery.} - \item{\code{"ambig"}}{Ambiguity levels of variable lotteries. For a risky lottery, \code{"ambig"} equals 0, and more than zero for an ambiguous lottery} - \item{\code{"reward_var"}}{Amounts of reward values in variable lotteries. \code{"reward_var"} is assumed to be greater than zero.} - \item{\code{"reward_fix"}}{Amounts of reward values in fixed lotteries. 
\code{"reward_fix"} is assumed to be greater than zero.} - \item{\code{"choice"}}{If the variable lottery was taken, \code{"choice"} equals 1, otherwise 0.} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"prob"}{Objective probability of the variable lottery.} + \item{"ambig"}{Ambiguity level of the variable lottery (0 for risky lottery; greater than 0 for ambiguous lottery).} + \item{"reward_var"}{Amount of reward in variable lottery. Assumed to be greater than zero.} + \item{"reward_fix"}{Amount of reward in fixed lottery. Assumed to be greater than zero.} + \item{"choice"}{If the variable lottery was selected, choice == 1; otherwise choice == 0.} + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. 
When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. 
independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
} \examples{ \dontrun{ # Run the model and store results in "output" -output <- cra_exp(data = "example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) +output <- cra_exp("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -129,10 +148,11 @@ printFit(output) } } \references{ -Hsu, M., Bhatt, M., Adolphs, R., Tranel, D., & Camerer, C. F. (2005). -Neural systems responding to degrees of uncertainty in human decision-making. -Science, 310(5754), 1680–1683. https://doi.org/10.1126/science.1115327 +Hsu, M., Bhatt, M., Adolphs, R., Tranel, D., & Camerer, C. F. (2005). Neural systems responding + to degrees of uncertainty in human decision-making. Science, 310(5754), 1680-1683. 
+ https://doi.org/10.1126/science.1115327 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/cra_linear.Rd b/man/cra_linear.Rd index 1cb6f4a6..f0bbb5d0 100644 --- a/man/cra_linear.Rd +++ b/man/cra_linear.Rd @@ -2,121 +2,141 @@ % Please edit documentation in R/cra_linear.R \name{cra_linear} \alias{cra_linear} -\title{Choice under Risk and Ambiguity Task} +\title{Choice Under Risk and Ambiguity Task} \usage{ -cra_linear(data = "choose", niter = 2000, nwarmup = 1000, - nchain = 1, ncore = 1, nthin = 1, inits = "random", - indPars = "mean", saveDir = NULL, modelRegressor = FALSE, - vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, - stepsize = 1, max_treedepth = 10) +cra_linear(data = "choose", niter = 4000, nwarmup = 1000, + nchain = 4, ncore = 1, nthin = 1, inits = "random", + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: -"subjID", "prob", "ambig", "reward_var", "reward_fix", and "choice". -See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "prob", "ambig", "reward_var", "reward_fix", "choice". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. 
Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +For this model they are: "sv", "sv_fix", "sv_var", "p_var".} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). 
Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"cra_linear"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("cra_linear").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + \item{\code{modelRegressor}}{List object containing the extracted model-based regressors.} } } \description{ -Hierarchical Bayesian Modeling of the Choice under Risk and Ambiguity Task -with the following parameters: -"alpha" (risk attitude), -"beta" (ambiguity attitude), and -"gamma" (inverse temperature). +Hierarchical Bayesian Modeling of the Choice Under Risk and Ambiguity Task with the following parameters: + "alpha" (risk attitude), "beta" (ambiguity attitude), "gamma" (inverse temperature). Contributor: \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} -\strong{MODEL:} -Linear Subjective Value Model (Levy et al., 2010, J Neurophysiol) +\strong{MODEL:} Linear Subjective Value Model (Levy et al., 2010, J Neurophysiol) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. 
For the Choice under Risk and Ambiguity Task, there should be five columns of data with the labels -"subjID", "prob", "ambig", "reward_var", "reward_fix" and "choice". It is not necessary for the columns to be in this -particular order, however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Choice Under Risk and Ambiguity Task, there should be 6 columns of data with the + labels "subjID", "prob", "ambig", "reward_var", "reward_fix", "choice". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"prob"}}{Objective probability of a variable lottery.} - \item{\code{"ambig"}}{Ambiguity levels of variable lotteries. For a risky lottery, \code{"ambig"} equals 0, and more than zero for an ambiguous lottery} - \item{\code{"reward_var"}}{Amounts of reward values in variable lotteries. \code{"reward_var"} is assumed to be greater than zero.} - \item{\code{"reward_fix"}}{Amounts of reward values in fixed lotteries. 
\code{"reward_fix"} is assumed to be greater than zero.} - \item{\code{"choice"}}{If the variable lottery was taken, \code{"choice"} equals 1, otherwise 0.} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"prob"}{Objective probability of the variable lottery.} + \item{"ambig"}{Ambiguity level of the variable lottery (0 for risky lottery; greater than 0 for ambiguous lottery).} + \item{"reward_var"}{Amount of reward in variable lottery. Assumed to be greater than zero.} + \item{"reward_fix"}{Amount of reward in fixed lottery. Assumed to be greater than zero.} + \item{"choice"}{If the variable lottery was selected, choice == 1; otherwise choice == 0.} + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. 
When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. 
independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
} \examples{ \dontrun{ # Run the model and store results in "output" -output <- cra_linear(data = "example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) +output <- cra_linear("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -129,9 +149,11 @@ printFit(output) } } \references{ -Levy, I., Snell, J., Nelson, A. J., Rustichini, A., & Glimcher, P. W. (2010). Neural representation of subjective value under risk and ambiguity. -Journal of neurophysiology, 103(2), 1036-1047. +Levy, I., Snell, J., Nelson, A. J., Rustichini, A., & Glimcher, P. W. (2010). Neural + representation of subjective value under risk and ambiguity. Journal of Neurophysiology, + 103(2), 1036-1047. } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/dbdm_prob_weight.Rd b/man/dbdm_prob_weight.Rd new file mode 100644 index 00000000..55d2df61 --- /dev/null +++ b/man/dbdm_prob_weight.Rd @@ -0,0 +1,163 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dbdm_prob_weight.R +\name{dbdm_prob_weight} +\alias{dbdm_prob_weight} +\title{Description Based Decision Making Task} +\usage{ +dbdm_prob_weight(data = "choose", niter = 4000, nwarmup = 1000, + nchain = 4, ncore = 1, nthin = 1, inits = "random", + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) +} +\arguments{ +\item{data}{A .txt file containing the data to be modeled. 
Data columns should be labeled as: +"subjID", "opt1hprob", "opt2hprob", "opt1hval", "opt1lval", "opt2hval", "opt2lval", "choice". See \bold{Details} below for more information.} + +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} + +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} + +\item{nchain}{Number of Markov chains to run. Defaults to 4.} + +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} + +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} + +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} + +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} + +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} + +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} + +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} + +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} + +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} + +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. 
See \bold{Details} below.} + +\item{...}{Not used for this model.} +} +\value{ +A class "hBayesDM" object \code{modelData} with the following components: +\describe{ + \item{\code{model}}{Character value that is the name of the model ("dbdm_prob_weight").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + +} +} +\description{ +Hierarchical Bayesian Modeling of the Description Based Decision Making Task with the following parameters: + "tau" (probability weight function), "rho" (subjective utility function), "lambda" (loss aversion parameter), "beta" (inverse softmax temperature). + +\strong{MODEL:} Probability Weight Function (Erev et al., 2010; Hertwig et al., 2004; Jessup et al., 2008) +} +\details{ +This section describes some of the function arguments in greater detail. + +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Description Based Decision Making Task, there should be 8 columns of data with the + labels "subjID", "opt1hprob", "opt2hprob", "opt1hval", "opt1lval", "opt2hval", "opt2lval", "choice". 
It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: +\describe{ + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"opt1hprob"}{Possibility of getting higher value of outcome (opt1hval) when choosing option 1.} + \item{"opt2hprob"}{Possibility of getting higher value of outcome (opt2hval) when choosing option 2.} + \item{"opt1hval"}{Possible (with opt1hprob probability) outcome of option 1.} + \item{"opt1lval"}{Possible (with (1 - opt1hprob) probability) outcome of option 1.} + \item{"opt2hval"}{Possible (with opt2hprob probability) outcome of option 2.} + \item{"opt2lval"}{Possible (with (1 - opt2hprob) probability) outcome of option 2.} + \item{"choice"}{If option 1 was selected, choice == 1; else if option 2 was selected, choice == 2.} + +} +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. 
Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
+} +\examples{ +\dontrun{ +# Run the model and store results in "output" +output <- dbdm_prob_weight("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) + +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") + +# Check Rhat values (all Rhat values should be less than or equal to 1.1) +rhat(output) + +# Plot the posterior distributions of the hyper-parameters (distributions should be unimodal) +plot(output) + +# Show the WAIC and LOOIC model fit estimates +printFit(output) +} +} +\references{ +Erev, I., Ert, E., Roth, A. E., Haruvy, E., Herzog, S. M., Hau, R., ... & Lebiere, C. (2010). A + choice prediction competition: Choices from experience and from description. Journal of + Behavioral Decision Making, 23(1), 15-47. + +Hertwig, R., Barron, G., Weber, E. U., & Erev, I. (2004). Decisions from experience and the + effect of rare events in risky choice. Psychological science, 15(8), 534-539. + +Jessup, R. K., Bishara, A. J., & Busemeyer, J. R. (2008). Feedback produces divergence from + prospect theory in descriptive choice. Psychological Science, 19(10), 1015-1022. +} +\seealso{ +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} +} diff --git a/man/dd_cs.Rd b/man/dd_cs.Rd index 783f465a..4a6b4392 100644 --- a/man/dd_cs.Rd +++ b/man/dd_cs.Rd @@ -4,112 +4,136 @@ \alias{dd_cs} \title{Delay Discounting Task} \usage{ -dd_cs(data = "choose", niter = 3000, nwarmup = 1000, nchain = 4, - ncore = 1, nthin = 1, inits = "fixed", indPars = "mean", - saveDir = NULL, modelRegressor = FALSE, vb = FALSE, - inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, - max_treedepth = 10) +dd_cs(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, + ncore = 1, nthin = 1, inits = "random", indPars = "mean", + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) 
} \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. 
Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. 
See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"dd_cs"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. } - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("dd_cs").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Delay Discounting Task using the following parameters: "r" (exponential discounting rate; impatience), "s" (time-sensitivity), "beta" (inverse temp.). +Hierarchical Bayesian Modeling of the Delay Discounting Task with the following parameters: + "r" (exponential discounting rate), "s" (impatience), "beta" (inverse temperature). -\strong{MODEL:} -Constant-Sensitivity (CS) Model (Ebert & Prelec, 2007, Management Science) +\strong{MODEL:} Constant-Sensitivity (CS) Model (Ebert & Prelec, 2007, Management Science) } \details{ This section describes some of the function arguments in greater detail. 
-\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Delay Discounting Task, there should be six columns of data -with the labels "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". -It is not necessary for the columns to be in this particular order, however it is necessary that they be labelled -correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Delay Discounting Task, there should be 6 columns of data with the + labels "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"delay_later"}}{An integer representing the delayed days for the later option within the given trial. (e.g., 1 6 15 28 85 170).} - \item{\code{"amount_later"}}{A floating number representing the amount for the later option within the given trial. 
(e.g., 10.5 38.3 13.4 31.4 30.9, etc.).} - \item{\code{"delay_sooner"}}{An integer representing the delayed days for the sooner option (e.g., 0 0 0 0).} - \item{\code{"amount_sooner"}}{A floating number representing the amount for the sooner option (e.g., 10 10 10 10).} - \item{\code{"choice"}}{An integer value representing the chosen option within the given trial (e.g., 0 = instant amount, 1 = delayed amount)} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"delay_later"}{An integer representing the delayed days for the later option (e.g. 1, 6, 28).} + \item{"amount_later"}{A floating point number representing the amount for the later option (e.g. 10.5, 13.4, 30.9).} + \item{"delay_sooner"}{An integer representing the delayed days for the sooner option (e.g. 0).} + \item{"amount_sooner"}{A floating point number representing the amount for the sooner option (e.g. 10).} + \item{"choice"}{If amount_later was selected, choice == 1; else if amount_sooner was selected, choice == 0.} + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. 
independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. 
where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
} \examples{ \dontrun{ # Run the model and store results in "output" -output <- dd_cs(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- dd_cs("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -122,12 +146,10 @@ printFit(output) } } \references{ -Ebert, J. E. J., & Prelec, D. (2007). The Fragility of Time: Time-Insensitivity and Valuation of the Near and Far Future. -Management Science. http://doi.org/10.1287/mnsc.1060.0671 - -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. +Ebert, J. E. J., & Prelec, D. (2007). The Fragility of Time: Time-Insensitivity and Valuation of + the Near and Far Future. Management Science. 
http://doi.org/10.1287/mnsc.1060.0671 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/dd_cs_single.Rd b/man/dd_cs_single.Rd index c84e05d5..36627ab0 100644 --- a/man/dd_cs_single.Rd +++ b/man/dd_cs_single.Rd @@ -2,108 +2,139 @@ % Please edit documentation in R/dd_cs_single.R \name{dd_cs_single} \alias{dd_cs_single} -\title{Delay Discounting Task (Ebert & Prelec, 2007)} +\title{Delay Discounting Task} \usage{ -dd_cs_single(data = "choose", niter = 3000, nwarmup = 1000, - nchain = 4, ncore = 1, nthin = 1, inits = "fixed", - indPars = "mean", saveDir = NULL, modelRegressor = FALSE, - vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, - stepsize = 1, max_treedepth = 10) +dd_cs_single(data = "choose", niter = 4000, nwarmup = 1000, + nchain = 4, ncore = 1, nthin = 1, inits = "random", + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. 
Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). 
Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{'hBayesDM'} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model ("dd_cs_single").} - \item{\code{allIndPars}}{\code{'data.frame'} containing the summarized parameter - values (as specified by \code{'indPars'}) for each subject.} - \item{\code{parVals}}{A \code{'list'} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{'stanfit'} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("dd_cs_single").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Individual Bayesian Modeling of the Delay Discounting Task using the following parameters: "r" (exponential discounting rate), "s" (impatience), "beta" (inverse temp.). +Individual Bayesian Modeling of the Delay Discounting Task with the following parameters: + "r" (exponential discounting rate), "s" (impatience), "beta" (inverse temperature). -\strong{MODEL:} -Constant-Sensitivity (CS) Model (Ebert & Prelec, 2007, Management Science) +\strong{MODEL:} Constant-Sensitivity (CS) Model (Ebert & Prelec, 2007, Management Science) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Delay Discounting Task, there should be six columns of data -with the labels "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". 
-It is not necessary for the columns to be in this particular order, however it is necessary that they be labelled -correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Delay Discounting Task, there should be 6 columns of data with the + labels "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"delay_later"}}{An integer representing the delayed days for the later option within the given trial. (e.g., 1 6 15 28 85 170).} - \item{\code{"amount_later"}}{A floating number representing the amount for the later option within the given trial. (e.g., 10.5 38.3 13.4 31.4 30.9, etc.).} - \item{\code{"delay_sooner"}}{An integer representing the delayed days for the sooner option (e.g., 0 0 0 0).} - \item{\code{"amount_sooner"}}{A floating number representing the amount for the sooner option (e.g., 10 10 10 10).} - \item{\code{"choice"}}{An integer value representing the chosen option within the given trial (e.g., 0 = instant amount, 1 = delayed amount)} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"delay_later"}{An integer representing the delayed days for the later option (e.g. 1, 6, 28).} + \item{"amount_later"}{A floating point number representing the amount for the later option (e.g. 
10.5, 13.4, 30.9).} + \item{"delay_sooner"}{An integer representing the delayed days for the sooner option (e.g. 0).} + \item{"amount_sooner"}{A floating point number representing the amount for the sooner option (e.g. 10).} + \item{"choice"}{If amount_later was selected, choice == 1; else if amount_sooner was selected, choice == 0.} + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \strong{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \strong{nthin} is equal to 1, hence every sample is used to -generate the posterior. 
+\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. 
It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- dd_cs_single(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- dd_cs_single("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -115,3 +146,11 @@ plot(output) printFit(output) } } +\references{ +Ebert, J. E. J., & Prelec, D. (2007). The Fragility of Time: Time-Insensitivity and Valuation of + the Near and Far Future. Management Science. 
http://doi.org/10.1287/mnsc.1060.0671 +} +\seealso{ +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} +} diff --git a/man/dd_exp.Rd b/man/dd_exp.Rd index f3a916c2..22ff861a 100644 --- a/man/dd_exp.Rd +++ b/man/dd_exp.Rd @@ -4,112 +4,136 @@ \alias{dd_exp} \title{Delay Discounting Task} \usage{ -dd_exp(data = "choose", niter = 3000, nwarmup = 1000, nchain = 4, +dd_exp(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", indPars = "mean", - saveDir = NULL, modelRegressor = FALSE, vb = FALSE, - inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, - max_treedepth = 10) + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. 
A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"dd_exp"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("dd_exp").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Delay Discounting Task using the following parameters: "r" (exponential discounting rate) & "beta" (inverse temp.). +Hierarchical Bayesian Modeling of the Delay Discounting Task with the following parameters: + "r" (exponential discounting rate), "beta" (inverse temperature). -\strong{MODEL:} -Exponential Model (Samuelson, 1937, The Review of Economic Studies) +\strong{MODEL:} Exponential Model (Samuelson, 1937, The Review of Economic Studies) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Delay Discounting Task, there should be six columns of data -with the labels "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". 
-It is not necessary for the columns to be in this particular order, however it is necessary that they be labelled -correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Delay Discounting Task, there should be 6 columns of data with the + labels "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"delay_later"}}{An integer representing the delayed days for the later option within the given trial. (e.g., 1 6 15 28 85 170).} - \item{\code{"amount_later"}}{A floating number representing the amount for the later option within the given trial. (e.g., 10.5 38.3 13.4 31.4 30.9, etc.).} - \item{\code{"delay_sooner"}}{An integer representing the delayed days for the sooner option (e.g., 0 0 0 0).} - \item{\code{"amount_sooner"}}{A floating number representing the amount for the sooner option (e.g., 10 10 10 10).} - \item{\code{"choice"}}{An integer value representing the chosen option within the given trial (e.g., 0 = instant amount, 1 = delayed amount)} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"delay_later"}{An integer representing the delayed days for the later option (e.g. 1, 6, 28).} + \item{"amount_later"}{A floating point number representing the amount for the later option (e.g. 
10.5, 13.4, 30.9).} + \item{"delay_sooner"}{An integer representing the delayed days for the sooner option (e.g. 0).} + \item{"amount_sooner"}{A floating point number representing the amount for the sooner option (e.g. 10).} + \item{"choice"}{If amount_later was selected, choice == 1; else if amount_sooner was selected, choice == 0.} + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. 
- -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. 
The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- dd_exp(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- dd_exp("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -122,11 +146,10 @@ printFit(output) } } \references{ -Samuelson, P. A. (1937). A Note on Measurement of Utility. The Review of Economic Studies, 4(2), 155. http://doi.org/10.2307/2967612 - -Hoffman, M. D., & Gelman, A. (2014). 
The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. +Samuelson, P. A. (1937). A Note on Measurement of Utility. The Review of Economic Studies, 4(2), + 155. http://doi.org/10.2307/2967612 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/dd_hyperbolic.Rd b/man/dd_hyperbolic.Rd index 580e7623..8080878e 100644 --- a/man/dd_hyperbolic.Rd +++ b/man/dd_hyperbolic.Rd @@ -4,112 +4,137 @@ \alias{dd_hyperbolic} \title{Delay Discounting Task} \usage{ -dd_hyperbolic(data = "choose", niter = 3000, nwarmup = 1000, +dd_hyperbolic(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", - indPars = "mean", saveDir = NULL, modelRegressor = FALSE, - vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, - stepsize = 1, max_treedepth = 10) + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. 
Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). 
Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"dd_hyperbolic"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("dd_hyperbolic").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Delay Discounting Task using the following parameters: "k" (discounting rate), "beta" (inverse temperature). +Hierarchical Bayesian Modeling of the Delay Discounting Task with the following parameters: + "k" (discounting rate), "beta" (inverse temperature). -\strong{MODEL:} -Hyperbolic Model (Mazur, 1987) +\strong{MODEL:} Hyperbolic Model (Mazur, 1987) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Delay Discounting Task, there should be six columns of data -with the labels "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". 
-It is not necessary for the columns to be in this particular order, however it is necessary that they be labelled -correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Delay Discounting Task, there should be 6 columns of data with the + labels "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"delay_later"}}{An integer representing the delayed days for the later option within the given trial. (e.g., 1 6 15 28 85 170).} - \item{\code{"amount_later"}}{A floating number representing the amount for the later option within the given trial. (e.g., 10.5 38.3 13.4 31.4 30.9, etc.).} - \item{\code{"delay_sooner"}}{An integer representing the delayed days for the sooner option (e.g., 0 0 0 0).} - \item{\code{"amount_sooner"}}{A floating number representing the amount for the sooner option (e.g., 10 10 10 10).} - \item{\code{"choice"}}{An integer value representing the chosen option within the given trial (e.g., 0 = instant amount, 1 = delayed amount)} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"delay_later"}{An integer representing the delayed days for the later option (e.g. 1, 6, 28).} + \item{"amount_later"}{A floating point number representing the amount for the later option (e.g. 
10.5, 13.4, 30.9).} + \item{"delay_sooner"}{An integer representing the delayed days for the sooner option (e.g. 0).} + \item{"amount_sooner"}{A floating point number representing the amount for the sooner option (e.g. 10).} + \item{"choice"}{If amount_later was selected, choice == 1; else if amount_sooner was selected, choice == 0.} + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. 
- -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. 
The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- dd_hyperbolic(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- dd_hyperbolic("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -123,10 +148,8 @@ printFit(output) } \references{ Mazur, J. E. (1987). An adjustment procedure for studying delayed reinforcement. - -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. 
The -Journal of Machine Learning Research, 15(1), 1593-1623. } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/dd_hyperbolic_single.Rd b/man/dd_hyperbolic_single.Rd index 6fd303dd..ae10b412 100644 --- a/man/dd_hyperbolic_single.Rd +++ b/man/dd_hyperbolic_single.Rd @@ -2,108 +2,139 @@ % Please edit documentation in R/dd_hyperbolic_single.R \name{dd_hyperbolic_single} \alias{dd_hyperbolic_single} -\title{Delay Discounting Task (Ebert & Prelec, 2007)} +\title{Delay Discounting Task} \usage{ -dd_hyperbolic_single(data = "choose", niter = 3000, nwarmup = 1000, +dd_hyperbolic_single(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", - indPars = "mean", saveDir = NULL, modelRegressor = FALSE, - vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, - stepsize = 1, max_treedepth = 10) + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. 
Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). 
Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{'hBayesDM'} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model ("dd_hyperbolic_single").} - \item{\code{allIndPars}}{\code{'data.frame'} containing the summarized parameter - values (as specified by \code{'indPars'}) for each subject.} - \item{\code{parVals}}{A \code{'list'} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{'stanfit'} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("dd_hyperbolic_single").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Individual Bayesian Modeling of the Delay Discounting Task using the following parameters: "k" (discounting rate), "beta" (inverse temperature). +Individual Bayesian Modeling of the Delay Discounting Task with the following parameters: + "k" (discounting rate), "beta" (inverse temperature). -\strong{MODEL:} -Hyperbolic +\strong{MODEL:} Hyperbolic Model (Mazur, 1987) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Delay Discounting Task, there should be six columns of data -with the labels "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", and "choice". 
-It is not necessary for the columns to be in this particular order, however it is necessary that they be labelled -correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Delay Discounting Task, there should be 6 columns of data with the + labels "subjID", "delay_later", "amount_later", "delay_sooner", "amount_sooner", "choice". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"delay_later"}}{An integer representing the delayed days for the later option within the given trial. (e.g., 1 6 15 28 85 170).} - \item{\code{"amount_later"}}{A floating number representing the amount for the later option within the given trial. (e.g., 10.5 38.3 13.4 31.4 30.9, etc.).} - \item{\code{"delay_sooner"}}{An integer representing the delayed days for the sooner option (e.g., 0 0 0 0).} - \item{\code{"amount_sooner"}}{A floating number representing the amount for the sooner option (e.g., 10 10 10 10).} - \item{\code{"choice"}}{An integer value representing the chosen option within the given trial (e.g., 0 = instant amount, 1 = delayed amount)} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"delay_later"}{An integer representing the delayed days for the later option (e.g. 1, 6, 28).} + \item{"amount_later"}{A floating point number representing the amount for the later option (e.g. 
10.5, 13.4, 30.9).} + \item{"delay_sooner"}{An integer representing the delayed days for the sooner option (e.g. 0).} + \item{"amount_sooner"}{A floating point number representing the amount for the sooner option (e.g. 10).} + \item{"choice"}{If amount_later was selected, choice == 1; else if amount_sooner was selected, choice == 0.} + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \strong{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \strong{nthin} is equal to 1, hence every sample is used to -generate the posterior. 
+\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. 
It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- dd_hyperbolic_single(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- dd_hyperbolic_single("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -115,3 +146,10 @@ plot(output) printFit(output) } } +\references{ +Mazur, J. E. (1987). An adjustment procedure for studying delayed reinforcement. 
+} +\seealso{ +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} +} diff --git a/man/gng_m1.Rd b/man/gng_m1.Rd index e995c46e..c426389b 100644 --- a/man/gng_m1.Rd +++ b/man/gng_m1.Rd @@ -4,107 +4,136 @@ \alias{gng_m1} \title{Orthogonalized Go/Nogo Task} \usage{ -gng_m1(data = "choose", niter = 5000, nwarmup = 2000, nchain = 4, +gng_m1(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", indPars = "mean", - saveDir = NULL, modelRegressor = FALSE, vb = FALSE, - inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, - max_treedepth = 10) + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "cue", "keyPressed", and "outcome". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "cue", "keyPressed", "outcome". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. 
A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +For this model they are: "Qgo", "Qnogo", "Wgo", "Wnogo".} -\item{modelRegressor}{Exporting model-based regressors (Q(Go), Q(NoGo))? TRUE or FALSE.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"gng_m1"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("gng_m1").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + \item{\code{modelRegressor}}{List object containing the extracted model-based regressors.} } } \description{ -Hierarchical Bayesian Modeling of the Orthogonolized Go/Nogo Task using the following parameters: "xi" (noise), "ep" (learning rate), and "rho" (effective size). +Hierarchical Bayesian Modeling of the Orthogonalized Go/Nogo Task with the following parameters: + "xi" (noise), "ep" (learning rate), "rho" (effective size). -\strong{MODEL:} -RW + noise (Guitart-Masip et al., 2012, Neuroimage) +\strong{MODEL:} RW + noise (Guitart-Masip et al., 2012, Neuroimage) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Go/No-Go Task, there should be four columns of data with the labels "subjID", -"cue", "keyPressed", and "outcome". 
It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Orthogonalized Go/Nogo Task, there should be 4 columns of data with the + labels "subjID", "cue", "keyPressed", "outcome". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"cue"}}{A nominal integer that specifies the cue shown within the given trial (e.g. 1, 2, 3, or 4 in the GNG).} - \item{\code{"keyPressed"}}{A binary value representing whether or not the participant responded on the given trial (1 == Press; 0 == No press).} - \item{\code{"outcome"}}{A 1, 0, or -1 for each given trial (1 == Positive Feedback; 0 == Neutral Feedback; -1 == Negative Feedback).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"cue"}{Nominal integer representing the cue shown for that trial: 1, 2, 3, or 4.} + \item{"keyPressed"}{Binary value representing the subject's response for that trial (where Press == 1; No press == 0).} + \item{"outcome"}{Ternary value representing the outcome of that trial (where Positive feedback == 1; Neutral feedback == 0; Negative feedback == -1).} + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. 
"Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
+ +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- gng_m1(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- gng_m1("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -117,12 +146,11 @@ printFit(output) } } \references{ -Guitart-Masip, M., Huys, Q. J. M., Fuentemilla, L., Dayan, P., Duzel, E., & Dolan, R. J. (2012). Go and no-go learning in -reward and punishment: Interactions between affect and effect. Neuroimage, 62(1), 154-166. http://doi.org/10.1016/j.neuroimage.2012.04.024 - -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. +Guitart-Masip, M., Huys, Q. J. M., Fuentemilla, L., Dayan, P., Duzel, E., & Dolan, R. J. (2012). 
+ Go and no-go learning in reward and punishment: Interactions between affect and effect. + Neuroimage, 62(1), 154-166. http://doi.org/10.1016/j.neuroimage.2012.04.024 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/gng_m2.Rd b/man/gng_m2.Rd index 4140ad86..847be5ea 100644 --- a/man/gng_m2.Rd +++ b/man/gng_m2.Rd @@ -4,107 +4,136 @@ \alias{gng_m2} \title{Orthogonalized Go/Nogo Task} \usage{ -gng_m2(data = "choose", niter = 5000, nwarmup = 2000, nchain = 4, +gng_m2(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", indPars = "mean", - saveDir = NULL, modelRegressor = FALSE, vb = FALSE, - inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, - max_treedepth = 10) + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "cue", "keyPressed", and "outcome". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "cue", "keyPressed", "outcome". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. 
Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +For this model they are: "Qgo", "Qnogo", "Wgo", "Wnogo".} -\item{modelRegressor}{Exporting model-based regressors (Q(Go), Q(NoGo))? TRUE or FALSE.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"gng_m2"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("gng_m2").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + \item{\code{modelRegressor}}{List object containing the extracted model-based regressors.} } } \description{ -Hierarchical Bayesian Modeling of the Orthogonolized Go/Nogo Task using the following parameters: "xi" (noise), "ep" (learning rate), "b" (action bias) and "rho" (effective size). +Hierarchical Bayesian Modeling of the Orthogonalized Go/Nogo Task with the following parameters: + "xi" (noise), "ep" (learning rate), "b" (action bias), "rho" (effective size). -\strong{MODEL:} -RW + noise + bias (Guitart-Masip et al., 2012, Neuroimage) +\strong{MODEL:} RW + noise + bias (Guitart-Masip et al., 2012, Neuroimage) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Go/No-Go Task, there should be four columns of data with the labels "subjID", -"cue", "keyPressed", and "outcome". 
It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Orthogonalized Go/Nogo Task, there should be 4 columns of data with the + labels "subjID", "cue", "keyPressed", "outcome". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"cue"}}{A nominal integer that specifies the cue shown within the given trial (e.g. 1, 2, 3, or 4 in the GNG).} - \item{\code{"keyPressed"}}{A binary value representing whether or not the participant responded on the given trial (1 == Press; 0 == No press).} - \item{\code{"outcome"}}{A 1, 0, or -1 for each given trial (1 == Positive Feedback; 0 == Neutral Feedback; -1 == Negative Feedback).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"cue"}{Nominal integer representing the cue shown for that trial: 1, 2, 3, or 4.} + \item{"keyPressed"}{Binary value representing the subject's response for that trial (where Press == 1; No press == 0).} + \item{"outcome"}{Ternary value representing the outcome of that trial (where Positive feedback == 1; Neutral feedback == 0; Negative feedback == -1).} + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. 
"Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
+ +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- gng_m2(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- gng_m2("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -117,12 +146,11 @@ printFit(output) } } \references{ -Guitart-Masip, M., Huys, Q. J. M., Fuentemilla, L., Dayan, P., Duzel, E., & Dolan, R. J. (2012). Go and no-go learning in -reward and punishment: Interactions between affect and effect. Neuroimage, 62(1), 154-166. http://doi.org/10.1016/j.neuroimage.2012.04.024 - -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. +Guitart-Masip, M., Huys, Q. J. M., Fuentemilla, L., Dayan, P., Duzel, E., & Dolan, R. J. (2012). 
+ Go and no-go learning in reward and punishment: Interactions between affect and effect. + Neuroimage, 62(1), 154-166. http://doi.org/10.1016/j.neuroimage.2012.04.024 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/gng_m3.Rd b/man/gng_m3.Rd index a492e491..cc29ad42 100644 --- a/man/gng_m3.Rd +++ b/man/gng_m3.Rd @@ -4,107 +4,136 @@ \alias{gng_m3} \title{Orthogonalized Go/Nogo Task} \usage{ -gng_m3(data = "choose", niter = 5000, nwarmup = 2000, nchain = 4, - ncore = 1, nthin = 1, inits = "fixed", indPars = "mean", - saveDir = NULL, modelRegressor = FALSE, vb = FALSE, - inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, - max_treedepth = 10) +gng_m3(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, + ncore = 1, nthin = 1, inits = "random", indPars = "mean", + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "cue", "keyPressed", and "outcome". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "cue", "keyPressed", "outcome". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. 
Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +For this model they are: "Qgo", "Qnogo", "Wgo", "Wnogo", "SV".} -\item{modelRegressor}{Exporting model-based regressors (Q(Go), Q(NoGo))? TRUE or FALSE.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). 
Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"gng_m3"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("gng_m3").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + \item{\code{modelRegressor}}{List object containing the extracted model-based regressors.} } } \description{ -Hierarchical Bayesian Modeling of the Orthogonolized Go/Nogo Task using the following parameters: "xi" (noise), "ep" (learning rate), "b" (action bias), "pi" (Pavlovian bias), and "rho" (effective size). +Hierarchical Bayesian Modeling of the Orthogonalized Go/Nogo Task with the following parameters: + "xi" (noise), "ep" (learning rate), "b" (action bias), "pi" (Pavlovian bias), "rho" (effective size). -\strong{MODEL:} -RW + noise + bias + pi (Guitart-Masip et al., 2012, Neuroimage) +\strong{MODEL:} RW + noise + bias + pi (Guitart-Masip et al., 2012, Neuroimage) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Go/No-Go Task, there should be four columns of data with the labels "subjID", -"cue", "keyPressed", and "outcome". 
It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Orthogonalized Go/Nogo Task, there should be 4 columns of data with the + labels "subjID", "cue", "keyPressed", "outcome". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"cue"}}{A nominal integer that specifies the cue shown within the given trial (e.g. 1, 2, 3, or 4 in the GNG).} - \item{\code{"keyPressed"}}{A binary value representing whether or not the participant responded on the given trial (1 == Press; 0 == No press).} - \item{\code{"outcome"}}{A 1, 0, or -1 for each given trial (1 == Positive Feedback; 0 == Neutral Feedback; -1 == Negative Feedback).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"cue"}{Nominal integer representing the cue shown for that trial: 1, 2, 3, or 4.} + \item{"keyPressed"}{Binary value representing the subject's response for that trial (where Press == 1; No press == 0).} + \item{"outcome"}{Ternary value representing the outcome of that trial (where Positive feedback == 1; Neutral feedback == 0; Negative feedback == -1).} + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. 
"Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
+ +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- gng_m3(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- gng_m3("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -117,12 +146,11 @@ printFit(output) } } \references{ -Guitart-Masip, M., Huys, Q. J. M., Fuentemilla, L., Dayan, P., Duzel, E., & Dolan, R. J. (2012). Go and no-go learning in -reward and punishment: Interactions between affect and effect. Neuroimage, 62(1), 154-166. http://doi.org/10.1016/j.neuroimage.2012.04.024 - -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. +Guitart-Masip, M., Huys, Q. J. M., Fuentemilla, L., Dayan, P., Duzel, E., & Dolan, R. J. (2012). 
+ Go and no-go learning in reward and punishment: Interactions between affect and effect. + Neuroimage, 62(1), 154-166. http://doi.org/10.1016/j.neuroimage.2012.04.024 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/gng_m4.Rd b/man/gng_m4.Rd index fd717472..0847073a 100644 --- a/man/gng_m4.Rd +++ b/man/gng_m4.Rd @@ -4,107 +4,136 @@ \alias{gng_m4} \title{Orthogonalized Go/Nogo Task} \usage{ -gng_m4(data = "choose", niter = 5000, nwarmup = 2000, nchain = 4, - ncore = 1, nthin = 1, inits = "fixed", indPars = "mean", - saveDir = NULL, modelRegressor = FALSE, vb = FALSE, - inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, - max_treedepth = 10) +gng_m4(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, + ncore = 1, nthin = 1, inits = "random", indPars = "mean", + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "cue", "keyPressed", and "outcome". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "cue", "keyPressed", "outcome". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. 
Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +For this model they are: "Qgo", "Qnogo", "Wgo", "Wnogo", "SV".} -\item{modelRegressor}{Exporting model-based regressors (Q(Go), Q(NoGo))? TRUE or FALSE.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). 
Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"gng_m4"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("gng_m4").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + \item{\code{modelRegressor}}{List object containing the extracted model-based regressors.} } } \description{ -Hierarchical Bayesian Modeling of the Orthogonolized Go/Nogo Task using the following parameters: "xi" (noise), "ep" (learning rate), "b" (action bias), "pi" (Pavlovian bias), "rhoRew" (reward sensitivity), and "rhoPun" (punishment sensitivity) +Hierarchical Bayesian Modeling of the Orthogonalized Go/Nogo Task with the following parameters: + "xi" (noise), "ep" (learning rate), "b" (action bias), "pi" (Pavlovian bias), "rhoRew" (reward sensitivity), "rhoPun" (punishment sensitivity). -\strong{MODEL:} -RW (rew/pun) + noise + bias + pi (Cavanagh et al., 2013, J Neuro) +\strong{MODEL:} RW (rew/pun) + noise + bias + pi (Cavanagh et al., 2013, J Neuro) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. 
For the Go/No-Go Task, there should be four columns of data with the labels "subjID", -"cue", "keyPressed", and "outcome". It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Orthogonalized Go/Nogo Task, there should be 4 columns of data with the + labels "subjID", "cue", "keyPressed", "outcome". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"cue"}}{A nominal integer that specifies the cue shown within the given trial (e.g. 1, 2, 3, or 4 in the GNG).} - \item{\code{"keyPressed"}}{A binary value representing whether or not the participant responded on the given trial (1 == Press; 0 == No press).} - \item{\code{"outcome"}}{A 1, 0, or -1 for each given trial (1 == Positive Feedback; 0 == Neutral Feedback; -1 == Negative Feedback).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"cue"}{Nominal integer representing the cue shown for that trial: 1, 2, 3, or 4.} + \item{"keyPressed"}{Binary value representing the subject's response for that trial (where Press == 1; No press == 0).} + \item{"outcome"}{Ternary value representing the outcome of that trial (where Positive feedback == 1; Neutral feedback == 0; Negative feedback == -1).} + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. 
"Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
+ +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- gng_m4(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- gng_m4("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -117,12 +146,11 @@ printFit(output) } } \references{ -Cavanagh, J. F., Eisenberg, I., Guitart-Masip, M., Huys, Q., & Frank, M. J. (2013). Frontal Theta Overrides Pavlovian -Learning Biases. Journal of Neuroscience, 33(19), 8541-8548. http://doi.org/10.1523/JNEUROSCI.5754-12.2013 - -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. +Cavanagh, J. F., Eisenberg, I., Guitart-Masip, M., Huys, Q., & Frank, M. J. (2013). Frontal Theta + Overrides Pavlovian Learning Biases. 
Journal of Neuroscience, 33(19), 8541-8548. + http://doi.org/10.1523/JNEUROSCI.5754-12.2013 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/hBayesDM_model.Rd b/man/hBayesDM_model.Rd new file mode 100644 index 00000000..24d08052 --- /dev/null +++ b/man/hBayesDM_model.Rd @@ -0,0 +1,113 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/hBayesDM_model.R +\name{hBayesDM_model} +\alias{hBayesDM_model} +\title{hBayesDM Model Base Function} +\usage{ +hBayesDM_model(task_name, model_name, model_type = "", data_columns, + parameters, regressors = NULL, postpreds = "y_pred", + stanmodel_arg = NULL, preprocess_func) +} +\arguments{ +\item{task_name}{Character value for name of task. E.g. \code{"gng"}.} + +\item{model_name}{Character value for name of model. E.g. \code{"m1"}.} + +\item{model_type}{Character value for modeling type: \code{""} OR \code{"single"} OR +\code{"multipleB"}.} + +\item{data_columns}{Character vector of necessary column names for the data. E.g. +\code{c("subjID", "cue", "keyPressed", "outcome")}.} + +\item{parameters}{List of parameters, with information about their lower bound, plausible value, +upper bound. E.g. \code{list("xi" = c(0, 0.1, 1), "ep" = c(0, 0.2, 1), "rho" = c(0, exp(2), +Inf))}.} + +\item{regressors}{List of regressors, with information about their extracted dimensions. E.g. +\code{list("Qgo" = 2, "Qnogo" = 2, "Wgo" = 2, "Wnogo" = 2)}. OR if model-based regressors are +not available for this model, \code{NULL}.} + +\item{postpreds}{Character vector of name(s) for the trial-level posterior predictive +simulations. Default is \code{"y_pred"}. OR if posterior predictions are not yet available for +this model, \code{NULL}.} + +\item{stanmodel_arg}{Leave as \code{NULL} (default) for completed models. 
Else should either be a +character value (specifying the name of a Stan file) OR a \code{stanmodel} object (returned as +a result of running \code{\link[rstan]{stan_model}}).} + +\item{preprocess_func}{Function to preprocess the raw data before it gets passed to Stan. Takes +(at least) two arguments: a data.table object \code{raw_data} and a list object +\code{general_info}. Possible to include additional argument(s) to use during preprocessing. +Should return a list object \code{data_list}, which will then directly be passed to Stan.} +} +\value{ +A specific hBayesDM model function. +} +\description{ +The base function from which all hBayesDM model functions are created. + +Contributor: \href{https://ccs-lab.github.io/team/jethro-lee/}{Jethro Lee} +} +\details{ +\strong{task_name}: Typically same task models share the same data column requirements. + +\strong{model_name}: Typically different models are distinguished by their different list of + parameters. + +\strong{model_type} is one of the following three: +\describe{ + \item{\code{""}}{Modeling of multiple subjects. (Default hierarchical Bayesian analysis.)} + \item{\code{"single"}}{Modeling of a single subject.} + \item{\code{"multipleB"}}{Modeling of multiple subjects, where multiple blocks exist within + each subject.} +} + +\strong{data_columns} must be the entirety of necessary data columns used at some point in the R + or Stan code. I.e. \code{"subjID"} must always be included. In the case of 'multipleB' type + models, \code{"block"} should also be included as well. + +\strong{parameters} is a list object, whose keys are the parameters of this model. Each parameter + key must be assigned a numeric vector holding 3 elements: the parameter's lower bound, + plausible value, and upper bound. + +\strong{regressors} is a list object, whose keys are the model-based regressors of this model. + Each regressor key must be assigned a numeric value indicating the number of dimensions its + data will be extracted as. 
If model-based regressors are not available for this model, this + argument should just be \code{NULL}. + +\strong{postpreds} defaults to \code{"y_pred"}, but any other character vector holding + appropriate names is possible (cf. Two-Step Task models). If posterior predictions are not yet + available for this model, this argument should just be \code{NULL}. + +\strong{stanmodel_arg} can be used by developers, during the developmental stage of creating a + new model function. If this argument is passed a character value, the Stan file with the + corresponding name will be used for model fitting. If this argument is passed a + \code{stanmodel} object, that \code{stanmodel} object will be used for model fitting. When + creation of the model function is complete, this argument should just be left as \code{NULL}. + +\strong{preprocess_func} is the part of the code that is specific to the model, and is thus + written in the specific model R file.\cr +Arguments for this function are: +\describe{ + \item{\code{raw_data}}{A data.table that holds the raw user data, which was read by using + \code{\link[data.table]{fread}}.} + \item{\code{general_info}}{A list that holds the general information about the raw data, i.e. + \code{subjs}, \code{n_subj}, \code{t_subjs}, \code{t_max}, \code{b_subjs}, \code{b_max}.} + \item{\code{...}}{Optional additional argument(s) that specific model functions may want to + include. Examples of such additional arguments currently being used in hBayesDM models are: + \code{RTbound} (choiceRT_ddm models), \code{payscale} (igt models), and \code{trans_prob} (ts + models).} +} +Return value for this function should be: +\describe{ + \item{\code{data_list}}{A list with appropriately named keys (as required by the model Stan + file), holding the fully preprocessed user data.} +} +NOTE: Syntax for data.table slightly differs from that of data.frame. 
If you want to use + \code{raw_data} as a data.frame when writing the \code{preprocess_func}, simply begin with the + line: \code{raw_data <- as.data.frame(raw_data)}.\cr +NOTE: Because of allowing case & underscore insensitive column names in user data, + \code{raw_data} columns must now be referenced by their lowercase non-underscored versions, + e.g. \code{"subjid"}, within the code of the preprocess function.\cr +} +\keyword{internal} diff --git a/man/igt_orl.Rd b/man/igt_orl.Rd index f5b3ad1d..f2e7cfc2 100644 --- a/man/igt_orl.Rd +++ b/man/igt_orl.Rd @@ -4,113 +4,139 @@ \alias{igt_orl} \title{Iowa Gambling Task} \usage{ -igt_orl(data = "choose", niter = 3000, nwarmup = 1000, nchain = 4, +igt_orl(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", indPars = "mean", - payscale = 100, saveDir = NULL, modelRegressor = FALSE, - vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, - stepsize = 1, max_treedepth = 10) + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "deck", "gain", and "loss". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "choice", "gain", "loss". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. 
Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{payscale}{Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. 
Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{For this model, it's possible to set the following \strong{model-specific argument} to a value that you may prefer. \cr +\code{payscale}: Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model ("igt_orl").} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("igt_orl").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Iowa Gambling Task using the following parameters: "Arew" (reward learning rate), "Apun" (punishment learning rate), "K" (perseverance decay), "betaF" (outcome frequency weight), and "betaP" (perseverance weight). +Hierarchical Bayesian Modeling of the Iowa Gambling Task with the following parameters: + "Arew" (reward learning rate), "Apun" (punishment learning rate), "K" (perseverance decay), "betaF" (outcome frequency weight), "betaP" (perseverance weight). Contributor: \href{https://ccs-lab.github.io/team/nate-haines/}{Nate Haines} -\strong{MODEL:} -Outcome-Representation Learning Model (Haines, Vassileva, & Ahn (in press) Cognitive Science)) +\strong{MODEL:} Outcome-Representation Learning Model (Haines et al., 2018, Cognitive Science) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. 
For the Iowa Gambling Task, there should be four columns of data with the labels -"subjID", "deck", "gain", and "loss". It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Iowa Gambling Task, there should be 4 columns of data with the + labels "subjID", "choice", "gain", "loss". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"deck"}}{A nominal integer representing which deck was chosen within the given trial (e.g. A, B, C, or D == 1, 2, 3, or 4 in the IGT).} - \item{\code{"gain"}}{A floating number representing the amount of currency won on the given trial (e.g. 50, 50, 100).} - \item{\code{"loss"}}{A floating number representing the amount of currency lost on the given trial (e.g. 0, -50).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"choice"}{Integer indicating which deck was chosen on that trial (where A==1, B==2, C==3, and D==4).} + \item{"gain"}{Floating point value representing the amount of currency won on that trial (e.g. 50, 100).} + \item{"loss"}{Floating point value representing the amount of currency lost on that trial (e.g. 0, -50).} + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. 
"Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
+ +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- igt_orl(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- igt_orl("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -123,12 +149,11 @@ printFit(output) } } \references{ -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. - -Haines, N., Vassileva, J., Ahn, W.-Y. (in press). The Outcome-Representation Learning model: a novel reinforcement learning model of -the Iowa Gambling Task. Cognitive Science. +Haines, N., Vassileva, J., & Ahn, W.-Y. (2018). The Outcome-Representation Learning Model: A + Novel Reinforcement Learning Model of the Iowa Gambling Task. Cognitive Science. 
+ https://doi.org/10.1111/cogs.12688 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/igt_pvl_decay.Rd b/man/igt_pvl_decay.Rd index 83c9a6a8..5b338806 100644 --- a/man/igt_pvl_decay.Rd +++ b/man/igt_pvl_decay.Rd @@ -4,111 +4,138 @@ \alias{igt_pvl_decay} \title{Iowa Gambling Task} \usage{ -igt_pvl_decay(data = "choose", niter = 3000, nwarmup = 1000, +igt_pvl_decay(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", - indPars = "mean", payscale = 100, saveDir = NULL, - modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, - adapt_delta = 0.95, stepsize = 1, max_treedepth = 10) + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", "gain", and "loss". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "choice", "gain", "loss". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. 
Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{payscale}{Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{For this model, it's possible to set the following \strong{model-specific argument} to a value that you may prefer. \cr +\code{payscale}: Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model ("igt_pvl_decay").} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("igt_pvl_decay").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Iowa Gambling Task using the following parameters: "A" (decay rate), "alpha" (outcome sensitivity), "cons" (response consistency), and "lambda" (loss aversion). +Hierarchical Bayesian Modeling of the Iowa Gambling Task with the following parameters: + "A" (decay rate), "alpha" (outcome sensitivity), "cons" (response consistency), "lambda" (loss aversion). -\strong{MODEL:} -Prospect Valence Learning (PVL) Decay-RI (Ahn et al., 2014, Frontiers in Psychology) +\strong{MODEL:} Prospect Valence Learning (PVL) Decay-RI (Ahn et al., 2014, Frontiers in Psychology) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Iowa Gambling Task, there should be four columns of data with the labels -"subjID", "choice", "gain", and "loss". 
It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Iowa Gambling Task, there should be 4 columns of data with the + labels "subjID", "choice", "gain", "loss". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"choice"}}{A nominal integer representing which deck was chosen within the given trial (e.g. A, B, C, or D == 1, 2, 3, or 4 in the IGT).} - \item{\code{"gain"}}{A floating number representing the amount of currency won on the given trial (e.g. 50, 50, 100).} - \item{\code{"loss"}}{A floating number representing the amount of currency lost on the given trial (e.g. 0, -50).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"choice"}{Integer indicating which deck was chosen on that trial (where A==1, B==2, C==3, and D==4).} + \item{"gain"}{Floating point value representing the amount of currency won on that trial (e.g. 50, 100).} + \item{"loss"}{Floating point value representing the amount of currency lost on that trial (e.g. 0, -50).} + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. 
As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
+ +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- igt_pvl_decay(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- igt_pvl_decay("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -121,13 +148,12 @@ printFit(output) } } \references{ -Ahn, W.-Y., Vasilev, G., Lee, S.-H., Busemeyer, J. R., Kruschke, J. K., Bechara, A., & Vassileva, J. (2014). Decision-making -in stimulant and opiate addicts in protracted abstinence: evidence from computational modeling with pure users. Frontiers in -Psychology, 5, 1376. http://doi.org/10.3389/fpsyg.2014.00849 - -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. +Ahn, W.-Y., Vasilev, G., Lee, S.-H., Busemeyer, J. 
R., Kruschke, J. K., Bechara, A., & Vassileva, + J. (2014). Decision-making in stimulant and opiate addicts in protracted abstinence: evidence + from computational modeling with pure users. Frontiers in Psychology, 5, 1376. + http://doi.org/10.3389/fpsyg.2014.00849 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/igt_pvl_delta.Rd b/man/igt_pvl_delta.Rd index 86140e13..2d0a327f 100644 --- a/man/igt_pvl_delta.Rd +++ b/man/igt_pvl_delta.Rd @@ -4,111 +4,138 @@ \alias{igt_pvl_delta} \title{Iowa Gambling Task (Ahn et al., 2008)} \usage{ -igt_pvl_delta(data = "choose", niter = 3000, nwarmup = 1000, +igt_pvl_delta(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", - indPars = "mean", payscale = 100, saveDir = NULL, - modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, - adapt_delta = 0.95, stepsize = 1, max_treedepth = 10) + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", "gain", and "loss". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "choice", "gain", "loss". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. 
Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{payscale}{Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. 
Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} - -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{For this model, it's possible to set the following \strong{model-specific argument} to a value that you may prefer. \cr +\code{payscale}: Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model ("igt_pvl_delta").} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("igt_pvl_delta").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Iowa Gambling Task using the following parameters: "A" (learning rate), "alpha" (outcome sensitivity), "cons" (response consistency), and "lambda" (loss aversion). +Hierarchical Bayesian Modeling of the Iowa Gambling Task with the following parameters: + "A" (learning rate), "alpha" (outcome sensitivity), "cons" (response consistency), "lambda" (loss aversion). -\strong{MODEL:} -Prospect Valence Learning (PVL) Delta (Ahn et al., 2008, Cognitive Science) +\strong{MODEL:} Prospect Valence Learning (PVL) Delta (Ahn et al., 2008, Cognitive Science) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Iowa Gambling Task, there should be four columns of data with the labels -"subjID", "choice", "gain", and "loss". 
It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Iowa Gambling Task, there should be 4 columns of data with the + labels "subjID", "choice", "gain", "loss". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"choice"}}{A nominal integer representing which deck was chosen within the given trial (e.g. A, B, C, or D == 1, 2, 3, or 4 in the IGT).} - \item{\code{"gain"}}{A floating number representing the amount of currency won on the given trial (e.g. 50, 50, 100).} - \item{\code{"loss"}}{A floating number representing the amount of currency lost on the given trial (e.g. 0, -50).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"choice"}{Integer indicating which deck was chosen on that trial (where A==1, B==2, C==3, and D==4).} + \item{"gain"}{Floating point value representing the amount of currency won on that trial (e.g. 50, 100).} + \item{"loss"}{Floating point value representing the amount of currency lost on that trial (e.g. 0, -50).} + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. 
As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
+ +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- igt_pvl_delta(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- igt_pvl_delta("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -121,12 +148,11 @@ printFit(output) } } \references{ -Ahn, W. Y., Busemeyer, J. R., & Wagenmakers, E. J. (2008). Comparison of decision learning models using the generalization -criterion method. Cognitive Science, 32(8), 1376-1402. http://doi.org/10.1080/03640210802352992 - -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. +Ahn, W. Y., Busemeyer, J. R., & Wagenmakers, E. J. (2008). Comparison of decision learning models + using the generalization criterion method. 
Cognitive Science, 32(8), 1376-1402. + http://doi.org/10.1080/03640210802352992 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/igt_vpp.Rd b/man/igt_vpp.Rd index 68f9c518..dd513f7d 100644 --- a/man/igt_vpp.Rd +++ b/man/igt_vpp.Rd @@ -4,111 +4,137 @@ \alias{igt_vpp} \title{Iowa Gambling Task} \usage{ -igt_vpp(data = "choose", niter = 3000, nwarmup = 1000, nchain = 4, +igt_vpp(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", indPars = "mean", - payscale = 100, saveDir = NULL, modelRegressor = FALSE, - vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, - stepsize = 1, max_treedepth = 10) + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", "gain", and "loss". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "choice", "gain", "loss". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. 
Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{payscale}{Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.}

-\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.}

+\item{stepsize}{Floating point value specifying the size of each leapfrog step that the MCMC sampler can
+take on each new iteration. See \bold{Details} below.}

-\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.}

+\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take
+on each new iteration. See \bold{Details} below.}

-\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.}
-
-\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.}
+\item{...}{For this model, it's possible to set the following \strong{model-specific argument} to a value that you may prefer. \cr
+\code{payscale}: Raw payoffs within data are divided by this number. Used for scaling data. Defaults to 100.}
 }
 \value{
-\code{modelData} A class \code{"hBayesDM"} object with the following components:
+A class "hBayesDM" object \code{modelData} with the following components:
 \describe{
- \item{\code{model}}{Character string with the name of the model ("igt_vpp").}
- \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter
- values (as specified by \code{"indPars"}) for each subject.}
- \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples
- over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("igt_vpp").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Iowa Gambling Task using the following parameters: "A" (learning rate), "alpha" (outcome sensitivity), "cons" (response consistency), "lambda" (loss aversion), "epP" (gain impact), "epN" (loss impact), "K" (decay rate), and "w" (RL weight). +Hierarchical Bayesian Modeling of the Iowa Gambling Task with the following parameters: + "A" (learning rate), "alpha" (outcome sensitivity), "cons" (response consistency), "lambda" (loss aversion), "epP" (gain impact), "epN" (loss impact), "K" (decay rate), "w" (RL weight). -\strong{MODEL:} -Value-Plus-Perseverance (Worthy et al., 2014, Frontiers in Psychology) +\strong{MODEL:} Value-Plus-Perseverance (Worthy et al., 2013, Frontiers in Psychology) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. 
For the Iowa Gambling Task, there should be four columns of data with the labels -"subjID", "choice", "gain", and "loss". It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Iowa Gambling Task, there should be 4 columns of data with the + labels "subjID", "choice", "gain", "loss". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"choice"}}{A nominal integer representing which deck was chosen within the given trial (e.g. A, B, C, or D == 1, 2, 3, or 4 in the IGT).} - \item{\code{"gain"}}{A floating number representing the amount of currency won on the given trial (e.g. 50, 50, 100).} - \item{\code{"loss"}}{A floating number representing the amount of currency lost on the given trial (e.g. 0, -50).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"choice"}{Integer indicating which deck was chosen on that trial (where A==1, B==2, C==3, and D==4).} + \item{"gain"}{Floating point value representing the amount of currency won on that trial (e.g. 50, 100).} + \item{"loss"}{Floating point value representing the amount of currency lost on that trial (e.g. 0, -50).} + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. 
"Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
+ +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- igt_vpp(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- igt_vpp("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -121,12 +147,11 @@ printFit(output) } } \references{ -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. - -Worthy, D. A., & Todd Maddox, W. (2014). A comparison model of reinforcement-learning and win-stay-lose-shift decision-making -processes: A tribute to W.K. Estes. Journal of Mathematical Psychology, 59, 41-49. http://doi.org/10.1016/j.jmp.2013.10.001 +Worthy, D. A., & Todd Maddox, W. (2013). 
A comparison model of reinforcement-learning and + win-stay-lose-shift decision-making processes: A tribute to W.K. Estes. Journal of Mathematical + Psychology, 59, 41-49. http://doi.org/10.1016/j.jmp.2013.10.001 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/peer_ocu.Rd b/man/peer_ocu.Rd index 0f2cf606..a770b258 100644 --- a/man/peer_ocu.Rd +++ b/man/peer_ocu.Rd @@ -2,117 +2,140 @@ % Please edit documentation in R/peer_ocu.R \name{peer_ocu} \alias{peer_ocu} -\title{Peer influence task (Chung et al., 2015 Nature Neuroscience)} +\title{Peer Influence Task (Chung et al., 2015, Nature Neuroscience)} \usage{ peer_ocu(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, - ncore = 1, nthin = 1, inits = "fixed", indPars = "mean", - saveDir = NULL, modelRegressor = FALSE, vb = FALSE, - inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, - max_treedepth = 10) + ncore = 1, nthin = 1, inits = "random", indPars = "mean", + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "gain", "loss", "cert", and "gamble". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "condition", "p_gamble", "safe_Hpayoff", "safe_Lpayoff", "risky_Hpayoff", "risky_Lpayoff", "choice". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. 
Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). 
Defaults to FALSE.}

-\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.}

+\item{adapt_delta}{Floating point value representing the target acceptance probability of a new
+sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.}

-\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.}

+\item{stepsize}{Floating point value specifying the size of each leapfrog step that the MCMC sampler can
+take on each new iteration. See \bold{Details} below.}

-\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.}

+\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take
+on each new iteration. See \bold{Details} below.}

-\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.}
+\item{...}{Not used for this model.}
 }
 \value{
-\code{modelData} A class \code{"hBayesDM"} object with the following components:
+A class "hBayesDM" object \code{modelData} with the following components:
 \describe{
- \item{\code{model}}{Character string with the name of the model (\code{"ra_prospect"}).}
- \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter
- values (as specified by \code{"indPars"}) for each subject.}
- \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples
- over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("peer_ocu").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Peer Influence Task with the following parameters: "rho" (risk preference), "tau" (inverse temperature), and "ocu" (other-conferred utility).\cr\cr +Hierarchical Bayesian Modeling of the Peer Influence Task with the following parameters: + "rho" (risk preference), "tau" (inverse temperature), "ocu" (other-conferred utility). Contributor: \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -\strong{MODEL:} -Peer influence task - OCU (other-conferred utility) model +\strong{MODEL:} Other-Conferred Utility (OCU) Model } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Risk Aversion Task, there should be four columns of data with the labels -"subjID", "condition", "p_gamble", "safe_Hpayoff", "safe_Lpayoff", "risky_Hpayoff", "risky_Lpayoff", "choice". 
It is not necessary for the columns to be in this -particular order, however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Peer Influence Task, there should be 8 columns of data with the + labels "subjID", "condition", "p_gamble", "safe_Hpayoff", "safe_Lpayoff", "risky_Hpayoff", "risky_Lpayoff", "choice". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"condition"}}{0: solo, 1: info (safe/safe), 2: info (mix), 3: info (risky/risky)} - \item{\code{"p_gamble"}}{Probability of receiving a high payoff (same for both options)} - \item{\code{"safe_Hpayoff"}}{High payoff of the safe option} - \item{\code{"safe_Lpayoff"}}{Low payoff of the safe option} - \item{\code{"risky_Hpayoff"}}{High payoff of the risky option} - \item{\code{"risky_Lpayoff"}}{Low payoff of the risky option} - \item{\code{"choice"}}{Which option was chosen? 
0: safe 1: risky} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"condition"}{0: solo, 1: info (safe/safe), 2: info (mix), 3: info (risky/risky).} + \item{"p_gamble"}{Probability of receiving a high payoff (same for both options).} + \item{"safe_Hpayoff"}{High payoff of the safe option.} + \item{"safe_Lpayoff"}{Low payoff of the safe option.} + \item{"risky_Hpayoff"}{High payoff of the risky option.} + \item{"risky_Lpayoff"}{Low payoff of the risky option.} + \item{"choice"}{Which option was chosen? 0: safe, 1: risky.} + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". 
- -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. 
Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
} \examples{ \dontrun{ # Run the model and store results in "output" -output <- peer_ocu(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- peer_ocu("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -122,14 +145,14 @@ plot(output) # Show the WAIC and LOOIC model fit estimates printFit(output) - - } } \references{ -Chung, D., Christopoulos, G. I., King-Casas, B., Ball, S. B., & Chiu, P. H. (2015). Social signals of safety and risk confer utility and have asymmetric effects on observers' choices. -Nature neuroscience, 18(6), 912-916. +Chung, D., Christopoulos, G. I., King-Casas, B., Ball, S. B., & Chiu, P. H. (2015). Social + signals of safety and risk confer utility and have asymmetric effects on observers' choices. + Nature Neuroscience, 18(6), 912-916. 
} \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/prl_ewa.Rd b/man/prl_ewa.Rd index cd6e34f4..36711a99 100644 --- a/man/prl_ewa.Rd +++ b/man/prl_ewa.Rd @@ -4,110 +4,138 @@ \alias{prl_ewa} \title{Probabilistic Reversal Learning Task} \usage{ -prl_ewa(data = "choice", niter = 3000, nwarmup = 1000, nchain = 1, +prl_ewa(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", indPars = "mean", - saveDir = NULL, modelRegressor = FALSE, vb = FALSE, - inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, - max_treedepth = 10) + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", and "outcome". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "choice", "outcome". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. 
A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +For this model they are: "ev_c", "ev_nc", "ew_c", "ew_nc".} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"prl_ewa"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("prl_ewa").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + \item{\code{modelRegressor}}{List object containing the extracted model-based regressors.} } } \description{ -Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning (PRL) Task using the following parameters: "phi" (1 - learning rate), "rho" (experience decay factor), and "beta" (inverse temperature). +Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning Task with the following parameters: + "phi" (1 - learning rate), "rho" (experience decay factor), "beta" (inverse temperature). -Contributor (for model-based regressors): \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +Contributor: (for model-based regressors) \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -\strong{MODEL:} -Experience-Weighted Attraction Model (Ouden et al., 2013, Neuron) +\strong{MODEL:} Experience-Weighted Attraction Model (Ouden et al., 2013, Neuron) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. 
".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Probabilistic Reversal Learning Task, there should be three columns of data -with the labels "subjID", "choice", and "outcome". It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Probabilistic Reversal Learning Task, there should be 3 columns of data with the + labels "subjID", "choice", "outcome". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"choice"}}{An integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in PRL).} - \item{\code{"outcome"}}{A 1 or -1 for outcome within each given trial (1 = reward, -1 = loss).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"choice"}{Integer value representing the option chosen on that trial: 1 or 2.} + \item{"outcome"}{Integer value representing the outcome of that trial (where reward == 1, and loss == -1).} + + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. 
"Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
+ +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- prl_ewa(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- prl_ewa("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -120,12 +148,11 @@ printFit(output) } } \references{ -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. - -Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. (2013). Dissociable -Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), 1090-1100. http://doi.org/10.1016/j.neuron.2013.08.030 +Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. + (2013). 
Dissociable Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), + 1090-1100. http://doi.org/10.1016/j.neuron.2013.08.030 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/prl_fictitious.Rd b/man/prl_fictitious.Rd index f32eabbf..8bb8b288 100644 --- a/man/prl_fictitious.Rd +++ b/man/prl_fictitious.Rd @@ -4,110 +4,139 @@ \alias{prl_fictitious} \title{Probabilistic Reversal Learning Task} \usage{ -prl_fictitious(data = "choice", niter = 3000, nwarmup = 1000, - nchain = 1, ncore = 1, nthin = 1, inits = "random", - indPars = "mean", saveDir = NULL, modelRegressor = FALSE, - vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, - stepsize = 1, max_treedepth = 10) +prl_fictitious(data = "choose", niter = 4000, nwarmup = 1000, + nchain = 4, ncore = 1, nthin = 1, inits = "random", + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", and "outcome". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "choice", "outcome". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. 
Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +For this model they are: "ev_c", "ev_nc", "pe_c", "pe_nc", "dv".} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). 
Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"prl_fictitious"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("prl_fictitious").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + \item{\code{modelRegressor}}{List object containing the extracted model-based regressors.} } } \description{ -Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning (PRL) Task using the following parameters: "eta" (learning rate), "alpha" (indecision point), "beta" (inverse temperature). +Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning Task with the following parameters: + "eta" (learning rate), "alpha" (indecision point), "beta" (inverse temperature). -Contributor (for model-based regressors): \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +Contributor: (for model-based regressors) \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -\strong{MODEL:} -Fictitious Update Model (Glascher et al., 2008, Cerebral Cortex) +\strong{MODEL:} Fictitious Update Model (Glascher et al., 2009, Cerebral Cortex) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. 
".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Probabilistic Reversal Learning Task, there should be three columns of data -with the labels "subjID", "choice", and "outcome". It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Probabilistic Reversal Learning Task, there should be 3 columns of data with the + labels "subjID", "choice", "outcome". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"choice"}}{An integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in PRL).} - \item{\code{"outcome"}}{A 1 or -1 for outcome within each given trial (1 = reward, -1 = loss).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"choice"}{Integer value representing the option chosen on that trial: 1 or 2.} + \item{"outcome"}{Integer value representing the outcome of that trial (where reward == 1, and loss == -1).} + + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. 
"Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
+ +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- prl_fictitious(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- prl_fictitious("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -120,12 +149,11 @@ printFit(output) } } \references{ -Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). Determining a Role for Ventromedial Prefrontal Cortex in Encoding -Action-Based Value Signals During Reward-Related Decision Making. Cerebral Cortex, 19(2), 483-495. http://doi.org/10.1093/cercor/bhn098 - -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. +Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). 
Determining a Role for Ventromedial + Prefrontal Cortex in Encoding Action-Based Value Signals During Reward-Related Decision Making. + Cerebral Cortex, 19(2), 483-495. http://doi.org/10.1093/cercor/bhn098 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/prl_fictitious_multipleB.Rd b/man/prl_fictitious_multipleB.Rd index 8b995da5..c7bb4c61 100644 --- a/man/prl_fictitious_multipleB.Rd +++ b/man/prl_fictitious_multipleB.Rd @@ -2,107 +2,141 @@ % Please edit documentation in R/prl_fictitious_multipleB.R \name{prl_fictitious_multipleB} \alias{prl_fictitious_multipleB} -\title{Probabilistic Reversal Learning Task (Glascher et al, 2008), multiple blocks per subject} +\title{Probabilistic Reversal Learning Task} \usage{ -prl_fictitious_multipleB(data = "choice", niter = 3000, - nwarmup = 1000, nchain = 1, ncore = 1, nthin = 1, - inits = "random", indPars = "mean", saveDir = NULL, - modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, - adapt_delta = 0.95, stepsize = 1, max_treedepth = 10) +prl_fictitious_multipleB(data = "choose", niter = 4000, + nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, + inits = "random", indPars = "mean", modelRegressor = FALSE, + vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, + stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", "outcome", adn "block". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "block", "choice", "outcome". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. 
Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +For this model they are: "ev_c", "ev_nc", "pe_c", "pe_nc", "dv".} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. 
Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{'hBayesDM'} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model ("prl_fictitious_multipleB").} - \item{\code{allIndPars}}{\code{'data.frame'} containing the summarized parameter - values (as specified by \code{'indPars'}) for each subject.} - \item{\code{parVals}}{A \code{'list'} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{'stanfit'} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("prl_fictitious_multipleB").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + \item{\code{modelRegressor}}{List object containing the extracted model-based regressors.} } } \description{ -Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning (PRL) Task using the following parameters: "eta" (learning rate), "alpha" (indecision point), "beta" (inverse temperature). +Multiple-Block Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning Task with the following parameters: + "eta" (learning rate), "alpha" (indecision point), "beta" (inverse temperature). -Contributor (for model-based regressors): \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +Contributor: (for model-based regressors) \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -\strong{MODEL:} -Fictitious Update Model (Glascher et al., 2008, Cerebral Cortex) +\strong{MODEL:} Fictitious Update Model (Glascher et al., 2009, Cerebral Cortex) } \details{ This section describes some of the function arguments in greater detail. 
-\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Probabilistic Reversal Learning Task, there should be three columns of data -with the labels "subjID", "choice", and "rewlos". It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Probabilistic Reversal Learning Task, there should be 4 columns of data with the + labels "subjID", "block", "choice", "outcome". 
It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"choice"}}{An integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in PRL).} - \item{\code{"outcome"}}{A 1 or -1 for outcome within each given trial (1 = reward, -1 = loss).} - \item{\code{"block"}}{An integer value representing the block number of the current trial (e.g., 1 1 1 2 2 2).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"block"}{A unique identifier for each of the multiple blocks within each subject.} + \item{"choice"}{Integer value representing the option chosen on that trial: 1 or 2.} + \item{"outcome"}{Integer value representing the outcome of that trial (where reward == 1, and loss == -1).} + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \strong{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. 
independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \strong{nthin} is equal to 1, hence every sample is used to -generate the posterior. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. 
When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
} \examples{ \dontrun{ # Run the model and store results in "output" -output <- prl_fictitious_multipleB(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- prl_fictitious_multipleB("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -114,3 +148,12 @@ plot(output) printFit(output) } } +\references{ +Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). Determining a Role for Ventromedial + Prefrontal Cortex in Encoding Action-Based Value Signals During Reward-Related Decision Making. + Cerebral Cortex, 19(2), 483-495. http://doi.org/10.1093/cercor/bhn098 +} +\seealso{ +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} +} diff --git a/man/prl_fictitious_rp.Rd b/man/prl_fictitious_rp.Rd index a5d9e19b..4a2b181e 100644 --- a/man/prl_fictitious_rp.Rd +++ b/man/prl_fictitious_rp.Rd @@ -4,110 +4,139 @@ \alias{prl_fictitious_rp} \title{Probabilistic Reversal Learning Task} \usage{ -prl_fictitious_rp(data = "choice", niter = 3000, nwarmup = 1000, - nchain = 1, ncore = 1, nthin = 1, inits = "random", - indPars = "mean", saveDir = NULL, modelRegressor = FALSE, - vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, - stepsize = 1, max_treedepth = 10) +prl_fictitious_rp(data = "choose", niter = 4000, nwarmup = 1000, + nchain = 4, ncore = 1, nthin = 1, inits = "random", + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. 
Data columns should be labelled as follows: "subjID", "choice", and "outcome". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "choice", "outcome". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? 
TRUE or FALSE. +For this model they are: "ev_c", "ev_nc", "pe_c", "pe_nc", "dv".} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. 
See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"prl_fictitious"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. } - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("prl_fictitious_rp").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + \item{\code{modelRegressor}}{List object containing the extracted model-based regressors.} } } \description{ -Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning (PRL) Task using the following parameters: "eta_pos" (learning rate, +PE), "eta_neg" (learning rate, -PE), "alpha" (indecision point), "beta" (inverse temperature). +Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning Task with the following parameters: + "eta_pos" (learning rate, +PE), "eta_neg" (learning rate, -PE), "alpha" (indecision point), "beta" (inverse temperature). 
-Contributor (for model-based regressors): \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +Contributor: (for model-based regressors) \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -\strong{MODEL:} -Fictitious Update Model (Glascher et al., 2008, Cerebral Cortex) + separate learning rates for + and - prediction error (PE) +\strong{MODEL:} Fictitious Update Model (Glascher et al., 2009, Cerebral Cortex), with separate learning rates for positive and negative prediction error (PE) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Probabilistic Reversal Learning Task, there should be three columns of data -with the labels "subjID", "choice", and "outcome". It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Probabilistic Reversal Learning Task, there should be 3 columns of data with the + labels "subjID", "choice", "outcome". 
It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"choice"}}{An integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in PRL).} - \item{\code{"outcome"}}{A 1 or -1 for outcome within each given trial (1 = reward, -1 = loss).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"choice"}{Integer value representing the option chosen on that trial: 1 or 2.} + \item{"outcome"}{Integer value representing the outcome of that trial (where reward == 1, and loss == -1).} + + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. 
When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. 
independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
} \examples{ \dontrun{ # Run the model and store results in "output" -output <- prl_fictitious_rp(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- prl_fictitious_rp("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -120,15 +149,15 @@ printFit(output) } } \references{ -Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). Determining a Role for Ventromedial Prefrontal Cortex in Encoding -Action-Based Value Signals During Reward-Related Decision Making. Cerebral Cortex, 19(2), 483-495. http://doi.org/10.1093/cercor/bhn098 +Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). Determining a Role for Ventromedial + Prefrontal Cortex in Encoding Action-Based Value Signals During Reward-Related Decision Making. + Cerebral Cortex, 19(2), 483-495. http://doi.org/10.1093/cercor/bhn098 -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. - -Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. (2013). Dissociable -Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), 1090-1100. http://doi.org/10.1016/j.neuron.2013.08.030 +Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. + (2013). Dissociable Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), + 1090-1100. 
http://doi.org/10.1016/j.neuron.2013.08.030 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/prl_fictitious_rp_woa.Rd b/man/prl_fictitious_rp_woa.Rd index 72807746..6c9dc44f 100644 --- a/man/prl_fictitious_rp_woa.Rd +++ b/man/prl_fictitious_rp_woa.Rd @@ -4,110 +4,139 @@ \alias{prl_fictitious_rp_woa} \title{Probabilistic Reversal Learning Task} \usage{ -prl_fictitious_rp_woa(data = "choice", niter = 3000, nwarmup = 1000, - nchain = 1, ncore = 1, nthin = 1, inits = "random", - indPars = "mean", saveDir = NULL, modelRegressor = FALSE, - vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, - stepsize = 1, max_treedepth = 10) +prl_fictitious_rp_woa(data = "choose", niter = 4000, nwarmup = 1000, + nchain = 4, ncore = 1, nthin = 1, inits = "random", + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", and "outcome". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "choice", "outcome". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. 
Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +For this model they are: "ev_c", "ev_nc", "pe_c", "pe_nc", "dv".} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). 
Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"prl_fictitious"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("prl_fictitious_rp_woa").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + \item{\code{modelRegressor}}{List object containing the extracted model-based regressors.} } } \description{ -Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning (PRL) Task using the following parameters: "eta_pos" (learning rate, +PE), "eta_neg" (learning rate, -PE), "alpha" (indecision point), "beta" (inverse temperature). +Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning Task with the following parameters: + "eta_pos" (learning rate, +PE), "eta_neg" (learning rate, -PE), "beta" (inverse temperature). 
-Contributor (for model-based regressors): \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +Contributor: (for model-based regressors) \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -\strong{MODEL:} -Fictitious Update Model (Glascher et al., 2008, Cerebral Cortex) + separate learning rates for + and - prediction error (PE) without alpha (indecision point) +\strong{MODEL:} Fictitious Update Model (Glascher et al., 2009, Cerebral Cortex), with separate learning rates for positive and negative prediction error (PE), without alpha (indecision point) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Probabilistic Reversal Learning Task, there should be three columns of data -with the labels "subjID", "choice", and "outcome". It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Probabilistic Reversal Learning Task, there should be 3 columns of data with the + labels "subjID", "choice", "outcome". 
It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"choice"}}{An integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in PRL).} - \item{\code{"outcome"}}{A 1 or -1 for outcome within each given trial (1 = reward, -1 = loss).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"choice"}{Integer value representing the option chosen on that trial: 1 or 2.} + \item{"outcome"}{Integer value representing the outcome of that trial (where reward == 1, and loss == -1).} + + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. 
When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. 
independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
} \examples{ \dontrun{ # Run the model and store results in "output" -output <- prl_fictitious_rp_woa(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- prl_fictitious_rp_woa("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -120,15 +149,15 @@ printFit(output) } } \references{ -Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). Determining a Role for Ventromedial Prefrontal Cortex in Encoding -Action-Based Value Signals During Reward-Related Decision Making. Cerebral Cortex, 19(2), 483-495. http://doi.org/10.1093/cercor/bhn098 +Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). Determining a Role for Ventromedial + Prefrontal Cortex in Encoding Action-Based Value Signals During Reward-Related Decision Making. + Cerebral Cortex, 19(2), 483-495. http://doi.org/10.1093/cercor/bhn098 -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. - -Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. (2013). Dissociable -Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), 1090-1100. http://doi.org/10.1016/j.neuron.2013.08.030 +Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. + (2013). Dissociable Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), + 1090-1100. 
http://doi.org/10.1016/j.neuron.2013.08.030 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/prl_fictitious_woa.Rd b/man/prl_fictitious_woa.Rd index ca274cef..19e47571 100644 --- a/man/prl_fictitious_woa.Rd +++ b/man/prl_fictitious_woa.Rd @@ -4,110 +4,139 @@ \alias{prl_fictitious_woa} \title{Probabilistic Reversal Learning Task} \usage{ -prl_fictitious_woa(data = "choice", niter = 3000, nwarmup = 1000, - nchain = 1, ncore = 1, nthin = 1, inits = "random", - indPars = "mean", saveDir = NULL, modelRegressor = FALSE, - vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, - stepsize = 1, max_treedepth = 10) +prl_fictitious_woa(data = "choose", niter = 4000, nwarmup = 1000, + nchain = 4, ncore = 1, nthin = 1, inits = "random", + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", and "outcome". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "choice", "outcome". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. 
Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +For this model they are: "ev_c", "ev_nc", "pe_c", "pe_nc", "dv".} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. 
Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"prl_fictitious"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("prl_fictitious_woa").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + \item{\code{modelRegressor}}{List object containing the extracted model-based regressors.} } } \description{ -Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning (PRL) Task using the following parameters: "eta" (learning rate), "alpha" (indecision point), "beta" (inverse temperature). +Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning Task with the following parameters: + "eta" (learning rate), "beta" (inverse temperature). -Contributor (for model-based regressors): \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +Contributor: (for model-based regressors) \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -\strong{MODEL:} -Fictitious Update Model (Glascher et al., 2008, Cerebral Cortex) without alpha (indecision point) +\strong{MODEL:} Fictitious Update Model (Glascher et al., 2009, Cerebral Cortex), without alpha (indecision point) } \details{ This section describes some of the function arguments in greater detail. 
-\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Probabilistic Reversal Learning Task, there should be three columns of data -with the labels "subjID", "choice", and "outcome". It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Probabilistic Reversal Learning Task, there should be 3 columns of data with the + labels "subjID", "choice", "outcome". 
It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"choice"}}{An integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in PRL).} - \item{\code{"outcome"}}{A 1 or -1 for outcome within each given trial (1 = reward, -1 = loss).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"choice"}{Integer value representing the option chosen on that trial: 1 or 2.} + \item{"outcome"}{Integer value representing the outcome of that trial (where reward == 1, and loss == -1).} + + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. 
When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. 
independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
} \examples{ \dontrun{ # Run the model and store results in "output" -output <- prl_fictitious_woa(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- prl_fictitious_woa("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -120,12 +149,11 @@ printFit(output) } } \references{ -Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). Determining a Role for Ventromedial Prefrontal Cortex in Encoding -Action-Based Value Signals During Reward-Related Decision Making. Cerebral Cortex, 19(2), 483-495. http://doi.org/10.1093/cercor/bhn098 - -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. +Glascher, J., Hampton, A. N., & O'Doherty, J. P. (2009). Determining a Role for Ventromedial + Prefrontal Cortex in Encoding Action-Based Value Signals During Reward-Related Decision Making. + Cerebral Cortex, 19(2), 483-495. 
http://doi.org/10.1093/cercor/bhn098 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/prl_rp.Rd b/man/prl_rp.Rd index bb9cbad0..548f2c78 100644 --- a/man/prl_rp.Rd +++ b/man/prl_rp.Rd @@ -4,110 +4,138 @@ \alias{prl_rp} \title{Probabilistic Reversal Learning Task} \usage{ -prl_rp(data = "choice", niter = 3000, nwarmup = 1000, nchain = 1, +prl_rp(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", indPars = "mean", - saveDir = NULL, modelRegressor = FALSE, vb = FALSE, - inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, - max_treedepth = 10) + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", and "outcome". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "choice", "outcome". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. 
A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +For this model they are: "ev_c", "ev_nc", "pe".} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"prl_rp"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("prl_rp").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + \item{\code{modelRegressor}}{List object containing the extracted model-based regressors.} } } \description{ -Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning (PRL) Task using the following parameters: "Apun" (punishment learning rate), "Arew" (reward learning rate), and "beta" (inverse temperature). +Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning Task with the following parameters: + "Apun" (punishment learning rate), "Arew" (reward learning rate), "beta" (inverse temperature). -Contributor (for model-based regressors): \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +Contributor: (for model-based regressors) \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -\strong{MODEL:} -Reward-Punishment Model (Ouden et al., 2013, Neuron) +\strong{MODEL:} Reward-Punishment Model (Ouden et al., 2013, Neuron) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. 
".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Probabilistic Reversal Learning Task, there should be three columns of data -with the labels "subjID", "choice", and "outcome". It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Probabilistic Reversal Learning Task, there should be 3 columns of data with the + labels "subjID", "choice", "outcome". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"choice"}}{An integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in PRL).} - \item{\code{"outcome"}}{A 1 or -1 for outcome within each given trial (1 = reward, -1 = loss).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"choice"}{Integer value representing the option chosen on that trial: 1 or 2.} + \item{"outcome"}{Integer value representing the outcome of that trial (where reward == 1, and loss == -1).} + + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. 
"Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
+ +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- prl_rp(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- prl_rp("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -120,12 +148,11 @@ printFit(output) } } \references{ -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. - -Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. (2013). Dissociable -Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), 1090-1100. http://doi.org/10.1016/j.neuron.2013.08.030 +Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. + (2013). 
Dissociable Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), + 1090-1100. http://doi.org/10.1016/j.neuron.2013.08.030 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/prl_rp_multipleB.Rd b/man/prl_rp_multipleB.Rd index f4bdd822..a0d76318 100644 --- a/man/prl_rp_multipleB.Rd +++ b/man/prl_rp_multipleB.Rd @@ -2,113 +2,141 @@ % Please edit documentation in R/prl_rp_multipleB.R \name{prl_rp_multipleB} \alias{prl_rp_multipleB} -\title{Probabilistic Reversal Learning Task, multiple blocks per subject} +\title{Probabilistic Reversal Learning Task} \usage{ -prl_rp_multipleB(data = "choice", niter = 3000, nwarmup = 1000, - nchain = 1, ncore = 1, nthin = 1, inits = "random", - indPars = "mean", saveDir = NULL, modelRegressor = FALSE, - vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, - stepsize = 1, max_treedepth = 10) +prl_rp_multipleB(data = "choose", niter = 4000, nwarmup = 1000, + nchain = 4, ncore = 1, nthin = 1, inits = "random", + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice", "outcome", and "block". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "block", "choice", "outcome". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. 
Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +For this model they are: "ev_c", "ev_nc", "pe".} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). 
Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"prl_rp_multipleB"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("prl_rp_multipleB").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + \item{\code{modelRegressor}}{List object containing the extracted model-based regressors.} } } \description{ -Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning (PRL) Task using the following parameters: "Apun" (punishment learning rate), "Arew" (reward learning rate), and "beta" (inverse temperature). +Multiple-Block Hierarchical Bayesian Modeling of the Probabilistic Reversal Learning Task with the following parameters: + "Apun" (punishment learning rate), "Arew" (reward learning rate), "beta" (inverse temperature). -Contributor (for model-based regressors): \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} +Contributor: (for model-based regressors) \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} and \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -\strong{MODEL:} -Reward-Punishment Model (Ouden et al., 2013, Neuron) +\strong{MODEL:} Reward-Punishment Model (Ouden et al., 2013, Neuron) } \details{ This section describes some of the function arguments in greater detail. 
-\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Probabilistic Reversal Learning Task, there should be three columns of data -with the labels "subjID", "choice", and "outcome". It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Probabilistic Reversal Learning Task, there should be 4 columns of data with the + labels "subjID", "block", "choice", "outcome". 
It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"choice"}}{An integer value representing the chosen choice option within the given trial (e.g., 1 or 2 in PRL).} - \item{\code{"outcome"}}{A 1 or -1 for outcome within each given trial (1 = reward, -1 = loss).} - \item{\code{"block"}}{An integer value representing the block number of the current trial (e.g., 1 1 1 2 2 2).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"block"}{A unique identifier for each of the multiple blocks within each subject.} + \item{"choice"}{Integer value representing the option chosen on that trial: 1 or 2.} + \item{"outcome"}{Integer value representing the outcome of that trial (where reward == 1, and loss == -1).} + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. 
independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. 
where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
} \examples{ \dontrun{ # Run the model and store results in "output" -output <- prl_rp_multipleB(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- prl_rp_multipleB("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -121,12 +149,11 @@ printFit(output) } } \references{ -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. - -Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. (2013). Dissociable -Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), 1090-1100. http://doi.org/10.1016/j.neuron.2013.08.030 +Ouden, den, H. E. M., Daw, N. D., Fernandez, G., Elshout, J. A., Rijpkema, M., Hoogman, M., et al. + (2013). Dissociable Effects of Dopamine and Serotonin on Reversal Learning. Neuron, 80(4), + 1090-1100. 
http://doi.org/10.1016/j.neuron.2013.08.030 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/pst_gainloss_Q.Rd b/man/pst_gainloss_Q.Rd index f23ebd40..6f0bb0bb 100644 --- a/man/pst_gainloss_Q.Rd +++ b/man/pst_gainloss_Q.Rd @@ -4,133 +4,139 @@ \alias{pst_gainloss_Q} \title{Probabilistic Selection Task} \usage{ -pst_gainloss_Q(data = "choose", niter = 2000, nwarmup = 1000, - nchain = 1, ncore = 1, nthin = 1, inits = "random", - indPars = "mean", saveDir = NULL, modelRegressor = FALSE, - vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, - stepsize = 1, max_treedepth = 10) +pst_gainloss_Q(data = "choose", niter = 4000, nwarmup = 1000, + nchain = 4, ncore = 1, nthin = 1, inits = "random", + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. -Data columns should be labelled as follows: -"subjID", "type", "choice", and "reward". -See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "type", "choice", "reward". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. 
Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"pst_gainloss_Q"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("pst_gainloss_Q").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Probabilistic Selection Task -with the following parameters: -"alpha_pos" (Learning rate for positive feedbacks), -"alpha_neg" (Learning rate for negative feedbacks), and -"beta" (inverse temperature). +Hierarchical Bayesian Modeling of the Probabilistic Selection Task with the following parameters: + "alpha_pos" (learning rate for positive feedbacks), "alpha_neg" (learning rate for negative feedbacks), "beta" (inverse temperature). Contributor: \href{https://ccs-lab.github.io/team/jaeyeong-yang/}{Jaeyeong Yang} -\strong{MODEL:} -Gain-loss Q learning model (Frank et al., 2007) +\strong{MODEL:} Gain-Loss Q Learning Model (Frank et al., 2007, PNAS) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. 
For the Probabilistic Selection Task, there should be four columns of data with the labels -"subjID", "type", "choice", and "reward". It is not necessary for the columns to be in this -particular order, however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Probabilistic Selection Task, there should be 4 columns of data with the + labels "subjID", "type", "choice", "reward". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"type"}}{The type of stimuli in the trial. For given 6 stimuli, \code{"type"} should be given - in a form as \code{"option1""option2"}, e.g., \code{12}, \code{34}, \code{56}. - - The code for each option should be defined as below: - \tabular{ccl}{ - Code \tab Stimulus \tab Probability to win \cr - \code{1} \tab A \tab 80\% \cr - \code{2} \tab B \tab 20\% \cr - \code{3} \tab C \tab 70\% \cr - \code{4} \tab D \tab 30\% \cr - \code{5} \tab E \tab 60\% \cr - \code{6} \tab F \tab 40\% - } - The function will work even if you use different probabilities for stimuli, - but the total number of stimuli should be less than or equal to 6. 
- } - \item{\code{"choice"}}{Whether a subject choose the left option between given two options.} - \item{\code{"reward"}}{Amount of reward as a result of the choice.} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"type"}{Two-digit number indicating which pair of stimuli were presented for that trial, e.g. \code{12}, \code{34}, or \code{56}. The digit on the left (tens-digit) indicates the presented stimulus for option1, while the digit on the right (ones-digit) indicates that for option2.\cr Code for each stimulus type (1~6) is defined as below: \tabular{ccl}{Code \tab Stimulus \tab Probability to win \cr \code{1} \tab A \tab 80\% \cr \code{2} \tab B \tab 20\% \cr \code{3} \tab C \tab 70\% \cr \code{4} \tab D \tab 30\% \cr \code{5} \tab E \tab 60\% \cr \code{6} \tab F \tab 40\%} The modeling will still work even if different probabilities are used for the stimuli; however, the total number of stimuli should be less than or equal to 6.} + \item{"choice"}{Whether the subject chose the left option (option1) out of the given two options (i.e. if option1 was chosen, 1; if option2 was chosen, 0).} + \item{"reward"}{Amount of reward earned as a result of the trial.} + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. 
The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. 
For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. 
HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- pst_gainloss_Q(data = "example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) +output <- pst_gainloss_Q("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -143,10 +149,11 @@ printFit(output) } } \references{ -Frank, M. J., Moustafa, A. A., Haughey, H. M., Curran, T., & Hutchison, K. E. (2007). -Genetic triple dissociation reveals multiple roles for dopamine in reinforcement learning. -Proceedings of the National Academy of Sciences, 104(41), 16311-16316. +Frank, M. J., Moustafa, A. A., Haughey, H. M., Curran, T., & Hutchison, K. E. (2007). Genetic + triple dissociation reveals multiple roles for dopamine in reinforcement learning. Proceedings + of the National Academy of Sciences, 104(41), 16311-16316. 
} \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/ra_noLA.Rd b/man/ra_noLA.Rd index 260692f0..aa936308 100644 --- a/man/ra_noLA.Rd +++ b/man/ra_noLA.Rd @@ -6,108 +6,134 @@ \usage{ ra_noLA(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", indPars = "mean", - saveDir = NULL, modelRegressor = FALSE, vb = FALSE, - inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, - max_treedepth = 10) + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "gain", "loss", "cert", and "gamble". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "gain", "loss", "cert", "gamble". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. 
A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"ra_prospect"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("ra_noLA").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Risk Aversion Task with the following parameters: "rho" (risk aversion) and "tau" (inverse temp). +Hierarchical Bayesian Modeling of the Risk Aversion Task with the following parameters: + "rho" (risk aversion), "tau" (inverse temperature). -\strong{MODEL:} -Prospect Theory without a loss aversion (LA) parameter +\strong{MODEL:} Prospect Theory (Sokol-Hessner et al., 2009, PNAS), without loss aversion (LA) parameter } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Risk Aversion Task, there should be four columns of data with the labels -"subjID", "riskyGain", "riskyLoss", and "safeOption". 
It is not necessary for the columns to be in this -particular order, however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Risk Aversion Task, there should be 5 columns of data with the + labels "subjID", "gain", "loss", "cert", "gamble". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"gain"}}{Possible (50\%) gain outcome of a risky option (e.g. 9).} - \item{\code{"loss"}}{Possible (50\%) loss outcome of a risky option (e.g. 5, or -5).} - \item{\code{"cert"}}{Guaranteed amount of a safe option. "cert" is assumed to be zero or greater than zero.} - \item{\code{"gamble"}}{If gamble was taken, gamble == 1, else gamble == 0.} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"gain"}{Possible (50\%) gain outcome of a risky option (e.g. 9).} + \item{"loss"}{Possible (50\%) loss outcome of a risky option (e.g. 5, or -5).} + \item{"cert"}{Guaranteed amount of a safe option. "cert" is assumed to be zero or greater than zero.} + \item{"gamble"}{If gamble was taken, gamble == 1; else gamble == 0.} + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. 
As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
+ +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- ra_noLA(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- ra_noLA("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -117,18 +143,21 @@ plot(output) # Show the WAIC and LOOIC model fit estimates printFit(output) +} - +\dontrun{ # Paths to data published in Sokol-Hessner et al. (2009) -path_to_attend_data = system.file("extdata/ra_data_attend.txt", package = "hBayesDM") - -path_to_regulate_data = system.file("extdata/ra_data_reappraisal.txt", package = "hBayesDM") +path_to_attend_data <- system.file("extdata", "ra_data_attend.txt", package = "hBayesDM") +path_to_regulate_data <- system.file("extdata", "ra_data_reappraisal.txt", package = "hBayesDM") } } \references{ -Hoffman, M. D., & Gelman, A. (2014). 
The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. +Sokol-Hessner, P., Hsu, M., Curley, N. G., Delgado, M. R., Camerer, C. F., Phelps, E. A., & + Smith, E. E. (2009). Thinking like a Trader Selectively Reduces Individuals' Loss Aversion. + Proceedings of the National Academy of Sciences of the United States of America, 106(13), + 5035-5040. http://www.pnas.org/content/106/13/5035 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/ra_noRA.Rd b/man/ra_noRA.Rd index f6903758..060a39d0 100644 --- a/man/ra_noRA.Rd +++ b/man/ra_noRA.Rd @@ -6,108 +6,134 @@ \usage{ ra_noRA(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", indPars = "mean", - saveDir = NULL, modelRegressor = FALSE, vb = FALSE, - inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, - max_treedepth = 10) + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "gain", "loss", "cert", and "gamble". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "gain", "loss", "cert", "gamble". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. 
Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). 
Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"ra_prospect"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("ra_noRA").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Risk Aversion Task with the following parameters: "lambda" (loss aversion) and "tau" (inverse temp). +Hierarchical Bayesian Modeling of the Risk Aversion Task with the following parameters: + "lambda" (loss aversion), "tau" (inverse temperature). -\strong{MODEL:} -Prospect Theory without a risk aversion (RA) parameter +\strong{MODEL:} Prospect Theory (Sokol-Hessner et al., 2009, PNAS), without risk aversion (RA) parameter } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Risk Aversion Task, there should be four columns of data with the labels -"subjID", "riskyGain", "riskyLoss", and "safeOption". 
It is not necessary for the columns to be in this -particular order, however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Risk Aversion Task, there should be 5 columns of data with the + labels "subjID", "gain", "loss", "cert", "gamble". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"gain"}}{Possible (50\%) gain outcome of a risky option (e.g. 9).} - \item{\code{"loss"}}{Possible (50\%) loss outcome of a risky option (e.g. 5, or -5).} - \item{\code{"cert"}}{Guaranteed amount of a safe option. "cert" is assumed to be zero or greater than zero.} - \item{\code{"gamble"}}{If gamble was taken, gamble == 1, else gamble == 0.} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"gain"}{Possible (50\%) gain outcome of a risky option (e.g. 9).} + \item{"loss"}{Possible (50\%) loss outcome of a risky option (e.g. 5, or -5).} + \item{"cert"}{Guaranteed amount of a safe option. "cert" is assumed to be zero or greater than zero.} + \item{"gamble"}{If gamble was taken, gamble == 1; else gamble == 0.} + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. 
As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
+ +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- ra_noRA(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- ra_noRA("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -117,18 +143,21 @@ plot(output) # Show the WAIC and LOOIC model fit estimates printFit(output) +} - +\dontrun{ # Paths to data published in Sokol-Hessner et al. (2009) -path_to_attend_data = system.file("extdata/ra_data_attend.txt", package = "hBayesDM") - -path_to_regulate_data = system.file("extdata/ra_data_reappraisal.txt", package = "hBayesDM") +path_to_attend_data <- system.file("extdata", "ra_data_attend.txt", package = "hBayesDM") +path_to_regulate_data <- system.file("extdata", "ra_data_reappraisal.txt", package = "hBayesDM") } } \references{ -Hoffman, M. D., & Gelman, A. (2014). 
The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. +Sokol-Hessner, P., Hsu, M., Curley, N. G., Delgado, M. R., Camerer, C. F., & + Phelps, E. A. (2009). Thinking like a Trader Selectively Reduces Individuals' Loss Aversion. + Proceedings of the National Academy of Sciences of the United States of America, 106(13), + 5035-5040. http://www.pnas.org/content/106/13/5035 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/ra_prospect.Rd b/man/ra_prospect.Rd index ad30dcfd..dea44925 100644 --- a/man/ra_prospect.Rd +++ b/man/ra_prospect.Rd @@ -6,108 +6,135 @@ \usage{ ra_prospect(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", - indPars = "mean", saveDir = NULL, modelRegressor = FALSE, - vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, - stepsize = 1, max_treedepth = 10) + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "gain", "loss", "cert", and "gamble". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "gain", "loss", "cert", "gamble". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only. 
Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). 
Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"ra_prospect"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("ra_prospect").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Risk Aversion Task with the following parameters: "rho" (risk aversion), "lambda" (loss aversion), and "tau" (inverse temp). +Hierarchical Bayesian Modeling of the Risk Aversion Task with the following parameters: + "rho" (risk aversion), "lambda" (loss aversion), "tau" (inverse temperature). -\strong{MODEL:} -Prospect Theory (Sokol-Hessner et al., 2009, PNAS) +\strong{MODEL:} Prospect Theory (Sokol-Hessner et al., 2009, PNAS) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Risk Aversion Task, there should be four columns of data with the labels -"subjID", "riskyGain", "riskyLoss", and "safeOption". 
It is not necessary for the columns to be in this -particular order, however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Risk Aversion Task, there should be 5 columns of data with the + labels "subjID", "gain", "loss", "cert", "gamble". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"gain"}}{Possible (50\%) gain outcome of a risky option (e.g. 9).} - \item{\code{"loss"}}{Possible (50\%) loss outcome of a risky option (e.g. 5, or -5).} - \item{\code{"cert"}}{Guaranteed amount of a safe option. "cert" is assumed to be zero or greater than zero.} - \item{\code{"gamble"}}{If gamble was taken, gamble == 1, else gamble == 0.} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"gain"}{Possible (50\%) gain outcome of a risky option (e.g. 9).} + \item{"loss"}{Possible (50\%) loss outcome of a risky option (e.g. 5, or -5).} + \item{"cert"}{Guaranteed amount of a safe option. "cert" is assumed to be zero or greater than zero.} + \item{"gamble"}{If gamble was taken, gamble == 1; else gamble == 0.} + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. 
As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. 
One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. 
+ +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- ra_prospect(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- ra_prospect("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -117,22 +144,21 @@ plot(output) # Show the WAIC and LOOIC model fit estimates printFit(output) +} - +\dontrun{ # Paths to data published in Sokol-Hessner et al. (2009) -path_to_attend_data = system.file("extdata/ra_data_attend.txt", package = "hBayesDM") - -path_to_regulate_data = system.file("extdata/ra_data_reappraisal.txt", package = "hBayesDM") +path_to_attend_data <- system.file("extdata", "ra_data_attend.txt", package = "hBayesDM") +path_to_regulate_data <- system.file("extdata", "ra_data_reappraisal.txt", package = "hBayesDM") } } \references{ -Hoffman, M. D., & Gelman, A. (2014). 
The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. - -Sokol-Hessner, P., Hsu, M., Curley, N. G., Delgado, M. R., Camerer, C. F., Phelps, E. A., & Smith, E. E. (2009). Thinking like -a Trader Selectively Reduces Individuals' Loss Aversion. Proceedings of the National Academy of Sciences of the United States -of America, 106(13), 5035-5040. http://doi.org/10.2307/40455144?ref = search-gateway:1f452c8925000031ef87ca756455c9e3 +Sokol-Hessner, P., Hsu, M., Curley, N. G., Delgado, M. R., Camerer, C. F., & + Phelps, E. A. (2009). Thinking like a Trader Selectively Reduces Individuals' Loss Aversion. + Proceedings of the National Academy of Sciences of the United States of America, 106(13), + 5035-5040. http://www.pnas.org/content/106/13/5035 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/rdt_happiness.Rd b/man/rdt_happiness.Rd index b5fcd904..30067294 100644 --- a/man/rdt_happiness.Rd +++ b/man/rdt_happiness.Rd @@ -6,114 +6,137 @@ \usage{ rdt_happiness(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", - indPars = "mean", saveDir = NULL, modelRegressor = FALSE, - vb = FALSE, inc_postpred = FALSE, adapt_delta = 0.95, - stepsize = 1, max_treedepth = 10) + indPars = "mean", modelRegressor = FALSE, vb = FALSE, + inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, + max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. 
Data columns should be labeled as: +"subjID", "gain", "loss", "cert", "type", "gamble", "outcome", "happy", "RT_happy". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. 
Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. 
See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"rdt_happiness"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. } - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("rdt_happiness").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Risky Decision Task (Rutledge et al., 2014, PNAS) with the following parameters: "w0" (baseline), "w1" (weight of certain rewards), "w2" (weight of expected values), "w3" (weight of reward prediction errors), "gamma" (forgetting factor),and "sig" (standard deviation of error). +Hierarchical Bayesian Modeling of the Risky Decision Task with the following parameters: + "w0" (baseline), "w1" (weight of certain rewards), "w2" (weight of expected values), "w3" (weight of reward prediction errors), "gam" (forgetting factor), "sig" (standard deviation of error). 
Contributor: \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -\strong{MODEL:} -Happiness Computational Model (Rutledge et al., 2014, PNAS) +\strong{MODEL:} Happiness Computational Model (Rutledge et al., 2014, PNAS) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Risky Decision Task, there should be nine columns of data with the labels -"subjID", "gain", "loss", "cert", "type", "gamble", "outcome", "happy", and "RT_happy". It is not necessary for the columns to be in this -particular order, however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Risky Decision Task, there should be 9 columns of data with the + labels "subjID", "gain", "loss", "cert", "type", "gamble", "outcome", "happy", "RT_happy". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"gain"}}{Possible (50\%) gain outcome of a risky option (e.g. 9).} - \item{\code{"loss"}}{Possible (50\%) loss outcome of a risky option (e.g. 
5, or -5).} - \item{\code{"cert"}}{Guaranteed amount of a safe option.} - \item{\code{"type"}}{loss == -1, mixed == 0, gain == 1} - \item{\code{"gamble"}}{If gamble was taken, gamble == 1, else gamble == 0.} - \item{\code{"outcome"}}{The result of the chosen option.} - \item{\code{"happy"}}{The happiness score.} - \item{\code{"RT_happy"}}{The reaction time of the happiness trial.} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"gain"}{Possible (50\%) gain outcome of a risky option (e.g. 9).} + \item{"loss"}{Possible (50\%) loss outcome of a risky option (e.g. 5, or -5).} + \item{"cert"}{Guaranteed amount of a safe option.} + \item{"type"}{loss == -1, mixed == 0, gain == 1} + \item{"gamble"}{If gamble was taken, gamble == 1; else gamble == 0.} + \item{"outcome"}{Result of the trial.} + \item{"happy"}{Happiness score.} + \item{"RT_happy"}{Reaction time for answering the happiness score.} } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. 
Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. 
The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
} \examples{ \dontrun{ # Run the model and store results in "output" -output <- rdt_happiness(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- rdt_happiness("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -123,15 +146,14 @@ plot(output) # Show the WAIC and LOOIC model fit estimates printFit(output) - - - } } \references{ -Rutledge, R. B., Skandali, N., Dayan, P., & Dolan, R. J. (2014). A computational and neural model of momentary subjective well-being. -Proceedings of the National Academy of Sciences, 111(33), 12252-12257. +Rutledge, R. B., Skandali, N., Dayan, P., & Dolan, R. J. (2014). A computational and neural model + of momentary subjective well-being. Proceedings of the National Academy of Sciences, 111(33), + 12252-12257. } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/ts_par4.Rd b/man/ts_par4.Rd index 394d674f..effa03da 100644 --- a/man/ts_par4.Rd +++ b/man/ts_par4.Rd @@ -6,113 +6,137 @@ \usage{ ts_par4(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", indPars = "mean", - saveDir = NULL, modelRegressor = FALSE, vb = FALSE, - inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, - max_treedepth = 10, trans_prob = 0.7) + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. 
Data columns should be labelled as follows: "subjID", "level1_choice", "level2_choice", "reward". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "level1_choice", "level2_choice", "reward". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. 
Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} - -\item{trans_prob}{Common state transition probability from Stage (Level) 1 to Stage 2. Defaults to 0.7.} +\item{...}{For this model, it's possible to set the following \strong{model-specific argument} to a value that you may prefer. 
\cr +\code{trans_prob}: Common state transition probability from Stage (Level) 1 to Stage (Level) 2. Defaults to 0.7.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"ts_par4"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. } - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("ts_par4").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Two-Step Task using the following 4 parameters: with the following parameters: "a" (learnign rate for both stages 1 and 2), "beta" (inverse temperature for both stages 1 and 2), "pi" (perseverance), and "w" (model-based weight).\cr\cr +Hierarchical Bayesian Modeling of the Two-Step Task with the following parameters: + "a" (learning rate for both stages 1 & 2), "beta" (inverse temperature for both stages 1 & 2), "pi" (perseverance), "w" (model-based weight). 
Contributor: \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -\strong{MODEL:} -Hybrid model (Daw et al., 2011; Wunderlich et al, 2012) with four parameters +\strong{MODEL:} Hybrid Model (Daw et al., 2011; Wunderlich et al., 2012), with 4 parameters } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Two-Step Task, there should be four columns of data with the labels -"subjID", "level1_choice", "level2_choice", "reward". It is not necessary for the columns to be in this -particular order, however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Two-Step Task, there should be 4 columns of data with the + labels "subjID", "level1_choice", "level2_choice", "reward". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"level1_choice"}}{Choice of the level 1. 1: stimulus 1, 2: stimulus 2} - \item{\code{"level2_choice"}}{Choice of the level 2. 
1: stimulus 3, 2: stimulus 4, 3: stimulus 5, 4: stimulus 6.} - \item{\code{"reward"}}{Reward of the level 2 (0 or 1)} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"level1_choice"}{Choice made for Level (Stage) 1 (1: stimulus 1, 2: stimulus 2).} + \item{"level2_choice"}{Choice made for Level (Stage) 2 (1: stimulus 3, 2: stimulus 4, 3: stimulus 5, 4: stimulus 6).\cr *Note that, in our notation, choosing stimulus 1 in Level 1 leads to stimulus 3 & 4 in Level 2 with a common (0.7 by default) transition. Similarly, choosing stimulus 2 in Level 1 leads to stimulus 5 & 6 in Level 2 with a common (0.7 by default) transition. To change this default transition probability, set the function argument \code{trans_prob} to your preferred value.} + \item{"reward"}{Reward after Level 2 (0 or 1).} + + + + + } -\strong{*} Note: In our notation, choosing stimulus 1 in Level 1 leads to stimulus 3 & 4 in Level 2 with a common (0.7 by default) transition. -Choosing stimulus 3 in Level 1 leads to stimulus 5 & 6 in Level 2 with a common (0.7 by default) transition. -The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. 
- -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. 
where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
}
\examples{
\dontrun{
# Run the model and store results in "output"
-output <- ts_par4(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3)
+output <- ts_par4("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4)

-# Visually check convergence of the sampling chains (should like like 'hairy caterpillars')
-plot(output, type = 'trace')
+# Visually check convergence of the sampling chains (should look like 'hairy caterpillars')
+plot(output, type = "trace")

# Check Rhat values (all Rhat values should be less than or equal to 1.1)
rhat(output)
@@ -122,20 +146,17 @@ plot(output)

# Show the WAIC and LOOIC model fit estimates
printFit(output)
-
-
}
}
\references{
-Daw, N. D., Gershman, S. J., Seymour, B., Ben Seymour, Dayan, P., & Dolan, R. J. (2011). Model-Based Influences on Humans'
-Choices and Striatal Prediction Errors. Neuron, 69(6), 1204-1215. http://doi.org/10.1016/j.neuron.2011.02.027
-
-Wunderlich, K., Smittenaar, P., & Dolan, R. J. (2012). Dopamine enhances model-based over model-free choice behavior.
-Neuron, 75(3), 418-424.
+Daw, N. D., Gershman, S. J., Seymour, B., Dayan, P., & Dolan, R. J. (2011).
+  Model-Based Influences on Humans' Choices and Striatal Prediction Errors. Neuron, 69(6),
+  1204-1215. http://doi.org/10.1016/j.neuron.2011.02.027

-Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The
-Journal of Machine Learning Research, 15(1), 1593-1623.
+Wunderlich, K., Smittenaar, P., & Dolan, R. J. (2012). Dopamine enhances model-based over
+  model-free choice behavior. Neuron, 75(3), 418-424.
} \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/ts_par6.Rd b/man/ts_par6.Rd index 3fe20068..7023e641 100644 --- a/man/ts_par6.Rd +++ b/man/ts_par6.Rd @@ -5,114 +5,138 @@ \title{Two-Step Task (Daw et al., 2011, Neuron)} \usage{ ts_par6(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, - ncore = 1, nthin = 1, inits = "fixed", indPars = "mean", - saveDir = NULL, modelRegressor = FALSE, vb = FALSE, - inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, - max_treedepth = 10, trans_prob = 0.7) + ncore = 1, nthin = 1, inits = "random", indPars = "mean", + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "level1_choice", "level2_choice", "reward". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "level1_choice", "level2_choice", "reward". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. 
A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} - -\item{trans_prob}{Common state transition probability from Stage (Level) 1 to Stage 2. Defaults to 0.7.} +\item{...}{For this model, it's possible to set the following \strong{model-specific argument} to a value that you may prefer. \cr +\code{trans_prob}: Common state transition probability from Stage (Level) 1 to Stage (Level) 2. Defaults to 0.7.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"ts_par6"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("ts_par6").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Two-Step Task with the following 6 parameters: "a1" (learnign rate in stage 1), "a2" (learnign rate in stage 2), "beta1" (inverse temperature in stage 1), "beta2" (inverse temperature in stage 2), "pi" (perseverance), and "w" (model-based weight).\cr\cr +Hierarchical Bayesian Modeling of the Two-Step Task with the following parameters: + "a1" (learning rate in stage 1), "beta1" (inverse temperature in stage 1), "a2" (learning rate in stage 2), "beta2" (inverse temperature in stage 2), "pi" (perseverance), "w" (model-based weight). Contributor: \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -\strong{MODEL:} -Hybrid model (Daw et al., 2011, Neuron) with 6 parameters +\strong{MODEL:} Hybrid Model (Daw et al., 2011, Neuron), with 6 parameters } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. 
-The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Two-Step Task, there should be four columns of data with the labels -"subjID", "level1_choice", "level2_choice", "reward". It is not necessary for the columns to be in this -particular order, however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Two-Step Task, there should be 4 columns of data with the + labels "subjID", "level1_choice", "level2_choice", "reward". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"level1_choice"}}{Choice of the level 1. 1: stimulus 1, 2: stimulus 2} - \item{\code{"level2_choice"}}{Choice of the level 2. 1: stimulus 3, 2: stimulus 4, 3: stimulus 5, 4: stimulus 6.} - \item{\code{"reward"}}{Reward of the level 2 (0 or 1)} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"level1_choice"}{Choice made for Level (Stage) 1 (1: stimulus 1, 2: stimulus 2).} + \item{"level2_choice"}{Choice made for Level (Stage) 2 (1: stimulus 3, 2: stimulus 4, 3: stimulus 5, 4: stimulus 6).\cr *Note that, in our notation, choosing stimulus 1 in Level 1 leads to stimulus 3 & 4 in Level 2 with a common (0.7 by default) transition. 
Similarly, choosing stimulus 2 in Level 1 leads to stimulus 5 & 6 in Level 2 with a common (0.7 by default) transition. To change this default transition probability, set the function argument \code{trans_prob} to your preferred value.} + \item{"reward"}{Reward after Level 2 (0 or 1).} + + + + + } -\strong{*} Note: In our notation, choosing stimulus 1 in Level 1 leads to stimulus 3 & 4 in Level 2 with a common (0.7 by default) transition. -Choosing stimulus 3 in Level 1 leads to stimulus 5 & 6 in Level 2 with a common (0.7 by default) transition. -The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". 
- -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. 
Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
}
\examples{
\dontrun{
# Run the model and store results in "output"
-output <- ts_par6(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3)
+output <- ts_par6("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4)

-# Visually check convergence of the sampling chains (should like like 'hairy caterpillars')
-plot(output, type = 'trace')
+# Visually check convergence of the sampling chains (should look like 'hairy caterpillars')
+plot(output, type = "trace")

# Check Rhat values (all Rhat values should be less than or equal to 1.1)
rhat(output)
@@ -122,17 +146,14 @@ plot(output)
# Show the WAIC and LOOIC model fit estimates
printFit(output)
-
-
}
}
\references{
-Daw, N. D., Gershman, S. J., Seymour, B., Ben Seymour, Dayan, P., & Dolan, R. J. (2011). Model-Based Influences on Humans'
-Choices and Striatal Prediction Errors. Neuron, 69(6), 1204-1215. http://doi.org/10.1016/j.neuron.2011.02.027
-
-Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The
-Journal of Machine Learning Research, 15(1), 1593-1623.
+Daw, N. D., Gershman, S. J., Seymour, B., Dayan, P., & Dolan, R. J. (2011).
+  Model-Based Influences on Humans' Choices and Striatal Prediction Errors. Neuron, 69(6),
+  1204-1215. 
http://doi.org/10.1016/j.neuron.2011.02.027 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/ts_par7.Rd b/man/ts_par7.Rd index 1d4db326..d4a0c3f2 100644 --- a/man/ts_par7.Rd +++ b/man/ts_par7.Rd @@ -5,114 +5,138 @@ \title{Two-Step Task (Daw et al., 2011, Neuron)} \usage{ ts_par7(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, - ncore = 1, nthin = 1, inits = "fixed", indPars = "mean", - saveDir = NULL, modelRegressor = FALSE, vb = FALSE, - inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, - max_treedepth = 10, trans_prob = 0.7) + ncore = 1, nthin = 1, inits = "random", indPars = "mean", + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "level1_choice", "level2_choice", "reward". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "level1_choice", "level2_choice", "reward". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. 
Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} - -\item{trans_prob}{Common state transition probability from Stage (Level) 1 to Stage 2. Defaults to 0.7.} +\item{...}{For this model, it's possible to set the following \strong{model-specific argument} to a value that you may prefer. \cr +\code{trans_prob}: Common state transition probability from Stage (Level) 1 to Stage (Level) 2. Defaults to 0.7.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"ts_par7"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("ts_par7").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Two-Step Task with the following 7 parameters: "a1" (learnign rate in stage 1), "a2" (learnign rate in stage 2), "beta1" (inverse temperature in stage 1), "beta2" (inverse temperature in stage 2), "pi" (perseverance), "lambda" (eligibility trace), and "w" (model-based weight).\cr\cr +Hierarchical Bayesian Modeling of the Two-Step Task with the following parameters: + "a1" (learning rate in stage 1), "beta1" (inverse temperature in stage 1), "a2" (learning rate in stage 2), "beta2" (inverse temperature in stage 2), "pi" (perseverance), "w" (model-based weight), "lambda" (eligibility trace). Contributor: \href{https://ccs-lab.github.io/team/harhim-park/}{Harhim Park} -\strong{MODEL:} -Hybrid model (Daw et al., 2011, Neuron) with seven parameters (original model) +\strong{MODEL:} Hybrid Model (Daw et al., 2011, Neuron), with 7 parameters (original model) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. 
-The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Two-Step Task, there should be four columns of data with the labels -"subjID", "level1_choice", "level2_choice", "reward". It is not necessary for the columns to be in this -particular order, however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Two-Step Task, there should be 4 columns of data with the + labels "subjID", "level1_choice", "level2_choice", "reward". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"level1_choice"}}{Choice of the level 1. 1: stimulus 1, 2: stimulus 2} - \item{\code{"level2_choice"}}{Choice of the level 2. 1: stimulus 3, 2: stimulus 4, 3: stimulus 5, 4: stimulus 6.} - \item{\code{"reward"}}{Reward of the level 2 (0 or 1)} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"level1_choice"}{Choice made for Level (Stage) 1 (1: stimulus 1, 2: stimulus 2).} + \item{"level2_choice"}{Choice made for Level (Stage) 2 (1: stimulus 3, 2: stimulus 4, 3: stimulus 5, 4: stimulus 6).\cr *Note that, in our notation, choosing stimulus 1 in Level 1 leads to stimulus 3 & 4 in Level 2 with a common (0.7 by default) transition. 
Similarly, choosing stimulus 2 in Level 1 leads to stimulus 5 & 6 in Level 2 with a common (0.7 by default) transition. To change this default transition probability, set the function argument \code{trans_prob} to your preferred value.} + \item{"reward"}{Reward after Level 2 (0 or 1).} + + + + + } -\strong{*} Note: In our notation, choosing stimulus 1 in Level 1 leads to stimulus 3 & 4 in Level 2 with a common (0.7 by default) transition. -Choosing stimulus 3 in Level 1 leads to stimulus 5 & 6 in Level 2 with a common (0.7 by default) transition. -The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial_number", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". 
- -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. 
Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. 
}
\examples{
\dontrun{
# Run the model and store results in "output"
-output <- ts_par7(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3)
+output <- ts_par7("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4)

-# Visually check convergence of the sampling chains (should like like 'hairy caterpillars')
-plot(output, type = 'trace')
+# Visually check convergence of the sampling chains (should look like 'hairy caterpillars')
+plot(output, type = "trace")

# Check Rhat values (all Rhat values should be less than or equal to 1.1)
rhat(output)
@@ -122,17 +146,14 @@ plot(output)
# Show the WAIC and LOOIC model fit estimates
printFit(output)
-
-
}
}
\references{
-Daw, N. D., Gershman, S. J., Seymour, B., Ben Seymour, Dayan, P., & Dolan, R. J. (2011). Model-Based Influences on Humans'
-Choices and Striatal Prediction Errors. Neuron, 69(6), 1204-1215. http://doi.org/10.1016/j.neuron.2011.02.027
-
-Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The
-Journal of Machine Learning Research, 15(1), 1593-1623.
+Daw, N. D., Gershman, S. J., Seymour, B., Dayan, P., & Dolan, R. J. (2011).
+  Model-Based Influences on Humans' Choices and Striatal Prediction Errors. Neuron, 69(6),
+  1204-1215. 
http://doi.org/10.1016/j.neuron.2011.02.027 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/ug_bayes.Rd b/man/ug_bayes.Rd index 90b298d7..4bdb2538 100644 --- a/man/ug_bayes.Rd +++ b/man/ug_bayes.Rd @@ -4,108 +4,136 @@ \alias{ug_bayes} \title{Norm-Training Ultimatum Game} \usage{ -ug_bayes(data = "choose", niter = 3000, nwarmup = 1000, nchain = 4, +ug_bayes(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", indPars = "mean", - saveDir = NULL, modelRegressor = FALSE, vb = FALSE, - inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, - max_treedepth = 10) + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "offer", and "accept". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "offer", "accept". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. 
A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"ug_bayes"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("ug_bayes").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Norm-Training Ultimatum Game using the following parameters: "alpha" (envy), "Beta" (guilt), "tau" (inverse temperature). +Hierarchical Bayesian Modeling of the Norm-Training Ultimatum Game with the following parameters: + "alpha" (envy), "beta" (guilt), "tau" (inverse temperature). -\strong{MODEL:} -Ideal Observer Model (Xiang et al., 2013, J Neuro) +\strong{MODEL:} Ideal Observer Model (Xiang et al., 2013, J Neuro) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Norm-Training Ultimatum Game, there should be three columns of data -with the labels "subjID", "offer", and "accept". 
It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Norm-Training Ultimatum Game, there should be 3 columns of data with the + labels "subjID", "offer", "accept". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"offer"}}{An real value representing the offer made within the given trial (e.g., 10, 11, 4, etc..).} - \item{\code{"accept"}}{A 1 or 0 indicating an offer was accepted or not (1 = accepted, 0 = rejected).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"offer"}{Floating point value representing the offer made in that trial (e.g. 4, 10, 11).} + \item{"accept"}{1 or 0, indicating whether the offer was accepted in that trial (where accepted == 1, rejected == 0).} + + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. 
For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. 
+ As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. 
Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- ug_bayes(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- ug_bayes("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -118,12 +146,11 @@ printFit(output) } } \references{ -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. - -Xiang, T., Lohrenz, T., & Montague, P. R. (2013). Computational Substrates of Norms and Their Violations during Social Exchange. -Journal of Neuroscience, 33(3), 1099-1108. http://doi.org/10.1523/JNEUROSCI.1642-12.2013 +Xiang, T., Lohrenz, T., & Montague, P. R. (2013). Computational Substrates of Norms and Their + Violations during Social Exchange. Journal of Neuroscience, 33(3), 1099-1108. 
+ http://doi.org/10.1523/JNEUROSCI.1642-12.2013 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/ug_delta.Rd b/man/ug_delta.Rd index 8c037883..138b0ea4 100644 --- a/man/ug_delta.Rd +++ b/man/ug_delta.Rd @@ -4,108 +4,136 @@ \alias{ug_delta} \title{Norm-Training Ultimatum Game} \usage{ -ug_delta(data = "choose", niter = 3000, nwarmup = 1000, nchain = 4, +ug_delta(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", indPars = "mean", - saveDir = NULL, modelRegressor = FALSE, vb = FALSE, - inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, - max_treedepth = 10) + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "offer", and "accept". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "offer", "accept". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. 
A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"ug_delta"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("ug_delta").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Norm-Training Ultimatum Game using the following parameters: "alpha" (envy), "ep" (norm adaptation rate), "tau" (inverse temperature). +Hierarchical Bayesian Modeling of the Norm-Training Ultimatum Game with the following parameters: + "alpha" (envy), "tau" (inverse temperature), "ep" (norm adaptation rate). -\strong{MODEL:} -Rescorla-Wagner (delta) Model (Gu et al., 2015, J Neuro) +\strong{MODEL:} Rescorla-Wagner (Delta) Model (Gu et al., 2015, J Neuro) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. For the Norm-Training Ultimatum Game, there should be three columns of data -with the labels "subjID", "offer", and "accept". 
It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Norm-Training Ultimatum Game, there should be 3 columns of data with the + labels "subjID", "offer", "accept". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"offer"}}{An real value representing the offer made within the given trial (e.g., 10, 11, 4, etc..).} - \item{\code{"accept"}}{A 1 or 0 indicating an offer was accepted or not (1 = accepted, 0 = rejected).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"offer"}{Floating point value representing the offer made in that trial (e.g. 4, 10, 11).} + \item{"accept"}{1 or 0, indicating whether the offer was accepted in that trial (where accepted == 1, rejected == 0).} + + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "Reaction_Time", "trial", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. - -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. 
For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. "ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. 
+ As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. 
Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- ug_delta(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- ug_delta("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -118,13 +146,12 @@ printFit(output) } } \references{ -Gu, X., Wang, X., Hula, A., Wang, S., Xu, S., Lohrenz, T. M., et al. (2015). Necessary, Yet Dissociable Contributions of the -Insular and Ventromedial Prefrontal Cortices to Norm Adaptation: Computational and Lesion Evidence in Humans. Journal of -Neuroscience, 35(2), 467-473. http://doi.org/10.1523/JNEUROSCI.2906-14.2015 - -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. +Gu, X., Wang, X., Hula, A., Wang, S., Xu, S., Lohrenz, T. M., et al. (2015). Necessary, Yet + Dissociable Contributions of the Insular and Ventromedial Prefrontal Cortices to Norm + Adaptation: Computational and Lesion Evidence in Humans. Journal of Neuroscience, 35(2), + 467-473. 
http://doi.org/10.1523/JNEUROSCI.2906-14.2015 } \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/man/wcs_sql.Rd b/man/wcs_sql.Rd index 4f1b91b7..4e338be2 100644 --- a/man/wcs_sql.Rd +++ b/man/wcs_sql.Rd @@ -4,110 +4,138 @@ \alias{wcs_sql} \title{Wisconsin Card Sorting Task} \usage{ -wcs_sql(data = "choice", niter = 3000, nwarmup = 1000, nchain = 1, +wcs_sql(data = "choose", niter = 4000, nwarmup = 1000, nchain = 4, ncore = 1, nthin = 1, inits = "random", indPars = "mean", - saveDir = NULL, modelRegressor = FALSE, vb = FALSE, - inc_postpred = FALSE, adapt_delta = 0.95, stepsize = 1, - max_treedepth = 10) + modelRegressor = FALSE, vb = FALSE, inc_postpred = FALSE, + adapt_delta = 0.95, stepsize = 1, max_treedepth = 10, ...) } \arguments{ -\item{data}{A .txt file containing the data to be modeled. Data columns should be labelled as follows: "subjID", "choice" and "outcome". See \bold{Details} below for more information.} +\item{data}{A .txt file containing the data to be modeled. Data columns should be labeled as: +"subjID", "choice", "outcome". See \bold{Details} below for more information.} -\item{niter}{Number of iterations, including warm-up.} +\item{niter}{Number of iterations, including warm-up. Defaults to 4000.} -\item{nwarmup}{Number of iterations used for warm-up only.} +\item{nwarmup}{Number of iterations used for warm-up only. Defaults to 1000.} -\item{nchain}{Number of chains to be run.} +\item{nchain}{Number of Markov chains to run. Defaults to 4.} -\item{ncore}{Integer value specifying how many CPUs to run the MCMC sampling on. Defaults to 1.} +\item{ncore}{Number of CPUs to be used for running. Defaults to 1.} -\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. Defaults to 1. 
A higher number can be used when auto-correlation within the MCMC sampling is high.} +\item{nthin}{Every \code{i == nthin} sample will be used to generate the posterior distribution. +Defaults to 1. A higher number can be used when auto-correlation within the MCMC sampling is +high.} -\item{inits}{Character value specifying how the initial values should be generated. Options are "fixed" or "random" or your own initial values.} +\item{inits}{Character value specifying how the initial values should be generated. Options are +"fixed" or "random", or your own initial values.} -\item{indPars}{Character value specifying how to summarize individual parameters. Current options are: "mean", "median", or "mode".} +\item{indPars}{Character value specifying how to summarize individual parameters. Current options +are: "mean", "median", or "mode".} -\item{saveDir}{Path to directory where .RData file of model output (\code{modelData}) can be saved. Leave blank if not interested.} +\item{modelRegressor}{Export model-based regressors? TRUE or FALSE. +Currently not available for this model.} -\item{modelRegressor}{Exporting model-based regressors? TRUE or FALSE. Currently not available for this model.} +\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults +to FALSE.} -\item{vb}{Use variational inference to approximately draw from a posterior distribution. Defaults to FALSE.} +\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file +size). Defaults to FALSE.} -\item{inc_postpred}{Include trial-level posterior predictive simulations in model output (may greatly increase file size). Defaults to FALSE.} +\item{adapt_delta}{Floating point value representing the target acceptance probability of a new +sample in the MCMC chain. Must be between 0 and 1. 
See \bold{Details} below.} -\item{adapt_delta}{Floating point number representing the target acceptance probability of a new sample in the MCMC chain. Must be between 0 and 1. See \bold{Details} below.} +\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can +take on each new iteration. See \bold{Details} below.} -\item{stepsize}{Integer value specifying the size of each leapfrog step that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{max_treedepth}{Integer value specifying how many leapfrog steps the MCMC sampler can take +on each new iteration. See \bold{Details} below.} -\item{max_treedepth}{Integer value specifying how many leapfrog steps that the MCMC sampler can take on each new iteration. See \bold{Details} below.} +\item{...}{Not used for this model.} } \value{ -\code{modelData} A class \code{"hBayesDM"} object with the following components: +A class "hBayesDM" object \code{modelData} with the following components: \describe{ - \item{\code{model}}{Character string with the name of the model (\code{"wcs_sql"}).} - \item{\code{allIndPars}}{\code{"data.frame"} containing the summarized parameter - values (as specified by \code{"indPars"}) for each subject.} - \item{\code{parVals}}{A \code{"list"} where each element contains posterior samples - over different model parameters. 
} - \item{\code{fit}}{A class \code{"stanfit"} object containing the fitted model.} - \item{\code{rawdata}}{\code{"data.frame"} containing the raw data used to fit the model, as specified by the user.} + \item{\code{model}}{Character value that is the name of the model ("wcs_sql").} + \item{\code{allIndPars}}{Data.frame containing the summarized parameter values (as specified by + \code{indPars}) for each subject.} + \item{\code{parVals}}{List object containing the posterior samples over different parameters.} + \item{\code{fit}}{A class \code{\link[rstan]{stanfit}} object that contains the fitted Stan + model.} + \item{\code{rawdata}}{Data.frame containing the raw data used to fit the model, as specified by + the user.} + + + } } \description{ -Hierarchical Bayesian Modeling of the Wisconsin Card Sorting (WCS) Task using the following parameters: "r" (reward sensitivity), "p" (punishment sensitivity), and "d" (decision consistency or inverse temperature). +Hierarchical Bayesian Modeling of the Wisconsin Card Sorting Task with the following parameters: + "r" (reward sensitivity), "p" (punishment sensitivity), "d" (decision consistency or inverse temperature). Contributor: \href{https://ccs-lab.github.io/team/dayeong-min/}{Dayeong Min} -\strong{MODEL:} -Sequential Learning Model (Bishara et al., 2010, Journal of Mathematical Psychology) +\strong{MODEL:} Sequential Learning Model (Bishara et al., 2010, Journal of Mathematical Psychology) } \details{ This section describes some of the function arguments in greater detail. -\strong{data} should be assigned a character value specifying the full path and name of the file, including the file extension -(e.g. ".txt"), that contains the behavioral data of all subjects of interest for the current analysis. -The file should be a \strong{tab-delimited} text (.txt) file whose rows represent trial-by-trial observations and columns -represent variables. 
For the Wisconsin Card Sorting Task, there should be at least three columns of data -with the labels "subjID", "choice", and "outcome". It is not necessary for the columns to be in this particular order, -however it is necessary that they be labelled correctly and contain the information below: +\strong{data} should be assigned a character value specifying the full path and name (including + extension information, e.g. ".txt") of the file that contains the behavioral data-set of all + subjects of interest for the current analysis. The file should be a \strong{tab-delimited} text + file, whose rows represent trial-by-trial observations and columns represent variables.\cr +For the Wisconsin Card Sorting Task, there should be 3 columns of data with the + labels "subjID", "choice", "outcome". It is not necessary for the columns to be in this particular order, + however it is necessary that they be labeled correctly and contain the information below: \describe{ - \item{\code{"subjID"}}{A unique identifier for each subject within data-set to be analyzed.} - \item{\code{"choice"}}{An integer value representing the chosen choice option of deck within the given trial (e.g., 1 to 4 in WCST).} - \item{\code{"outcome"}}{A 1 or 0 for outcome within each given trial (1 = correct, 0 = wrong).} + \item{"subjID"}{A unique identifier for each subject in the data-set.} + \item{"choice"}{Integer value indicating which deck was chosen on that trial: 1, 2, 3, or 4.} + \item{"outcome"}{1 or 0, indicating the outcome of that trial: correct == 1, wrong == 0.} + + + + + + } -\strong{*}Note: The data.txt file may contain other columns of data (e.g. "rule", "trial", etc.), but only the data with the column -names listed above will be used for analysis/modeling. As long as the columns above are present and labelled correctly, -there is no need to remove other miscellaneous data columns. 
- -\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored upon the -beginning of each chain. For those familiar with Bayesian methods, this value is equivalent to a burn-in sample. -Due to the nature of MCMC sampling, initial values (where the sampling chain begins) can have a heavy influence -on the generated posterior distributions. The \code{nwarmup} argument can be set to a high number in order to curb the -effects that initial values have on the resulting posteriors. - -\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling sequences) should be -used to draw samples from the posterior distribution. Since the posteriors are generated from a sampling -process, it is good practice to run multiple chains to ensure that a representative posterior is attained. When -sampling is completed, the multiple chains may be checked for convergence with the \code{plot(myModel, type = "trace")} -command. The chains should resemble a "furry caterpillar". - -\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC samples being chosen -to generate the posterior distributions. By default, \code{nthin} is equal to 1, hence every sample is used to -generate the posterior. - -\strong{Contol Parameters:} adapt_delta, stepsize, and max_treedepth are advanced options that give the user more control -over Stan's MCMC sampler. The Stan creators recommend that only advanced users change the default values, as alterations -can profoundly change the sampler's behavior. Refer to Hoffman & Gelman (2014, Journal of Machine Learning Research) for -more information on the functioning of the sampler control parameters. One can also refer to section 58.2 of the -\href{http://mc-stan.org/documentation/}{Stan User's Manual} for a less technical description of these arguments. +\strong{*}Note: The file may contain other columns of data (e.g. 
"ReactionTime", "trial_number", + etc.), but only the data within the column names listed above will be used during the modeling. + As long as the necessary columns mentioned above are present and labeled correctly, there is no + need to remove other miscellaneous data columns. + +\strong{nwarmup} is a numerical value that specifies how many MCMC samples should not be stored + upon the beginning of each chain. For those familiar with Bayesian methods, this is equivalent + to burn-in samples. Due to the nature of the MCMC algorithm, initial values (i.e. where the + sampling chains begin) can have a heavy influence on the generated posterior distributions. The + \code{nwarmup} argument can be set to a high number in order to curb the effects that initial + values have on the resulting posteriors. + +\strong{nchain} is a numerical value that specifies how many chains (i.e. independent sampling + sequences) should be used to draw samples from the posterior distribution. Since the posteriors + are generated from a sampling process, it is good practice to run multiple chains to ensure + that a reasonably representative posterior is attained. When the sampling is complete, it is + possible to check the multiple chains for convergence by running the following line of code: + \code{plot(output, type = "trace")}. The trace-plot should resemble a "furry caterpillar". + +\strong{nthin} is a numerical value that specifies the "skipping" behavior of the MCMC sampler, + using only every \code{i == nthin} samples to generate posterior distributions. By default, + \code{nthin} is equal to 1, meaning that every sample is used to generate the posterior. + +\strong{Control Parameters:} \code{adapt_delta}, \code{stepsize}, and \code{max_treedepth} are + advanced options that give the user more control over Stan's MCMC sampler. It is recommended + that only advanced users change the default values, as alterations can profoundly change the + sampler's behavior. 
Refer to 'The No-U-Turn Sampler: Adaptively Setting Path Lengths in + Hamiltonian Monte Carlo (Hoffman & Gelman, 2014, Journal of Machine Learning Research)' for + more information on the sampler control parameters. One can also refer to 'Section 34.2. HMC + Algorithm Parameters' of the \href{http://mc-stan.org/users/documentation/}{Stan User's Guide + and Reference Manual}, or to the help page for \code{\link[rstan]{stan}} for a less technical + description of these arguments. } \examples{ \dontrun{ # Run the model and store results in "output" -output <- wcs_sql(data = "example", niter = 2000, nwarmup = 1000, nchain = 3, ncore = 3) +output <- wcs_sql("example", niter = 2000, nwarmup = 1000, nchain = 4, ncore = 4) -# Visually check convergence of the sampling chains (should like like 'hairy caterpillars') -plot(output, type = 'trace') +# Visually check convergence of the sampling chains (should look like 'hairy caterpillars') +plot(output, type = "trace") # Check Rhat values (all Rhat values should be less than or equal to 1.1) rhat(output) @@ -120,13 +148,11 @@ printFit(output) } } \references{ -Hoffman, M. D., & Gelman, A. (2014). The No-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo. The -Journal of Machine Learning Research, 15(1), 1593-1623. - -Bishara, A. J., Kruschke, J. K., Stout, J. C., Bechara, A., McCabe, D. P., & Busemeyer, J. R. (2010). -Sequential learning models for the Wisconsin card sort task: Assessing processes in substance dependent individuals. -Journal of mathematical psychology, 54(1), 5-13. +Bishara, A. J., Kruschke, J. K., Stout, J. C., Bechara, A., McCabe, D. P., & Busemeyer, J. R. + (2010). Sequential learning models for the Wisconsin card sort task: Assessing processes in + substance dependent individuals. Journal of Mathematical Psychology, 54(1), 5-13. 
} \seealso{ -We refer users to our in-depth tutorial for an example of using hBayesDM: \url{https://rpubs.com/CCSL/hBayesDM} +We refer users to our in-depth tutorial for an example of using hBayesDM: + \url{https://rpubs.com/CCSL/hBayesDM} } diff --git a/src/.Rapp.history b/src/.Rapp.history deleted file mode 100644 index 93e9d986..00000000 --- a/src/.Rapp.history +++ /dev/null @@ -1 +0,0 @@ -load("/Users/haines.175/Dropbox/CCSL/R_Package_Project/Package_Build/hBayesDM-GitHub/src/symbols.rds") diff --git a/src/Makevars b/src/Makevars old mode 100755 new mode 100644 index 36002269..720131ef --- a/src/Makevars +++ b/src/Makevars @@ -1,10 +1,22 @@ -STANHEADERS_SRC = `"$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" --vanilla -e "cat(system.file('include', 'src', package = 'StanHeaders'))"` +STANHEADERS_SRC = `"$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "cat(system.file('include', 'src', package = 'StanHeaders', mustWork = TRUE), sep = '')"` +PKG_CPPFLAGS = -I"../inst/include" -I"$(STANHEADERS_SRC)" -DBOOST_DISABLE_ASSERTS -DEIGEN_NO_DEBUG -DBOOST_MATH_OVERFLOW_ERROR_POLICY=errno_on_error -DBOOST_NO_AUTO_PTR + CXX_STD = CXX14 -PKG_CPPFLAGS = -I"../src/include" -I"$(STANHEADERS_SRC)" -DBOOST_DISABLE_ASSERTS -DEIGEN_NO_DEBUG -DBOOST_MATH_OVERFLOW_ERROR_POLICY=errno_on_error -SHLIB_LDFLAGS = $(SHLIB_CXXLDFLAGS) -SHLIB_LD = $(SHLIB_CXXLD) +SOURCE_PATH = ../inst/stan_files +ifeq ($(BUILD_ALL), true) + SOURCES = $(wildcard $(SOURCE_PATH)/*.stan) +endif +OBJECTS = $(SOURCES:.stan=.o) init.o all: $(SHLIB) - @if test -e "/usr/bin/install_name_tool" && test -e "/usr/local/clang4/lib/libc++.1.dylib" && test -e "/usr/lib/libc++.1.dylib"; then /usr/bin/install_name_tool -change /usr/local/clang4/lib/libc++.1.dylib /usr/lib/libc++.1.dylib $(SHLIB); fi -.phony: all +clean: + rm -rf "$(SOURCE_PATH)/*.o" + rm -rf *.so *.o + rm -rf "$(SOURCE_PATH)/*.cc" + rm -rf "$(SOURCE_PATH)/*.hpp" + +%.cc: %.stan + "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "source(file.path('..', 'tools', 'make_cc.R')); 
make_cc(commandArgs(TRUE))" $< + +.phony: all clean diff --git a/src/Makevars.win b/src/Makevars.win index e4a9abe4..c9a5fc0b 100644 --- a/src/Makevars.win +++ b/src/Makevars.win @@ -1,5 +1,22 @@ -STANHEADERS_SRC = `"$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" --vanilla -e "cat(system.file('include', 'src', package = 'StanHeaders'))"` +STANHEADERS_SRC = `"$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "cat(system.file('include', 'src', package = 'StanHeaders', mustWork = TRUE), sep = '')"` +PKG_CPPFLAGS = -I"../inst/include" -I"$(STANHEADERS_SRC)" -DBOOST_DISABLE_ASSERTS -DEIGEN_NO_DEBUG -DBOOST_MATH_OVERFLOW_ERROR_POLICY=errno_on_error -DBOOST_NO_AUTO_PTR + CXX_STD = CXX14 -PKG_CPPFLAGS = -I"../src/include" -I"$(STANHEADERS_SRC)" -DBOOST_DISABLE_ASSERTS -DEIGEN_NO_DEBUG -DBOOST_MATH_OVERFLOW_ERROR_POLICY=errno_on_error -flto -ffat-lto-objects -SHLIB_LDFLAGS = $(SHLIB_CXXLDFLAGS) -SHLIB_LD = $(SHLIB_CXXLD) +SOURCE_PATH = ../inst/stan_files +ifeq ($(BUILD_ALL), true) + SOURCES = $(wildcard $(SOURCE_PATH)/*.stan) +endif +OBJECTS = $(SOURCES:.stan=.o) init.o + +all: $(SHLIB) + +clean: + RM -rf "$(SOURCE_PATH)/*.o" + RM -rf *.so *.o + RM -rf "$(SOURCE_PATH)/*.cc" + RM -rf "$(SOURCE_PATH)/*.hpp" + +%.cc: %.stan + "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "source(file.path('..', 'tools', 'make_cc.R')); make_cc(commandArgs(TRUE))" $< + +.phony: clean diff --git a/src/Modules.cpp b/src/Modules.cpp deleted file mode 100644 index a1d01fac..00000000 --- a/src/Modules.cpp +++ /dev/null @@ -1,1363 +0,0 @@ -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4bandit2arm_delta_mod) { - - - class_ >("model_bandit2arm_delta") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - 
.method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4bandit4arm_4par_mod) { - - - class_ >("model_bandit4arm_4par") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4bandit4arm_lapse_mod) { - - - class_ >("model_bandit4arm_lapse") - - 
.constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4bart_par4_mod) { - - - class_ >("model_bart_par4") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - 
.method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4choiceRT_ddm_mod) { - - - class_ >("model_choiceRT_ddm") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4choiceRT_ddm_single_mod) { - - - class_ >("model_choiceRT_ddm_single") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - 
.method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4choiceRT_lba_mod) { - - - class_ >("model_choiceRT_lba") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4choiceRT_lba_single_mod) { - - - class_ >("model_choiceRT_lba_single") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit 
::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4cra_exp_mod) { - - - class_ >("model_cra_exp") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; 
-#include "include/models.hpp" - -RCPP_MODULE(stan_fit4cra_linear_mod) { - - - class_ >("model_cra_linear") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4dd_cs_mod) { - - - class_ >("model_dd_cs") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - 
.method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4dd_cs_single_mod) { - - - class_ >("model_dd_cs_single") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4dd_exp_mod) { - - - class_ >("model_dd_exp") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - 
.method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4dd_hyperbolic_mod) { - - - class_ >("model_dd_hyperbolic") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4dd_hyperbolic_single_mod) { - - - class_ >("model_dd_hyperbolic_single") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit 
::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4gng_m1_mod) { - - - class_ >("model_gng_m1") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit 
::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4gng_m1_reg_mod) { - - - class_ >("model_gng_m1_reg") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4gng_m2_mod) { - - - class_ >("model_gng_m2") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit 
::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4gng_m2_reg_mod) { - - - class_ >("model_gng_m2_reg") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4gng_m3_mod) { - - - class_ >("model_gng_m3") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - 
.method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4gng_m3_reg_mod) { - - - class_ >("model_gng_m3_reg") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4gng_m4_mod) { - - - class_ >("model_gng_m4") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - 
.method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4gng_m4_reg_mod) { - - - class_ >("model_gng_m4_reg") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - 
.method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4igt_orl_mod) { - - - class_ >("model_igt_orl") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4igt_pvl_decay_mod) { - - - class_ >("model_igt_pvl_decay") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - 
.method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4igt_pvl_delta_mod) { - - - class_ >("model_igt_pvl_delta") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4igt_vpp_mod) { - - - class_ >("model_igt_vpp") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - 
.method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4peer_ocu_mod) { - - - class_ >("model_peer_ocu") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4prl_ewa_mod) { - - - class_ >("model_prl_ewa") - - .constructor() - - - 
.method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4prl_fictitious_mod) { - - - class_ >("model_prl_fictitious") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", 
&rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4prl_fictitious_multipleB_mod) { - - - class_ >("model_prl_fictitious_multipleB") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4prl_fictitious_rp_mod) { - - - class_ >("model_prl_fictitious_rp") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", 
&rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4prl_fictitious_rp_woa_mod) { - - - class_ >("model_prl_fictitious_rp_woa") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4prl_fictitious_woa_mod) { - - - class_ >("model_prl_fictitious_woa") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - 
.method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4prl_rp_mod) { - - - class_ >("model_prl_rp") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include 
"include/models.hpp" - -RCPP_MODULE(stan_fit4prl_rp_multipleB_mod) { - - - class_ >("model_prl_rp_multipleB") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4pst_gainloss_Q_mod) { - - - class_ >("model_pst_gainloss_Q") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit 
::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4ra_noLA_mod) { - - - class_ >("model_ra_noLA") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4ra_noRA_mod) { - - - class_ >("model_ra_noRA") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - 
.method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4ra_prospect_mod) { - - - class_ >("model_ra_prospect") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4rdt_happiness_mod) { - - - class_ >("model_rdt_happiness") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - 
.method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4ts_par4_mod) { - - - class_ >("model_ts_par4") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; 
-} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4ts_par6_mod) { - - - class_ >("model_ts_par6") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4ts_par7_mod) { - - - class_ >("model_ts_par7") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", 
&rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4ug_bayes_mod) { - - - class_ >("model_ug_bayes") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4ug_delta_mod) { - - - class_ >("model_ug_delta") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit 
::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} -#include -using namespace Rcpp ; -#include "include/models.hpp" - -RCPP_MODULE(stan_fit4wcs_sql_mod) { - - - class_ >("model_wcs_sql") - - .constructor() - - - .method("call_sampler", &rstan::stan_fit ::call_sampler) - .method("param_names", &rstan::stan_fit ::param_names) - .method("param_names_oi", &rstan::stan_fit ::param_names_oi) - .method("param_fnames_oi", &rstan::stan_fit ::param_fnames_oi) - .method("param_dims", &rstan::stan_fit ::param_dims) - .method("param_dims_oi", &rstan::stan_fit ::param_dims_oi) - .method("update_param_oi", &rstan::stan_fit ::update_param_oi) - .method("param_oi_tidx", &rstan::stan_fit ::param_oi_tidx) - .method("grad_log_prob", &rstan::stan_fit ::grad_log_prob) - .method("log_prob", &rstan::stan_fit ::log_prob) - .method("unconstrain_pars", &rstan::stan_fit ::unconstrain_pars) - .method("constrain_pars", &rstan::stan_fit ::constrain_pars) - .method("num_pars_unconstrained", &rstan::stan_fit ::num_pars_unconstrained) - .method("unconstrained_param_names", &rstan::stan_fit ::unconstrained_param_names) - .method("constrained_param_names", &rstan::stan_fit ::constrained_param_names) - ; -} diff --git a/src/include/models.hpp b/src/include/models.hpp deleted file mode 100755 index de03dced..00000000 --- a/src/include/models.hpp +++ /dev/null @@ -1,50614 +0,0 @@ -/* - hBayesDM is distributed under the terms of the GNU General Public - License but without any warranty. 
See the GNU General Public - License for more details. -*/ -#ifndef MODELS_HPP -#define MODELS_HPP -#define STAN__SERVICES__COMMAND_HPP -#include -// Code generated by Stan version 2.18.0 - -#include - -namespace model_bandit2arm_delta_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_bandit2arm_delta"); - reader.add_event(108, 106, "end", "model_bandit2arm_delta"); - return reader; -} - -class model_bandit2arm_delta : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > outcome; - vector_d initV; -public: - model_bandit2arm_delta(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_bandit2arm_delta(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_bandit2arm_delta_namespace::model_bandit2arm_delta"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - 
context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = 
vals_r__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],2); - } - } - // initialize data variables - validate_non_negative_index("initV", "2", 2); - initV = vector_d(static_cast(2)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,2)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "2", 2); - num_params_r__ += 2; - validate_non_negative_index("sigma", "2", 2); - num_params_r__ += 2; - validate_non_negative_index("A_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("tau_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_bandit2arm_delta() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "2", 2); - 
context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(2)); - vector_d mu_p(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "2", 2); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(2)); - vector_d sigma(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("A_pr"))) - throw std::runtime_error("variable A_pr missing"); - vals_r__ = context__.vals_r("A_pr"); - pos__ = 0U; - validate_non_negative_index("A_pr", "N", N); - context__.validate_dims("initialization", "A_pr", "vector_d", context__.to_vec(N)); - vector_d A_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - A_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(A_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable A_pr: ") + e.what()); - } - - if (!(context__.contains_r("tau_pr"))) - throw std::runtime_error("variable tau_pr missing"); - vals_r__ = context__.vals_r("tau_pr"); - pos__ = 0U; - validate_non_negative_index("tau_pr", "N", N); - context__.validate_dims("initialization", "tau_pr", "vector_d", context__.to_vec(N)); - vector_d tau_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - tau_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(tau_pr); - } catch (const std::exception& e) { - 
throw std::runtime_error(std::string("Error transforming variable tau_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(2,lp__); - else - mu_p = in__.vector_constrain(2); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,2,lp__); - else - sigma = in__.vector_lb_constrain(0,2); - - Eigen::Matrix A_pr; - (void) A_pr; // dummy to suppress unused var warning - if (jacobian__) - A_pr = in__.vector_constrain(N,lp__); - else - A_pr = in__.vector_constrain(N); - - Eigen::Matrix tau_pr; - (void) tau_pr; // dummy to suppress unused var warning - if (jacobian__) - tau_pr = in__.vector_constrain(N,lp__); - else - tau_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("A", "N", N); - Eigen::Matrix A(static_cast(N)); - (void) A; // dummy to suppress unused var warning - - stan::math::initialize(A, DUMMY_VAR__); - stan::math::fill(A,DUMMY_VAR__); - validate_non_negative_index("tau", "N", N); - Eigen::Matrix 
tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(A, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(A_pr,i,"A_pr",1)))), - "assigning variable A"); - stan::model::assign(tau, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * 5), - "assigning variable tau"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(A(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: A" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(tau(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"A",A,0); - check_less_or_equal(function__,"A",A,1); - check_greater_or_equal(function__,"tau",tau,0); - check_less_or_equal(function__,"tau",tau,5); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(A_pr, 0, 1)); - lp_accum__.add(normal_log(tau_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - local_scalar_t__ 
PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - - - stan::math::assign(ev, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - lp_accum__.add(categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), multiply(get_base1(tau,i,"tau",1),ev))); - stan::math::assign(PE, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + (get_base1(A,i,"A",1) * PE))), - "assigning variable ev"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("A_pr"); - names__.push_back("tau_pr"); - names__.push_back("A"); - names__.push_back("tau"); - names__.push_back("mu_A"); - names__.push_back("mu_tau"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector 
dims__; - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_bandit2arm_delta_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(2); - vector_d sigma = in__.vector_lb_constrain(0,2); - vector_d A_pr = in__.vector_constrain(N); - vector_d tau_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(A_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress 
unused var warning - - try { - validate_non_negative_index("A", "N", N); - Eigen::Matrix A(static_cast(N)); - (void) A; // dummy to suppress unused var warning - - stan::math::initialize(A, DUMMY_VAR__); - stan::math::fill(A,DUMMY_VAR__); - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(A, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(A_pr,i,"A_pr",1)))), - "assigning variable A"); - stan::model::assign(tau, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * 5), - "assigning variable tau"); - } - - // validate transformed parameters - check_greater_or_equal(function__,"A",A,0); - check_less_or_equal(function__,"A",A,1); - check_greater_or_equal(function__,"tau",tau,0); - check_less_or_equal(function__,"tau",tau,5); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(A[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_A; - (void) mu_A; // dummy to suppress unused var warning - - stan::math::initialize(mu_A, DUMMY_VAR__); - stan::math::fill(mu_A,DUMMY_VAR__); - local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to suppress unused var warning - - stan::math::initialize(mu_tau, DUMMY_VAR__); - stan::math::fill(mu_tau,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - 
stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_A, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_tau, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 5)); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - - - stan::math::assign(ev, initV); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),multiply(get_base1(tau,i,"tau",1),ev)))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(softmax(multiply(get_base1(tau,i,"tau",1),ev)), base_rng__), - "assigning variable y_pred"); - stan::math::assign(PE, 
(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + (get_base1(A,i,"A",1) * PE))), - "assigning variable ev"); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_A",mu_A,0); - check_less_or_equal(function__,"mu_A",mu_A,1); - check_greater_or_equal(function__,"mu_tau",mu_tau,0); - check_less_or_equal(function__,"mu_tau",mu_tau,5); - - // write generated quantities - vars__.push_back(mu_A); - vars__.push_back(mu_tau); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_bandit2arm_delta"; - } - - - void constrained_param_names(std::vector& param_names__, - 
bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_bandit4arm_4par_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_bandit4arm_4par"); - reader.add_event(175, 173, "end", "model_bandit4arm_4par"); - return reader; -} - -class model_bandit4arm_4par : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > rew; - vector > los; - vector > choice; - vector_d initV; -public: - model_bandit4arm_4par(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_bandit4arm_4par(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - 
unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_bandit4arm_4par_namespace::model_bandit4arm_4par"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("rew", "N", N); - validate_non_negative_index("rew", "T", T); - context__.validate_dims("data initialization", "rew", "double", context__.to_vec(N,T)); - validate_non_negative_index("rew", "N", N); - validate_non_negative_index("rew", "T", T); - rew = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("rew"); - pos__ = 0; - size_t rew_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < rew_limit_1__; ++i_1__) { - size_t rew_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < rew_limit_0__; ++i_0__) { - 
rew[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("los", "N", N); - validate_non_negative_index("los", "T", T); - context__.validate_dims("data initialization", "los", "double", context__.to_vec(N,T)); - validate_non_negative_index("los", "N", N); - validate_non_negative_index("los", "T", T); - los = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("los"); - pos__ = 0; - size_t los_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < los_limit_1__; ++i_1__) { - size_t los_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < los_limit_0__; ++i_0__) { - los[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - // initialize data variables - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "4", 4); - num_params_r__ += 4; - 
validate_non_negative_index("sigma", "4", 4); - num_params_r__ += 4; - validate_non_negative_index("Arew_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("Apun_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("R_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("P_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_bandit4arm_4par() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "4", 4); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(4)); - vector_d mu_p(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "4", 4); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(4)); - vector_d sigma(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const 
std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("Arew_pr"))) - throw std::runtime_error("variable Arew_pr missing"); - vals_r__ = context__.vals_r("Arew_pr"); - pos__ = 0U; - validate_non_negative_index("Arew_pr", "N", N); - context__.validate_dims("initialization", "Arew_pr", "vector_d", context__.to_vec(N)); - vector_d Arew_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Arew_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Arew_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Arew_pr: ") + e.what()); - } - - if (!(context__.contains_r("Apun_pr"))) - throw std::runtime_error("variable Apun_pr missing"); - vals_r__ = context__.vals_r("Apun_pr"); - pos__ = 0U; - validate_non_negative_index("Apun_pr", "N", N); - context__.validate_dims("initialization", "Apun_pr", "vector_d", context__.to_vec(N)); - vector_d Apun_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Apun_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Apun_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Apun_pr: ") + e.what()); - } - - if (!(context__.contains_r("R_pr"))) - throw std::runtime_error("variable R_pr missing"); - vals_r__ = context__.vals_r("R_pr"); - pos__ = 0U; - validate_non_negative_index("R_pr", "N", N); - context__.validate_dims("initialization", "R_pr", "vector_d", context__.to_vec(N)); - vector_d R_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - R_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(R_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable R_pr: ") + e.what()); - } - - if (!(context__.contains_r("P_pr"))) - throw std::runtime_error("variable P_pr missing"); - vals_r__ = context__.vals_r("P_pr"); - 
pos__ = 0U; - validate_non_negative_index("P_pr", "N", N); - context__.validate_dims("initialization", "P_pr", "vector_d", context__.to_vec(N)); - vector_d P_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - P_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(P_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable P_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(4,lp__); - else - mu_p = in__.vector_constrain(4); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,4,lp__); - else - sigma = in__.vector_lb_constrain(0,4); - - Eigen::Matrix Arew_pr; - (void) Arew_pr; // dummy to suppress unused var warning - if (jacobian__) - Arew_pr = in__.vector_constrain(N,lp__); - else - Arew_pr = in__.vector_constrain(N); - - Eigen::Matrix Apun_pr; - (void) Apun_pr; // dummy to suppress unused var warning - if (jacobian__) - Apun_pr = in__.vector_constrain(N,lp__); - else - 
Apun_pr = in__.vector_constrain(N); - - Eigen::Matrix R_pr; - (void) R_pr; // dummy to suppress unused var warning - if (jacobian__) - R_pr = in__.vector_constrain(N,lp__); - else - R_pr = in__.vector_constrain(N); - - Eigen::Matrix P_pr; - (void) P_pr; // dummy to suppress unused var warning - if (jacobian__) - P_pr = in__.vector_constrain(N,lp__); - else - P_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - validate_non_negative_index("R", "N", N); - Eigen::Matrix R(static_cast(N)); - (void) R; // dummy to suppress unused var warning - - stan::math::initialize(R, DUMMY_VAR__); - stan::math::fill(R,DUMMY_VAR__); - validate_non_negative_index("P", "N", N); - Eigen::Matrix P(static_cast(N)); - (void) P; // dummy to suppress unused var warning - - stan::math::initialize(P, DUMMY_VAR__); - stan::math::fill(P,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - stan::model::assign(R, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + 
(get_base1(sigma,3,"sigma",1) * get_base1(R_pr,i,"R_pr",1)))) * 30), - "assigning variable R"); - stan::model::assign(P, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(P_pr,i,"P_pr",1)))) * 30), - "assigning variable P"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Arew(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Arew" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Apun(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Apun" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(R(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: R" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(P(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: P" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - check_greater_or_equal(function__,"R",R,0); - check_greater_or_equal(function__,"P",P,0); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(Arew_pr, 0, 1.0)); - lp_accum__.add(normal_log(Apun_pr, 0, 1.0)); - 
lp_accum__.add(normal_log(R_pr, 0, 1.0)); - lp_accum__.add(normal_log(P_pr, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("Qr", "4", 4); - Eigen::Matrix Qr(static_cast(4)); - (void) Qr; // dummy to suppress unused var warning - - stan::math::initialize(Qr, DUMMY_VAR__); - stan::math::fill(Qr,DUMMY_VAR__); - validate_non_negative_index("Qp", "4", 4); - Eigen::Matrix Qp(static_cast(4)); - (void) Qp; // dummy to suppress unused var warning - - stan::math::initialize(Qp, DUMMY_VAR__); - stan::math::fill(Qp,DUMMY_VAR__); - validate_non_negative_index("PEr_fic", "4", 4); - Eigen::Matrix PEr_fic(static_cast(4)); - (void) PEr_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEr_fic, DUMMY_VAR__); - stan::math::fill(PEr_fic,DUMMY_VAR__); - validate_non_negative_index("PEp_fic", "4", 4); - Eigen::Matrix PEp_fic(static_cast(4)); - (void) PEp_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEp_fic, DUMMY_VAR__); - stan::math::fill(PEp_fic,DUMMY_VAR__); - validate_non_negative_index("Qsum", "4", 4); - Eigen::Matrix Qsum(static_cast(4)); - (void) Qsum; // dummy to suppress unused var warning - - stan::math::initialize(Qsum, DUMMY_VAR__); - stan::math::fill(Qsum,DUMMY_VAR__); - local_scalar_t__ Qr_chosen; - (void) Qr_chosen; // dummy to suppress unused var warning - - stan::math::initialize(Qr_chosen, DUMMY_VAR__); - stan::math::fill(Qr_chosen,DUMMY_VAR__); - local_scalar_t__ Qp_chosen; - (void) Qp_chosen; // dummy to suppress unused var warning - - stan::math::initialize(Qp_chosen, DUMMY_VAR__); - stan::math::fill(Qp_chosen,DUMMY_VAR__); - local_scalar_t__ PEr; - (void) PEr; // dummy to suppress unused var warning - - stan::math::initialize(PEr, DUMMY_VAR__); - stan::math::fill(PEr,DUMMY_VAR__); - local_scalar_t__ PEp; - (void) PEp; // dummy to suppress unused var warning - - stan::math::initialize(PEp, DUMMY_VAR__); - stan::math::fill(PEp,DUMMY_VAR__); - - - stan::math::assign(Qr, initV); - 
stan::math::assign(Qp, initV); - stan::math::assign(Qsum, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - lp_accum__.add(categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), Qsum)); - stan::math::assign(PEr, ((get_base1(R,i,"R",1) * get_base1(get_base1(rew,i,"rew",1),t,"rew",2)) - get_base1(Qr,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qr",1))); - stan::math::assign(PEp, ((get_base1(P,i,"P",1) * get_base1(get_base1(los,i,"los",1),t,"los",2)) - get_base1(Qp,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qp",1))); - stan::math::assign(PEr_fic, minus(Qr)); - stan::math::assign(PEp_fic, minus(Qp)); - stan::math::assign(Qr_chosen, get_base1(Qr,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qr",1)); - stan::math::assign(Qp_chosen, get_base1(Qp,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qp",1)); - stan::math::assign(Qr, stan::model::deep_copy(add(Qr,multiply(get_base1(Arew,i,"Arew",1),PEr_fic)))); - stan::math::assign(Qp, stan::model::deep_copy(add(Qp,multiply(get_base1(Apun,i,"Apun",1),PEp_fic)))); - stan::model::assign(Qr, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (Qr_chosen + (get_base1(Arew,i,"Arew",1) * PEr)), - "assigning variable Qr"); - stan::model::assign(Qp, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (Qp_chosen + (get_base1(Apun,i,"Apun",1) * PEp)), - "assigning variable Qp"); - stan::math::assign(Qsum, add(Qr,Qp)); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ 
log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("Arew_pr"); - names__.push_back("Apun_pr"); - names__.push_back("R_pr"); - names__.push_back("P_pr"); - names__.push_back("Arew"); - names__.push_back("Apun"); - names__.push_back("R"); - names__.push_back("P"); - names__.push_back("mu_Arew"); - names__.push_back("mu_Apun"); - names__.push_back("mu_R"); - names__.push_back("mu_P"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - 
dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_bandit4arm_4par_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(4); - vector_d sigma = in__.vector_lb_constrain(0,4); - vector_d Arew_pr = in__.vector_constrain(N); - vector_d Apun_pr = in__.vector_constrain(N); - vector_d R_pr = in__.vector_constrain(N); - vector_d P_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Arew_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(R_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(P_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - 
stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - validate_non_negative_index("R", "N", N); - Eigen::Matrix R(static_cast(N)); - (void) R; // dummy to suppress unused var warning - - stan::math::initialize(R, DUMMY_VAR__); - stan::math::fill(R,DUMMY_VAR__); - validate_non_negative_index("P", "N", N); - Eigen::Matrix P(static_cast(N)); - (void) P; // dummy to suppress unused var warning - - stan::math::initialize(P, DUMMY_VAR__); - stan::math::fill(P,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - stan::model::assign(R, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(R_pr,i,"R_pr",1)))) * 30), - "assigning variable R"); - stan::model::assign(P, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(P_pr,i,"P_pr",1)))) * 30), - "assigning variable P"); - } - - // validate transformed parameters - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - check_greater_or_equal(function__,"R",R,0); - check_greater_or_equal(function__,"P",P,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - 
vars__.push_back(Arew[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(R[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(P[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_Arew; - (void) mu_Arew; // dummy to suppress unused var warning - - stan::math::initialize(mu_Arew, DUMMY_VAR__); - stan::math::fill(mu_Arew,DUMMY_VAR__); - local_scalar_t__ mu_Apun; - (void) mu_Apun; // dummy to suppress unused var warning - - stan::math::initialize(mu_Apun, DUMMY_VAR__); - stan::math::fill(mu_Apun,DUMMY_VAR__); - local_scalar_t__ mu_R; - (void) mu_R; // dummy to suppress unused var warning - - stan::math::initialize(mu_R, DUMMY_VAR__); - stan::math::fill(mu_R,DUMMY_VAR__); - local_scalar_t__ mu_P; - (void) mu_P; // dummy to suppress unused var warning - - stan::math::initialize(mu_P, DUMMY_VAR__); - stan::math::fill(mu_P,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_Arew, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_Apun, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_R, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 30)); - stan::math::assign(mu_P, (Phi_approx(get_base1(mu_p,4,"mu_p",1)) * 30)); - - 
for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("Qr", "4", 4); - Eigen::Matrix Qr(static_cast(4)); - (void) Qr; // dummy to suppress unused var warning - - stan::math::initialize(Qr, DUMMY_VAR__); - stan::math::fill(Qr,DUMMY_VAR__); - validate_non_negative_index("Qp", "4", 4); - Eigen::Matrix Qp(static_cast(4)); - (void) Qp; // dummy to suppress unused var warning - - stan::math::initialize(Qp, DUMMY_VAR__); - stan::math::fill(Qp,DUMMY_VAR__); - validate_non_negative_index("PEr_fic", "4", 4); - Eigen::Matrix PEr_fic(static_cast(4)); - (void) PEr_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEr_fic, DUMMY_VAR__); - stan::math::fill(PEr_fic,DUMMY_VAR__); - validate_non_negative_index("PEp_fic", "4", 4); - Eigen::Matrix PEp_fic(static_cast(4)); - (void) PEp_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEp_fic, DUMMY_VAR__); - stan::math::fill(PEp_fic,DUMMY_VAR__); - validate_non_negative_index("Qsum", "4", 4); - Eigen::Matrix Qsum(static_cast(4)); - (void) Qsum; // dummy to suppress unused var warning - - stan::math::initialize(Qsum, DUMMY_VAR__); - stan::math::fill(Qsum,DUMMY_VAR__); - local_scalar_t__ Qr_chosen; - (void) Qr_chosen; // dummy to suppress unused var warning - - stan::math::initialize(Qr_chosen, DUMMY_VAR__); - stan::math::fill(Qr_chosen,DUMMY_VAR__); - local_scalar_t__ Qp_chosen; - (void) Qp_chosen; // dummy to suppress unused var warning - - stan::math::initialize(Qp_chosen, DUMMY_VAR__); - stan::math::fill(Qp_chosen,DUMMY_VAR__); - local_scalar_t__ PEr; - (void) PEr; // dummy to suppress unused var warning - - stan::math::initialize(PEr, DUMMY_VAR__); - stan::math::fill(PEr,DUMMY_VAR__); - local_scalar_t__ PEp; - (void) PEp; // dummy to suppress unused var warning - - stan::math::initialize(PEp, DUMMY_VAR__); - stan::math::fill(PEp,DUMMY_VAR__); - - - stan::math::assign(Qr, initV); - stan::math::assign(Qp, initV); - stan::math::assign(Qsum, initV); - stan::model::assign(log_lik, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0.0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),Qsum))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(softmax(Qsum), base_rng__), - "assigning variable y_pred"); - stan::math::assign(PEr, ((get_base1(R,i,"R",1) * get_base1(get_base1(rew,i,"rew",1),t,"rew",2)) - get_base1(Qr,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qr",1))); - stan::math::assign(PEp, ((get_base1(P,i,"P",1) * get_base1(get_base1(los,i,"los",1),t,"los",2)) - get_base1(Qp,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qp",1))); - stan::math::assign(PEr_fic, minus(Qr)); - stan::math::assign(PEp_fic, minus(Qp)); - stan::math::assign(Qr_chosen, get_base1(Qr,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qr",1)); - stan::math::assign(Qp_chosen, get_base1(Qp,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qp",1)); - stan::math::assign(Qr, stan::model::deep_copy(add(Qr,multiply(get_base1(Arew,i,"Arew",1),PEr_fic)))); - stan::math::assign(Qp, stan::model::deep_copy(add(Qp,multiply(get_base1(Apun,i,"Apun",1),PEp_fic)))); - stan::model::assign(Qr, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (Qr_chosen + (get_base1(Arew,i,"Arew",1) * PEr)), - "assigning variable Qr"); - stan::model::assign(Qp, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - 
(Qp_chosen + (get_base1(Apun,i,"Apun",1) * PEp)), - "assigning variable Qp"); - stan::math::assign(Qsum, add(Qr,Qp)); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_Arew",mu_Arew,0); - check_less_or_equal(function__,"mu_Arew",mu_Arew,1); - check_greater_or_equal(function__,"mu_Apun",mu_Apun,0); - check_less_or_equal(function__,"mu_Apun",mu_Apun,1); - check_greater_or_equal(function__,"mu_R",mu_R,0); - check_greater_or_equal(function__,"mu_P",mu_P,0); - - // write generated quantities - vars__.push_back(mu_Arew); - vars__.push_back(mu_Apun); - vars__.push_back(mu_R); - vars__.push_back(mu_P); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_bandit4arm_4par"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - 
param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "R_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "P_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "R" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "P" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_R"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_P"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "R_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "P_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "R" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "P" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_R"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_P"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_bandit4arm_lapse_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_bandit4arm_lapse"); - reader.add_event(181, 179, "end", "model_bandit4arm_lapse"); - return reader; -} - -class model_bandit4arm_lapse : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > rew; - vector > los; - vector > choice; - vector_d initV; -public: - model_bandit4arm_lapse(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_bandit4arm_lapse(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_bandit4arm_lapse_namespace::model_bandit4arm_lapse"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var 
warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("rew", "N", N); - validate_non_negative_index("rew", "T", T); - context__.validate_dims("data initialization", "rew", "double", context__.to_vec(N,T)); - validate_non_negative_index("rew", "N", N); - validate_non_negative_index("rew", "T", T); - rew = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("rew"); - pos__ = 0; - size_t rew_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < rew_limit_1__; ++i_1__) { - size_t rew_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < rew_limit_0__; ++i_0__) { - rew[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("los", "N", N); - validate_non_negative_index("los", "T", T); - context__.validate_dims("data initialization", "los", "double", context__.to_vec(N,T)); - validate_non_negative_index("los", "N", N); - validate_non_negative_index("los", "T", T); - los = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("los"); - pos__ = 0; - size_t los_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < 
los_limit_1__; ++i_1__) { - size_t los_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < los_limit_0__; ++i_0__) { - los[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - // initialize data variables - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "5", 5); - num_params_r__ += 5; - validate_non_negative_index("sigma", "5", 5); - num_params_r__ += 5; - validate_non_negative_index("Arew_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("Apun_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("R_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("P_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("xi_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, 
current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_bandit4arm_lapse() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "5", 5); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(5)); - vector_d mu_p(static_cast(5)); - for (int j1__ = 0U; j1__ < 5; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "5", 5); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(5)); - vector_d sigma(static_cast(5)); - for (int j1__ = 0U; j1__ < 5; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("Arew_pr"))) - throw std::runtime_error("variable Arew_pr missing"); - vals_r__ = context__.vals_r("Arew_pr"); - pos__ = 0U; - validate_non_negative_index("Arew_pr", "N", N); - context__.validate_dims("initialization", "Arew_pr", "vector_d", context__.to_vec(N)); - vector_d 
Arew_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Arew_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Arew_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Arew_pr: ") + e.what()); - } - - if (!(context__.contains_r("Apun_pr"))) - throw std::runtime_error("variable Apun_pr missing"); - vals_r__ = context__.vals_r("Apun_pr"); - pos__ = 0U; - validate_non_negative_index("Apun_pr", "N", N); - context__.validate_dims("initialization", "Apun_pr", "vector_d", context__.to_vec(N)); - vector_d Apun_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Apun_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Apun_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Apun_pr: ") + e.what()); - } - - if (!(context__.contains_r("R_pr"))) - throw std::runtime_error("variable R_pr missing"); - vals_r__ = context__.vals_r("R_pr"); - pos__ = 0U; - validate_non_negative_index("R_pr", "N", N); - context__.validate_dims("initialization", "R_pr", "vector_d", context__.to_vec(N)); - vector_d R_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - R_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(R_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable R_pr: ") + e.what()); - } - - if (!(context__.contains_r("P_pr"))) - throw std::runtime_error("variable P_pr missing"); - vals_r__ = context__.vals_r("P_pr"); - pos__ = 0U; - validate_non_negative_index("P_pr", "N", N); - context__.validate_dims("initialization", "P_pr", "vector_d", context__.to_vec(N)); - vector_d P_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - P_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(P_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable P_pr: ") + e.what()); 
- } - - if (!(context__.contains_r("xi_pr"))) - throw std::runtime_error("variable xi_pr missing"); - vals_r__ = context__.vals_r("xi_pr"); - pos__ = 0U; - validate_non_negative_index("xi_pr", "N", N); - context__.validate_dims("initialization", "xi_pr", "vector_d", context__.to_vec(N)); - vector_d xi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - xi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(xi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable xi_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(5,lp__); - else - mu_p = in__.vector_constrain(5); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,5,lp__); - else - sigma = in__.vector_lb_constrain(0,5); - - Eigen::Matrix Arew_pr; - (void) Arew_pr; // dummy to suppress unused var warning - if (jacobian__) - Arew_pr = in__.vector_constrain(N,lp__); - else - Arew_pr = in__.vector_constrain(N); - - 
Eigen::Matrix Apun_pr; - (void) Apun_pr; // dummy to suppress unused var warning - if (jacobian__) - Apun_pr = in__.vector_constrain(N,lp__); - else - Apun_pr = in__.vector_constrain(N); - - Eigen::Matrix R_pr; - (void) R_pr; // dummy to suppress unused var warning - if (jacobian__) - R_pr = in__.vector_constrain(N,lp__); - else - R_pr = in__.vector_constrain(N); - - Eigen::Matrix P_pr; - (void) P_pr; // dummy to suppress unused var warning - if (jacobian__) - P_pr = in__.vector_constrain(N,lp__); - else - P_pr = in__.vector_constrain(N); - - Eigen::Matrix xi_pr; - (void) xi_pr; // dummy to suppress unused var warning - if (jacobian__) - xi_pr = in__.vector_constrain(N,lp__); - else - xi_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - validate_non_negative_index("R", "N", N); - Eigen::Matrix R(static_cast(N)); - (void) R; // dummy to suppress unused var warning - - stan::math::initialize(R, DUMMY_VAR__); - stan::math::fill(R,DUMMY_VAR__); - validate_non_negative_index("P", "N", N); - Eigen::Matrix P(static_cast(N)); - (void) P; // dummy to suppress unused var warning - - stan::math::initialize(P, DUMMY_VAR__); - stan::math::fill(P,DUMMY_VAR__); - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 
Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - stan::model::assign(R, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(R_pr,i,"R_pr",1)))) * 30), - "assigning variable R"); - stan::model::assign(P, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(P_pr,i,"P_pr",1)))) * 30), - "assigning variable P"); - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,5,"mu_p",1) + (get_base1(sigma,5,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Arew(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Arew" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Apun(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Apun" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(R(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: R" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(P(i0__))) { - 
std::stringstream msg__; - msg__ << "Undefined transformed parameter: P" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(xi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: xi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - check_greater_or_equal(function__,"R",R,0); - check_greater_or_equal(function__,"P",P,0); - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(Arew_pr, 0, 1.0)); - lp_accum__.add(normal_log(Apun_pr, 0, 1.0)); - lp_accum__.add(normal_log(R_pr, 0, 1.0)); - lp_accum__.add(normal_log(P_pr, 0, 1.0)); - lp_accum__.add(normal_log(xi_pr, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("Qr", "4", 4); - Eigen::Matrix Qr(static_cast(4)); - (void) Qr; // dummy to suppress unused var warning - - stan::math::initialize(Qr, DUMMY_VAR__); - stan::math::fill(Qr,DUMMY_VAR__); - validate_non_negative_index("Qp", "4", 4); - Eigen::Matrix Qp(static_cast(4)); - (void) Qp; // dummy to suppress unused var warning - - stan::math::initialize(Qp, DUMMY_VAR__); - stan::math::fill(Qp,DUMMY_VAR__); - validate_non_negative_index("PEr_fic", "4", 4); - Eigen::Matrix PEr_fic(static_cast(4)); - (void) PEr_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEr_fic, DUMMY_VAR__); - stan::math::fill(PEr_fic,DUMMY_VAR__); - validate_non_negative_index("PEp_fic", "4", 4); - 
Eigen::Matrix PEp_fic(static_cast(4)); - (void) PEp_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEp_fic, DUMMY_VAR__); - stan::math::fill(PEp_fic,DUMMY_VAR__); - validate_non_negative_index("Qsum", "4", 4); - Eigen::Matrix Qsum(static_cast(4)); - (void) Qsum; // dummy to suppress unused var warning - - stan::math::initialize(Qsum, DUMMY_VAR__); - stan::math::fill(Qsum,DUMMY_VAR__); - local_scalar_t__ Qr_chosen; - (void) Qr_chosen; // dummy to suppress unused var warning - - stan::math::initialize(Qr_chosen, DUMMY_VAR__); - stan::math::fill(Qr_chosen,DUMMY_VAR__); - local_scalar_t__ Qp_chosen; - (void) Qp_chosen; // dummy to suppress unused var warning - - stan::math::initialize(Qp_chosen, DUMMY_VAR__); - stan::math::fill(Qp_chosen,DUMMY_VAR__); - local_scalar_t__ PEr; - (void) PEr; // dummy to suppress unused var warning - - stan::math::initialize(PEr, DUMMY_VAR__); - stan::math::fill(PEr,DUMMY_VAR__); - local_scalar_t__ PEp; - (void) PEp; // dummy to suppress unused var warning - - stan::math::initialize(PEp, DUMMY_VAR__); - stan::math::fill(PEp,DUMMY_VAR__); - - - stan::math::assign(Qr, initV); - stan::math::assign(Qp, initV); - stan::math::assign(Qsum, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - lp_accum__.add(categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), add(multiply(softmax(Qsum),(1 - get_base1(xi,i,"xi",1))),(get_base1(xi,i,"xi",1) / 4)))); - stan::math::assign(PEr, ((get_base1(R,i,"R",1) * get_base1(get_base1(rew,i,"rew",1),t,"rew",2)) - get_base1(Qr,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qr",1))); - stan::math::assign(PEp, ((get_base1(P,i,"P",1) * get_base1(get_base1(los,i,"los",1),t,"los",2)) - get_base1(Qp,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qp",1))); - stan::math::assign(PEr_fic, minus(Qr)); - stan::math::assign(PEp_fic, minus(Qp)); - stan::math::assign(Qr_chosen, 
get_base1(Qr,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qr",1)); - stan::math::assign(Qp_chosen, get_base1(Qp,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qp",1)); - stan::math::assign(Qr, stan::model::deep_copy(add(Qr,multiply(get_base1(Arew,i,"Arew",1),PEr_fic)))); - stan::math::assign(Qp, stan::model::deep_copy(add(Qp,multiply(get_base1(Apun,i,"Apun",1),PEp_fic)))); - stan::model::assign(Qr, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (Qr_chosen + (get_base1(Arew,i,"Arew",1) * PEr)), - "assigning variable Qr"); - stan::model::assign(Qp, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (Qp_chosen + (get_base1(Apun,i,"Apun",1) * PEp)), - "assigning variable Qp"); - stan::math::assign(Qsum, add(Qr,Qp)); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("Arew_pr"); - names__.push_back("Apun_pr"); - names__.push_back("R_pr"); - names__.push_back("P_pr"); - names__.push_back("xi_pr"); - names__.push_back("Arew"); - names__.push_back("Apun"); - names__.push_back("R"); - names__.push_back("P"); - 
names__.push_back("xi"); - names__.push_back("mu_Arew"); - names__.push_back("mu_Apun"); - names__.push_back("mu_R"); - names__.push_back("mu_P"); - names__.push_back("mu_xi"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(5); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(5); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = 
"model_bandit4arm_lapse_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(5); - vector_d sigma = in__.vector_lb_constrain(0,5); - vector_d Arew_pr = in__.vector_constrain(N); - vector_d Apun_pr = in__.vector_constrain(N); - vector_d R_pr = in__.vector_constrain(N); - vector_d P_pr = in__.vector_constrain(N); - vector_d xi_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 5; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 5; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Arew_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(R_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(P_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - validate_non_negative_index("R", "N", N); - Eigen::Matrix R(static_cast(N)); - (void) R; // dummy to suppress unused var warning - - stan::math::initialize(R, DUMMY_VAR__); - stan::math::fill(R,DUMMY_VAR__); - 
validate_non_negative_index("P", "N", N); - Eigen::Matrix P(static_cast(N)); - (void) P; // dummy to suppress unused var warning - - stan::math::initialize(P, DUMMY_VAR__); - stan::math::fill(P,DUMMY_VAR__); - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - stan::model::assign(R, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(R_pr,i,"R_pr",1)))) * 30), - "assigning variable R"); - stan::model::assign(P, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(P_pr,i,"P_pr",1)))) * 30), - "assigning variable P"); - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,5,"mu_p",1) + (get_base1(sigma,5,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - } - - // validate transformed parameters - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - 
check_greater_or_equal(function__,"R",R,0); - check_greater_or_equal(function__,"P",P,0); - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Arew[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(R[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(P[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_Arew; - (void) mu_Arew; // dummy to suppress unused var warning - - stan::math::initialize(mu_Arew, DUMMY_VAR__); - stan::math::fill(mu_Arew,DUMMY_VAR__); - local_scalar_t__ mu_Apun; - (void) mu_Apun; // dummy to suppress unused var warning - - stan::math::initialize(mu_Apun, DUMMY_VAR__); - stan::math::fill(mu_Apun,DUMMY_VAR__); - local_scalar_t__ mu_R; - (void) mu_R; // dummy to suppress unused var warning - - stan::math::initialize(mu_R, DUMMY_VAR__); - stan::math::fill(mu_R,DUMMY_VAR__); - local_scalar_t__ mu_P; - (void) mu_P; // dummy to suppress unused var warning - - stan::math::initialize(mu_P, DUMMY_VAR__); - stan::math::fill(mu_P,DUMMY_VAR__); - local_scalar_t__ mu_xi; - (void) mu_xi; // dummy to suppress unused var warning - - stan::math::initialize(mu_xi, DUMMY_VAR__); - stan::math::fill(mu_xi,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for 
(int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_Arew, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_Apun, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_R, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 30)); - stan::math::assign(mu_P, (Phi_approx(get_base1(mu_p,4,"mu_p",1)) * 30)); - stan::math::assign(mu_xi, Phi_approx(get_base1(mu_p,5,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("Qr", "4", 4); - Eigen::Matrix Qr(static_cast(4)); - (void) Qr; // dummy to suppress unused var warning - - stan::math::initialize(Qr, DUMMY_VAR__); - stan::math::fill(Qr,DUMMY_VAR__); - validate_non_negative_index("Qp", "4", 4); - Eigen::Matrix Qp(static_cast(4)); - (void) Qp; // dummy to suppress unused var warning - - stan::math::initialize(Qp, DUMMY_VAR__); - stan::math::fill(Qp,DUMMY_VAR__); - validate_non_negative_index("PEr_fic", "4", 4); - Eigen::Matrix PEr_fic(static_cast(4)); - (void) PEr_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEr_fic, DUMMY_VAR__); - stan::math::fill(PEr_fic,DUMMY_VAR__); - validate_non_negative_index("PEp_fic", "4", 4); - Eigen::Matrix PEp_fic(static_cast(4)); - (void) PEp_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEp_fic, DUMMY_VAR__); - stan::math::fill(PEp_fic,DUMMY_VAR__); - validate_non_negative_index("Qsum", "4", 4); - Eigen::Matrix Qsum(static_cast(4)); - (void) Qsum; // dummy to suppress unused var warning - - stan::math::initialize(Qsum, DUMMY_VAR__); - stan::math::fill(Qsum,DUMMY_VAR__); - local_scalar_t__ Qr_chosen; - (void) Qr_chosen; // dummy to suppress unused var warning - - stan::math::initialize(Qr_chosen, DUMMY_VAR__); - stan::math::fill(Qr_chosen,DUMMY_VAR__); - local_scalar_t__ Qp_chosen; - 
(void) Qp_chosen; // dummy to suppress unused var warning - - stan::math::initialize(Qp_chosen, DUMMY_VAR__); - stan::math::fill(Qp_chosen,DUMMY_VAR__); - local_scalar_t__ PEr; - (void) PEr; // dummy to suppress unused var warning - - stan::math::initialize(PEr, DUMMY_VAR__); - stan::math::fill(PEr,DUMMY_VAR__); - local_scalar_t__ PEp; - (void) PEp; // dummy to suppress unused var warning - - stan::math::initialize(PEp, DUMMY_VAR__); - stan::math::fill(PEp,DUMMY_VAR__); - - - stan::math::assign(Qr, initV); - stan::math::assign(Qp, initV); - stan::math::assign(Qsum, initV); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0.0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),add(multiply(softmax(Qsum),(1 - get_base1(xi,i,"xi",1))),(get_base1(xi,i,"xi",1) / 4))))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(add(multiply(softmax(Qsum),(1 - get_base1(xi,i,"xi",1))),(get_base1(xi,i,"xi",1) / 4)), base_rng__), - "assigning variable y_pred"); - stan::math::assign(PEr, ((get_base1(R,i,"R",1) * get_base1(get_base1(rew,i,"rew",1),t,"rew",2)) - get_base1(Qr,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qr",1))); - stan::math::assign(PEp, ((get_base1(P,i,"P",1) * get_base1(get_base1(los,i,"los",1),t,"los",2)) - get_base1(Qp,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qp",1))); - stan::math::assign(PEr_fic, minus(Qr)); - stan::math::assign(PEp_fic, minus(Qp)); - stan::math::assign(Qr_chosen, 
get_base1(Qr,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qr",1)); - stan::math::assign(Qp_chosen, get_base1(Qp,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qp",1)); - stan::math::assign(Qr, stan::model::deep_copy(add(Qr,multiply(get_base1(Arew,i,"Arew",1),PEr_fic)))); - stan::math::assign(Qp, stan::model::deep_copy(add(Qp,multiply(get_base1(Apun,i,"Apun",1),PEp_fic)))); - stan::model::assign(Qr, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (Qr_chosen + (get_base1(Arew,i,"Arew",1) * PEr)), - "assigning variable Qr"); - stan::model::assign(Qp, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (Qp_chosen + (get_base1(Apun,i,"Apun",1) * PEp)), - "assigning variable Qp"); - stan::math::assign(Qsum, add(Qr,Qp)); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_Arew",mu_Arew,0); - check_less_or_equal(function__,"mu_Arew",mu_Arew,1); - check_greater_or_equal(function__,"mu_Apun",mu_Apun,0); - check_less_or_equal(function__,"mu_Apun",mu_Apun,1); - check_greater_or_equal(function__,"mu_R",mu_R,0); - check_greater_or_equal(function__,"mu_P",mu_P,0); - check_greater_or_equal(function__,"mu_xi",mu_xi,0); - check_less_or_equal(function__,"mu_xi",mu_xi,1); - - // write generated quantities - vars__.push_back(mu_Arew); - vars__.push_back(mu_Apun); - vars__.push_back(mu_R); - vars__.push_back(mu_P); - vars__.push_back(mu_xi); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw 
std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_bandit4arm_lapse"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "R_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "P_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "R" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "P" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_R"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_P"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "R_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "P_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "R" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "P" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_R"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_P"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_bart_par4_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_bart_par4"); - reader.add_event(128, 126, "end", "model_bart_par4"); - return reader; -} - -class model_bart_par4 : public prob_grad { -private: - int N; - int T; - vector Tsubj; - int P; - vector > pumps; - vector > explosion; - vector > > d; -public: - model_bart_par4(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_bart_par4(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_bart_par4_namespace::model_bart_par4"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - 
vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - context__.validate_dims("data initialization", "P", "int", context__.to_vec()); - P = int(0); - vals_i__ = context__.vals_i("P"); - pos__ = 0; - P = vals_i__[pos__++]; - validate_non_negative_index("pumps", "N", N); - validate_non_negative_index("pumps", "T", T); - context__.validate_dims("data initialization", "pumps", "int", context__.to_vec(N,T)); - validate_non_negative_index("pumps", "N", N); - validate_non_negative_index("pumps", "T", T); - pumps = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("pumps"); - pos__ = 0; - size_t pumps_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < pumps_limit_1__; ++i_1__) { - size_t pumps_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < pumps_limit_0__; ++i_0__) { - pumps[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("explosion", "N", N); - validate_non_negative_index("explosion", "T", T); - context__.validate_dims("data initialization", "explosion", "int", context__.to_vec(N,T)); - validate_non_negative_index("explosion", "N", N); - validate_non_negative_index("explosion", "T", T); - explosion = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("explosion"); - pos__ = 0; - size_t explosion_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < explosion_limit_1__; ++i_1__) { - size_t explosion_limit_0__ = N; - for (size_t i_0__ = 0; 
i_0__ < explosion_limit_0__; ++i_0__) { - explosion[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],0); - } - check_greater_or_equal(function__,"P",P,2); - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"pumps[k0__][k1__]",pumps[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"explosion[k0__][k1__]",explosion[k0__][k1__],0); - check_less_or_equal(function__,"explosion[k0__][k1__]",explosion[k0__][k1__],1); - } - } - // initialize data variables - validate_non_negative_index("d", "N", N); - validate_non_negative_index("d", "T", T); - validate_non_negative_index("d", "P", P); - d = std::vector > >(N,std::vector >(T,std::vector(P,int(0)))); - stan::math::fill(d, std::numeric_limits::min()); - - for (int j = 1; j <= N; ++j) { - - for (int k = 1; k <= get_base1(Tsubj,j,"Tsubj",1); ++k) { - - for (int l = 1; l <= P; ++l) { - - if (as_bool(logical_lte(l,get_base1(get_base1(pumps,j,"pumps",1),k,"pumps",2)))) { - stan::model::assign(d, - stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_uni(k), stan::model::cons_list(stan::model::index_uni(l), stan::model::nil_index_list()))), - 1, - "assigning variable d"); - } else { - stan::model::assign(d, - stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_uni(k), stan::model::cons_list(stan::model::index_uni(l), stan::model::nil_index_list()))), - 0, - "assigning variable d"); - } - } - } - } - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "4", 4); - num_params_r__ 
+= 4; - validate_non_negative_index("sigma", "4", 4); - num_params_r__ += 4; - validate_non_negative_index("phi_p", "N", N); - num_params_r__ += N; - validate_non_negative_index("eta_p", "N", N); - num_params_r__ += N; - validate_non_negative_index("gam_p", "N", N); - num_params_r__ += N; - validate_non_negative_index("tau_p", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_bart_par4() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "4", 4); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(4)); - vector_d mu_p(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "4", 4); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(4)); - vector_d sigma(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const 
std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("phi_p"))) - throw std::runtime_error("variable phi_p missing"); - vals_r__ = context__.vals_r("phi_p"); - pos__ = 0U; - validate_non_negative_index("phi_p", "N", N); - context__.validate_dims("initialization", "phi_p", "vector_d", context__.to_vec(N)); - vector_d phi_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - phi_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(phi_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable phi_p: ") + e.what()); - } - - if (!(context__.contains_r("eta_p"))) - throw std::runtime_error("variable eta_p missing"); - vals_r__ = context__.vals_r("eta_p"); - pos__ = 0U; - validate_non_negative_index("eta_p", "N", N); - context__.validate_dims("initialization", "eta_p", "vector_d", context__.to_vec(N)); - vector_d eta_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - eta_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(eta_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable eta_p: ") + e.what()); - } - - if (!(context__.contains_r("gam_p"))) - throw std::runtime_error("variable gam_p missing"); - vals_r__ = context__.vals_r("gam_p"); - pos__ = 0U; - validate_non_negative_index("gam_p", "N", N); - context__.validate_dims("initialization", "gam_p", "vector_d", context__.to_vec(N)); - vector_d gam_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - gam_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(gam_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable gam_p: ") + e.what()); - } - - if (!(context__.contains_r("tau_p"))) - throw std::runtime_error("variable tau_p missing"); - vals_r__ = context__.vals_r("tau_p"); - pos__ = 0U; - 
validate_non_negative_index("tau_p", "N", N); - context__.validate_dims("initialization", "tau_p", "vector_d", context__.to_vec(N)); - vector_d tau_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - tau_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(tau_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau_p: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(4,lp__); - else - mu_p = in__.vector_constrain(4); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,4,lp__); - else - sigma = in__.vector_lb_constrain(0,4); - - Eigen::Matrix phi_p; - (void) phi_p; // dummy to suppress unused var warning - if (jacobian__) - phi_p = in__.vector_constrain(N,lp__); - else - phi_p = in__.vector_constrain(N); - - Eigen::Matrix eta_p; - (void) eta_p; // dummy to suppress unused var warning - if (jacobian__) - eta_p = in__.vector_constrain(N,lp__); - else - eta_p = 
in__.vector_constrain(N); - - Eigen::Matrix gam_p; - (void) gam_p; // dummy to suppress unused var warning - if (jacobian__) - gam_p = in__.vector_constrain(N,lp__); - else - gam_p = in__.vector_constrain(N); - - Eigen::Matrix tau_p; - (void) tau_p; // dummy to suppress unused var warning - if (jacobian__) - tau_p = in__.vector_constrain(N,lp__); - else - tau_p = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("phi", "N", N); - Eigen::Matrix phi(static_cast(N)); - (void) phi; // dummy to suppress unused var warning - - stan::math::initialize(phi, DUMMY_VAR__); - stan::math::fill(phi,DUMMY_VAR__); - validate_non_negative_index("eta", "N", N); - Eigen::Matrix eta(static_cast(N)); - (void) eta; // dummy to suppress unused var warning - - stan::math::initialize(eta, DUMMY_VAR__); - stan::math::fill(eta,DUMMY_VAR__); - validate_non_negative_index("gam", "N", N); - Eigen::Matrix gam(static_cast(N)); - (void) gam; // dummy to suppress unused var warning - - stan::math::initialize(gam, DUMMY_VAR__); - stan::math::fill(gam,DUMMY_VAR__); - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - stan::math::assign(phi, Phi_approx(add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),phi_p)))); - stan::math::assign(eta, stan::math::exp(add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),eta_p)))); - stan::math::assign(gam, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),gam_p)))); - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),tau_p)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(phi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: phi" << 
'[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(eta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: eta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(gam(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: gam" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(tau(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"phi",phi,0); - check_less_or_equal(function__,"phi",phi,1); - check_greater_or_equal(function__,"eta",eta,0); - check_greater_or_equal(function__,"gam",gam,0); - check_greater_or_equal(function__,"tau",tau,0); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(phi_p, 0, 1)); - lp_accum__.add(normal_log(eta_p, 0, 1)); - lp_accum__.add(normal_log(gam_p, 0, 1)); - lp_accum__.add(normal_log(tau_p, 0, 1)); - for (int j = 1; j <= N; ++j) { - { - int n_succ(0); - (void) n_succ; // dummy to suppress unused var warning - - stan::math::fill(n_succ, std::numeric_limits::min()); - stan::math::assign(n_succ,0); - int n_pump(0); - (void) n_pump; // dummy to suppress unused var warning - - stan::math::fill(n_pump, std::numeric_limits::min()); - stan::math::assign(n_pump,0); - - - for (int k = 1; k <= get_base1(Tsubj,j,"Tsubj",1); ++k) { - { - local_scalar_t__ p_burst; - (void) p_burst; // dummy to suppress unused var warning - - 
stan::math::initialize(p_burst, DUMMY_VAR__); - stan::math::fill(p_burst,DUMMY_VAR__); - local_scalar_t__ omega; - (void) omega; // dummy to suppress unused var warning - - stan::math::initialize(omega, DUMMY_VAR__); - stan::math::fill(omega,DUMMY_VAR__); - - - stan::math::assign(p_burst, (1 - ((get_base1(phi,j,"phi",1) + (get_base1(eta,j,"eta",1) * n_succ)) / (1 + (get_base1(eta,j,"eta",1) * n_pump))))); - stan::math::assign(omega, (-(get_base1(gam,j,"gam",1)) / log1m(p_burst))); - for (int l = 1; l <= ((get_base1(get_base1(pumps,j,"pumps",1),k,"pumps",2) + 1) - get_base1(get_base1(explosion,j,"explosion",1),k,"explosion",2)); ++l) { - lp_accum__.add(bernoulli_logit_log(get_base1(get_base1(get_base1(d,j,"d",1),k,"d",2),l,"d",3), (get_base1(tau,j,"tau",1) * (omega - l)))); - } - stan::math::assign(n_succ, (n_succ + (get_base1(get_base1(pumps,j,"pumps",1),k,"pumps",2) - get_base1(get_base1(explosion,j,"explosion",1),k,"explosion",2)))); - stan::math::assign(n_pump, (n_pump + get_base1(get_base1(pumps,j,"pumps",1),k,"pumps",2))); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("phi_p"); - names__.push_back("eta_p"); - names__.push_back("gam_p"); - names__.push_back("tau_p"); - 
names__.push_back("phi"); - names__.push_back("eta"); - names__.push_back("gam"); - names__.push_back("tau"); - names__.push_back("mu_phi"); - names__.push_back("mu_eta"); - names__.push_back("mu_gam"); - names__.push_back("mu_tau"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dims__.push_back(P); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_bart_par4_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - 
vector_d mu_p = in__.vector_constrain(4); - vector_d sigma = in__.vector_lb_constrain(0,4); - vector_d phi_p = in__.vector_constrain(N); - vector_d eta_p = in__.vector_constrain(N); - vector_d gam_p = in__.vector_constrain(N); - vector_d tau_p = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(phi_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(gam_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau_p[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("phi", "N", N); - Eigen::Matrix phi(static_cast(N)); - (void) phi; // dummy to suppress unused var warning - - stan::math::initialize(phi, DUMMY_VAR__); - stan::math::fill(phi,DUMMY_VAR__); - validate_non_negative_index("eta", "N", N); - Eigen::Matrix eta(static_cast(N)); - (void) eta; // dummy to suppress unused var warning - - stan::math::initialize(eta, DUMMY_VAR__); - stan::math::fill(eta,DUMMY_VAR__); - validate_non_negative_index("gam", "N", N); - Eigen::Matrix gam(static_cast(N)); - (void) gam; // dummy to suppress unused var warning - - stan::math::initialize(gam, DUMMY_VAR__); - stan::math::fill(gam,DUMMY_VAR__); - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - stan::math::assign(phi, 
Phi_approx(add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),phi_p)))); - stan::math::assign(eta, stan::math::exp(add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),eta_p)))); - stan::math::assign(gam, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),gam_p)))); - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),tau_p)))); - - // validate transformed parameters - check_greater_or_equal(function__,"phi",phi,0); - check_less_or_equal(function__,"phi",phi,1); - check_greater_or_equal(function__,"eta",eta,0); - check_greater_or_equal(function__,"gam",gam,0); - check_greater_or_equal(function__,"tau",tau,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(phi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(gam[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_phi; - (void) mu_phi; // dummy to suppress unused var warning - - stan::math::initialize(mu_phi, DUMMY_VAR__); - stan::math::fill(mu_phi,DUMMY_VAR__); - stan::math::assign(mu_phi,Phi_approx(get_base1(mu_p,1,"mu_p",1))); - local_scalar_t__ mu_eta; - (void) mu_eta; // dummy to suppress unused var warning - - stan::math::initialize(mu_eta, DUMMY_VAR__); - stan::math::fill(mu_eta,DUMMY_VAR__); - stan::math::assign(mu_eta,stan::math::exp(get_base1(mu_p,2,"mu_p",1))); - local_scalar_t__ mu_gam; - (void) mu_gam; // dummy to suppress unused var warning - - stan::math::initialize(mu_gam, DUMMY_VAR__); - stan::math::fill(mu_gam,DUMMY_VAR__); - stan::math::assign(mu_gam,stan::math::exp(get_base1(mu_p,3,"mu_p",1))); - local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to 
suppress unused var warning - - stan::math::initialize(mu_tau, DUMMY_VAR__); - stan::math::fill(mu_tau,DUMMY_VAR__); - stan::math::assign(mu_tau,stan::math::exp(get_base1(mu_p,4,"mu_p",1))); - local_scalar_t__ log_lik; - (void) log_lik; // dummy to suppress unused var warning - - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - stan::math::assign(log_lik,0); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - validate_non_negative_index("y_pred", "P", P); - vector > > y_pred(N, (vector >(T, (vector(P))))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int j = 1; j <= N; ++j) { - for (int k = 1; k <= T; ++k) { - for (int l = 1; l <= P; ++l) { - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_uni(k), stan::model::cons_list(stan::model::index_uni(l), stan::model::nil_index_list()))), - -(1), - "assigning variable y_pred"); - } - } - } - - for (int j = 1; j <= N; ++j) { - { - int n_succ(0); - (void) n_succ; // dummy to suppress unused var warning - - stan::math::fill(n_succ, std::numeric_limits::min()); - stan::math::assign(n_succ,0); - int n_pump(0); - (void) n_pump; // dummy to suppress unused var warning - - stan::math::fill(n_pump, std::numeric_limits::min()); - stan::math::assign(n_pump,0); - - - for (int k = 1; k <= get_base1(Tsubj,j,"Tsubj",1); ++k) { - { - local_scalar_t__ p_burst; - (void) p_burst; // dummy to suppress unused var warning - - stan::math::initialize(p_burst, DUMMY_VAR__); - stan::math::fill(p_burst,DUMMY_VAR__); - local_scalar_t__ omega; - (void) omega; // dummy to suppress unused var warning - - stan::math::initialize(omega, DUMMY_VAR__); - stan::math::fill(omega,DUMMY_VAR__); - - - stan::math::assign(p_burst, (1 - ((get_base1(phi,j,"phi",1) + (get_base1(eta,j,"eta",1) * n_succ)) / (1 + (get_base1(eta,j,"eta",1) * n_pump))))); - 
stan::math::assign(omega, (-(get_base1(gam,j,"gam",1)) / log1m(p_burst))); - for (int l = 1; l <= ((get_base1(get_base1(pumps,j,"pumps",1),k,"pumps",2) + 1) - get_base1(get_base1(explosion,j,"explosion",1),k,"explosion",2)); ++l) { - - stan::math::assign(log_lik, (log_lik + bernoulli_logit_log(get_base1(get_base1(get_base1(d,j,"d",1),k,"d",2),l,"d",3),(get_base1(tau,j,"tau",1) * (omega - l))))); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_uni(k), stan::model::cons_list(stan::model::index_uni(l), stan::model::nil_index_list()))), - bernoulli_logit_rng((get_base1(tau,j,"tau",1) * (omega - l)), base_rng__), - "assigning variable y_pred"); - } - stan::math::assign(n_succ, (n_succ + (get_base1(get_base1(pumps,j,"pumps",1),k,"pumps",2) - get_base1(get_base1(explosion,j,"explosion",1),k,"explosion",2)))); - stan::math::assign(n_pump, (n_pump + get_base1(get_base1(pumps,j,"pumps",1),k,"pumps",2))); - } - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_phi",mu_phi,0); - check_greater_or_equal(function__,"mu_eta",mu_eta,0); - check_greater_or_equal(function__,"mu_gam",mu_gam,0); - check_greater_or_equal(function__,"mu_tau",mu_tau,0); - - // write generated quantities - vars__.push_back(mu_phi); - vars__.push_back(mu_eta); - vars__.push_back(mu_gam); - vars__.push_back(mu_tau); - vars__.push_back(log_lik); - for (int k_2__ = 0; k_2__ < P; ++k_2__) { - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__][k_2__]); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool 
include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_bart_par4"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "phi_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gam_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "phi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gam" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_phi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_gam"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - for (int k_2__ = 1; k_2__ <= P; ++k_2__) { - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "phi_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gam_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "phi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gam" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_phi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_gam"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - for (int k_2__ = 1; k_2__ <= P; ++k_2__) { - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_choiceRT_ddm_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_choiceRT_ddm"); - reader.add_event(97, 95, "end", "model_choiceRT_ddm"); - return reader; -} - -class model_choiceRT_ddm : public prob_grad { -private: - int N; - int Nu_max; - int Nl_max; - vector Nu; - vector Nl; - vector > RTu; - vector > RTl; - vector minRT; - double RTbound; -public: - model_choiceRT_ddm(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_choiceRT_ddm(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_choiceRT_ddm_namespace::model_choiceRT_ddm"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", 
"N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "Nu_max", "int", context__.to_vec()); - Nu_max = int(0); - vals_i__ = context__.vals_i("Nu_max"); - pos__ = 0; - Nu_max = vals_i__[pos__++]; - context__.validate_dims("data initialization", "Nl_max", "int", context__.to_vec()); - Nl_max = int(0); - vals_i__ = context__.vals_i("Nl_max"); - pos__ = 0; - Nl_max = vals_i__[pos__++]; - validate_non_negative_index("Nu", "N", N); - context__.validate_dims("data initialization", "Nu", "int", context__.to_vec(N)); - validate_non_negative_index("Nu", "N", N); - Nu = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Nu"); - pos__ = 0; - size_t Nu_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Nu_limit_0__; ++i_0__) { - Nu[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("Nl", "N", N); - context__.validate_dims("data initialization", "Nl", "int", context__.to_vec(N)); - validate_non_negative_index("Nl", "N", N); - Nl = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Nl"); - pos__ = 0; - size_t Nl_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Nl_limit_0__; ++i_0__) { - Nl[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("RTu", "N", N); - validate_non_negative_index("RTu", "Nu_max", Nu_max); - context__.validate_dims("data initialization", "RTu", "double", context__.to_vec(N,Nu_max)); - validate_non_negative_index("RTu", "N", N); - validate_non_negative_index("RTu", "Nu_max", Nu_max); - RTu = std::vector >(N,std::vector(Nu_max,double(0))); - vals_r__ = context__.vals_r("RTu"); - pos__ = 0; - size_t RTu_limit_1__ = Nu_max; - for (size_t i_1__ = 0; i_1__ < RTu_limit_1__; ++i_1__) { - size_t RTu_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < RTu_limit_0__; ++i_0__) { - RTu[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("RTl", "N", N); - validate_non_negative_index("RTl", "Nl_max", Nl_max); - 
context__.validate_dims("data initialization", "RTl", "double", context__.to_vec(N,Nl_max)); - validate_non_negative_index("RTl", "N", N); - validate_non_negative_index("RTl", "Nl_max", Nl_max); - RTl = std::vector >(N,std::vector(Nl_max,double(0))); - vals_r__ = context__.vals_r("RTl"); - pos__ = 0; - size_t RTl_limit_1__ = Nl_max; - for (size_t i_1__ = 0; i_1__ < RTl_limit_1__; ++i_1__) { - size_t RTl_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < RTl_limit_0__; ++i_0__) { - RTl[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("minRT", "N", N); - context__.validate_dims("data initialization", "minRT", "double", context__.to_vec(N)); - validate_non_negative_index("minRT", "N", N); - minRT = std::vector(N,double(0)); - vals_r__ = context__.vals_r("minRT"); - pos__ = 0; - size_t minRT_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < minRT_limit_0__; ++i_0__) { - minRT[i_0__] = vals_r__[pos__++]; - } - context__.validate_dims("data initialization", "RTbound", "double", context__.to_vec()); - RTbound = double(0); - vals_r__ = context__.vals_r("RTbound"); - pos__ = 0; - RTbound = vals_r__[pos__++]; - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"Nu_max",Nu_max,0); - check_greater_or_equal(function__,"Nl_max",Nl_max,0); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Nu[k0__]",Nu[k0__],0); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Nl[k0__]",Nl[k0__],0); - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "4", 4); - num_params_r__ += 4; - validate_non_negative_index("sigma", "4", 4); - num_params_r__ += 4; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - 
validate_non_negative_index("delta_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("tau_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_choiceRT_ddm() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "4", 4); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(4)); - vector_d mu_p(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "4", 4); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(4)); - vector_d sigma(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - 
vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - if (!(context__.contains_r("delta_pr"))) - throw std::runtime_error("variable delta_pr missing"); - vals_r__ = context__.vals_r("delta_pr"); - pos__ = 0U; - validate_non_negative_index("delta_pr", "N", N); - context__.validate_dims("initialization", "delta_pr", "vector_d", context__.to_vec(N)); - vector_d delta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - delta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(delta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable delta_pr: ") + e.what()); - } - - if (!(context__.contains_r("tau_pr"))) - throw std::runtime_error("variable tau_pr missing"); - vals_r__ = context__.vals_r("tau_pr"); - pos__ = 0U; - validate_non_negative_index("tau_pr", "N", N); - context__.validate_dims("initialization", "tau_pr", "vector_d", context__.to_vec(N)); - vector_d 
tau_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - tau_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(tau_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(4,lp__); - else - mu_p = in__.vector_constrain(4); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,4,lp__); - else - sigma = in__.vector_lb_constrain(0,4); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - Eigen::Matrix delta_pr; - (void) delta_pr; // dummy to suppress unused var warning - if (jacobian__) - 
delta_pr = in__.vector_constrain(N,lp__); - else - delta_pr = in__.vector_constrain(N); - - Eigen::Matrix tau_pr; - (void) tau_pr; // dummy to suppress unused var warning - if (jacobian__) - tau_pr = in__.vector_constrain(N,lp__); - else - tau_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - validate_non_negative_index("delta", "N", N); - Eigen::Matrix delta(static_cast(N)); - (void) delta; // dummy to suppress unused var warning - - stan::math::initialize(delta, DUMMY_VAR__); - stan::math::fill(delta,DUMMY_VAR__); - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))), - "assigning variable beta"); - stan::model::assign(tau, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - ((Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * (get_base1(minRT,i,"minRT",1) - RTbound)) + RTbound), - "assigning variable tau"); - } - stan::math::assign(alpha, stan::math::exp(add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),alpha_pr)))); - stan::math::assign(delta, 
stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),delta_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(delta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: delta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(tau(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"alpha",alpha,0); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,1); - check_greater_or_equal(function__,"delta",delta,0); - check_greater_or_equal(function__,"tau",tau,RTbound); - check_less_or_equal(function__,"tau",tau,max(minRT)); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(alpha_pr, 0, 1)); - lp_accum__.add(normal_log(beta_pr, 0, 1)); - lp_accum__.add(normal_log(delta_pr, 0, 1)); - lp_accum__.add(normal_log(tau_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - - lp_accum__.add(wiener_log(stan::model::rvalue(RTu, stan::model::cons_list(stan::model::index_uni(i), 
stan::model::cons_list(stan::model::index_max(get_base1(Nu,i,"Nu",1)), stan::model::nil_index_list())), "RTu"), get_base1(alpha,i,"alpha",1), get_base1(tau,i,"tau",1), get_base1(beta,i,"beta",1), get_base1(delta,i,"delta",1))); - lp_accum__.add(wiener_log(stan::model::rvalue(RTl, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_max(get_base1(Nl,i,"Nl",1)), stan::model::nil_index_list())), "RTl"), get_base1(alpha,i,"alpha",1), get_base1(tau,i,"tau",1), (1 - get_base1(beta,i,"beta",1)), -(get_base1(delta,i,"delta",1)))); - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("alpha_pr"); - names__.push_back("beta_pr"); - names__.push_back("delta_pr"); - names__.push_back("tau_pr"); - names__.push_back("alpha"); - names__.push_back("beta"); - names__.push_back("delta"); - names__.push_back("tau"); - names__.push_back("mu_alpha"); - names__.push_back("mu_beta"); - names__.push_back("mu_delta"); - names__.push_back("mu_tau"); - names__.push_back("log_lik"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(4); - 
dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_choiceRT_ddm_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(4); - vector_d sigma = in__.vector_lb_constrain(0,4); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - vector_d delta_pr = in__.vector_constrain(N); - vector_d tau_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } 
- for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(delta_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - validate_non_negative_index("delta", "N", N); - Eigen::Matrix delta(static_cast(N)); - (void) delta; // dummy to suppress unused var warning - - stan::math::initialize(delta, DUMMY_VAR__); - stan::math::fill(delta,DUMMY_VAR__); - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))), - "assigning variable beta"); - stan::model::assign(tau, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - ((Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * (get_base1(minRT,i,"minRT",1) - RTbound)) + RTbound), - "assigning variable tau"); - } - stan::math::assign(alpha, 
stan::math::exp(add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),alpha_pr)))); - stan::math::assign(delta, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),delta_pr)))); - - // validate transformed parameters - check_greater_or_equal(function__,"alpha",alpha,0); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,1); - check_greater_or_equal(function__,"delta",delta,0); - check_greater_or_equal(function__,"tau",tau,RTbound); - check_less_or_equal(function__,"tau",tau,max(minRT)); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(delta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - local_scalar_t__ mu_delta; - (void) mu_delta; // dummy to suppress unused var warning - - stan::math::initialize(mu_delta, DUMMY_VAR__); - stan::math::fill(mu_delta,DUMMY_VAR__); - local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to suppress unused var warning - - stan::math::initialize(mu_tau, DUMMY_VAR__); - stan::math::fill(mu_tau,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - - - stan::math::assign(mu_alpha, 
stan::math::exp(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_beta, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_delta, stan::math::exp(get_base1(mu_p,3,"mu_p",1))); - stan::math::assign(mu_tau, ((Phi_approx(get_base1(mu_p,4,"mu_p",1)) * (mean(minRT) - RTbound)) + RTbound)); - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - wiener_log(stan::model::rvalue(RTu, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_max(get_base1(Nu,i,"Nu",1)), stan::model::nil_index_list())), "RTu"),get_base1(alpha,i,"alpha",1),get_base1(tau,i,"tau",1),get_base1(beta,i,"beta",1),get_base1(delta,i,"delta",1)), - "assigning variable log_lik"); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + wiener_log(stan::model::rvalue(RTl, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_max(get_base1(Nl,i,"Nl",1)), stan::model::nil_index_list())), "RTl"),get_base1(alpha,i,"alpha",1),get_base1(tau,i,"tau",1),(1 - get_base1(beta,i,"beta",1)),-(get_base1(delta,i,"delta",1))))), - "assigning variable log_lik"); - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_alpha",mu_alpha,0); - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,1); - check_greater_or_equal(function__,"mu_delta",mu_delta,0); - check_greater_or_equal(function__,"mu_tau",mu_tau,RTbound); - check_less_or_equal(function__,"mu_tau",mu_tau,max(minRT)); - - // write generated quantities - vars__.push_back(mu_alpha); - vars__.push_back(mu_beta); - vars__.push_back(mu_delta); - vars__.push_back(mu_tau); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - - } catch (const std::exception& e) 
{ - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_choiceRT_ddm"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "delta_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "delta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_delta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "delta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "delta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_delta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_choiceRT_ddm_single_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_choiceRT_ddm_single"); - reader.add_event(57, 55, "end", "model_choiceRT_ddm_single"); - return reader; -} - -class model_choiceRT_ddm_single : public prob_grad { -private: - int Nu; - int Nl; - vector RTu; - vector RTl; - double minRT; - double RTbound; -public: - model_choiceRT_ddm_single(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_choiceRT_ddm_single(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_choiceRT_ddm_single_namespace::model_choiceRT_ddm_single"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data 
initialization", "Nu", "int", context__.to_vec()); - Nu = int(0); - vals_i__ = context__.vals_i("Nu"); - pos__ = 0; - Nu = vals_i__[pos__++]; - context__.validate_dims("data initialization", "Nl", "int", context__.to_vec()); - Nl = int(0); - vals_i__ = context__.vals_i("Nl"); - pos__ = 0; - Nl = vals_i__[pos__++]; - validate_non_negative_index("RTu", "Nu", Nu); - context__.validate_dims("data initialization", "RTu", "double", context__.to_vec(Nu)); - validate_non_negative_index("RTu", "Nu", Nu); - RTu = std::vector(Nu,double(0)); - vals_r__ = context__.vals_r("RTu"); - pos__ = 0; - size_t RTu_limit_0__ = Nu; - for (size_t i_0__ = 0; i_0__ < RTu_limit_0__; ++i_0__) { - RTu[i_0__] = vals_r__[pos__++]; - } - validate_non_negative_index("RTl", "Nl", Nl); - context__.validate_dims("data initialization", "RTl", "double", context__.to_vec(Nl)); - validate_non_negative_index("RTl", "Nl", Nl); - RTl = std::vector(Nl,double(0)); - vals_r__ = context__.vals_r("RTl"); - pos__ = 0; - size_t RTl_limit_0__ = Nl; - for (size_t i_0__ = 0; i_0__ < RTl_limit_0__; ++i_0__) { - RTl[i_0__] = vals_r__[pos__++]; - } - context__.validate_dims("data initialization", "minRT", "double", context__.to_vec()); - minRT = double(0); - vals_r__ = context__.vals_r("minRT"); - pos__ = 0; - minRT = vals_r__[pos__++]; - context__.validate_dims("data initialization", "RTbound", "double", context__.to_vec()); - RTbound = double(0); - vals_r__ = context__.vals_r("RTbound"); - pos__ = 0; - RTbound = vals_r__[pos__++]; - - // validate, data variables - check_greater_or_equal(function__,"Nu",Nu,0); - check_greater_or_equal(function__,"Nl",Nl,0); - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - ++num_params_r__; - ++num_params_r__; - ++num_params_r__; - ++num_params_r__; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line 
prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_choiceRT_ddm_single() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("alpha"))) - throw std::runtime_error("variable alpha missing"); - vals_r__ = context__.vals_r("alpha"); - pos__ = 0U; - context__.validate_dims("initialization", "alpha", "double", context__.to_vec()); - double alpha(0); - alpha = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,alpha); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha: ") + e.what()); - } - - if (!(context__.contains_r("beta"))) - throw std::runtime_error("variable beta missing"); - vals_r__ = context__.vals_r("beta"); - pos__ = 0U; - context__.validate_dims("initialization", "beta", "double", context__.to_vec()); - double beta(0); - beta = vals_r__[pos__++]; - try { - writer__.scalar_lub_unconstrain(0,1,beta); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta: ") + e.what()); - } - - if (!(context__.contains_r("delta"))) - throw std::runtime_error("variable delta missing"); - vals_r__ = context__.vals_r("delta"); - pos__ = 0U; - context__.validate_dims("initialization", "delta", "double", context__.to_vec()); - double delta(0); - delta = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,delta); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable delta: ") + e.what()); - } - - if (!(context__.contains_r("tau"))) - throw std::runtime_error("variable tau missing"); - vals_r__ = 
context__.vals_r("tau"); - pos__ = 0U; - context__.validate_dims("initialization", "tau", "double", context__.to_vec()); - double tau(0); - tau = vals_r__[pos__++]; - try { - writer__.scalar_lub_unconstrain(RTbound,minRT,tau); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - local_scalar_t__ alpha; - (void) alpha; // dummy to suppress unused var warning - if (jacobian__) - alpha = in__.scalar_lb_constrain(0,lp__); - else - alpha = in__.scalar_lb_constrain(0); - - local_scalar_t__ beta; - (void) beta; // dummy to suppress unused var warning - if (jacobian__) - beta = in__.scalar_lub_constrain(0,1,lp__); - else - beta = in__.scalar_lub_constrain(0,1); - - local_scalar_t__ delta; - (void) delta; // dummy to suppress unused var warning - if (jacobian__) - delta = in__.scalar_lb_constrain(0,lp__); - else - delta = in__.scalar_lb_constrain(0); - - local_scalar_t__ tau; - (void) tau; // dummy to suppress unused var warning - if (jacobian__) - tau = in__.scalar_lub_constrain(RTbound,minRT,lp__); - else - tau = in__.scalar_lub_constrain(RTbound,minRT); - - - 
// transformed parameters - - - - // validate transformed parameters - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - - // model body - - lp_accum__.add(uniform_log(alpha, 0, 5)); - lp_accum__.add(uniform_log(beta, 0, 1)); - lp_accum__.add(normal_log(delta, 0, 2)); - lp_accum__.add(uniform_log(tau, 0.10000000000000001, minRT)); - lp_accum__.add(wiener_log(RTu, alpha, tau, beta, delta)); - lp_accum__.add(wiener_log(RTl, alpha, tau, (1 - beta), -(delta))); - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("alpha"); - names__.push_back("beta"); - names__.push_back("delta"); - names__.push_back("tau"); - names__.push_back("log_lik"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - 
typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_choiceRT_ddm_single_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - double alpha = in__.scalar_lb_constrain(0); - double beta = in__.scalar_lub_constrain(0,1); - double delta = in__.scalar_lb_constrain(0); - double tau = in__.scalar_lub_constrain(RTbound,minRT); - vars__.push_back(alpha); - vars__.push_back(beta); - vars__.push_back(delta); - vars__.push_back(tau); - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - - - - // validate transformed parameters - - // write transformed parameters - if (include_tparams__) { - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ log_lik; - (void) log_lik; // dummy to suppress unused var warning - - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - - - - stan::math::assign(log_lik, wiener_log(RTu,alpha,tau,beta,delta)); - stan::math::assign(log_lik, stan::model::deep_copy((log_lik + wiener_log(RTl,alpha,tau,(1 - beta),-(delta))))); - - // validate generated quantities - - // write generated quantities - vars__.push_back(log_lik); - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector 
params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_choiceRT_ddm_single"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "delta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "tau"; - param_names__.push_back(param_name_stream__.str()); - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "delta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - 
param_name_stream__ << "tau"; - param_names__.push_back(param_name_stream__.str()); - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_choiceRT_lba_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_choiceRT_lba"); - reader.add_event(273, 271, "end", "model_choiceRT_lba"); - return reader; -} - -template -typename boost::math::tools::promote_args::type>::type -lba_pdf(const T0__& t, - const T1__& b, - const T2__& A, - const T3__& v_pdf, - const T4__& s, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - (void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - { - local_scalar_t__ b_A_tv_ts; - (void) b_A_tv_ts; // dummy to suppress unused var warning - - stan::math::initialize(b_A_tv_ts, DUMMY_VAR__); - stan::math::fill(b_A_tv_ts,DUMMY_VAR__); - local_scalar_t__ b_tv_ts; - (void) b_tv_ts; // dummy to suppress unused var warning - - stan::math::initialize(b_tv_ts, DUMMY_VAR__); - stan::math::fill(b_tv_ts,DUMMY_VAR__); - local_scalar_t__ term_1b; - (void) term_1b; // dummy to suppress unused var warning - - stan::math::initialize(term_1b, DUMMY_VAR__); - 
stan::math::fill(term_1b,DUMMY_VAR__); - local_scalar_t__ term_2b; - (void) term_2b; // dummy to suppress unused var warning - - stan::math::initialize(term_2b, DUMMY_VAR__); - stan::math::fill(term_2b,DUMMY_VAR__); - local_scalar_t__ term_3b; - (void) term_3b; // dummy to suppress unused var warning - - stan::math::initialize(term_3b, DUMMY_VAR__); - stan::math::fill(term_3b,DUMMY_VAR__); - local_scalar_t__ term_4b; - (void) term_4b; // dummy to suppress unused var warning - - stan::math::initialize(term_4b, DUMMY_VAR__); - stan::math::fill(term_4b,DUMMY_VAR__); - local_scalar_t__ pdf; - (void) pdf; // dummy to suppress unused var warning - - stan::math::initialize(pdf, DUMMY_VAR__); - stan::math::fill(pdf,DUMMY_VAR__); - - - stan::math::assign(b_A_tv_ts, (((b - A) - (t * v_pdf)) / (t * s))); - stan::math::assign(b_tv_ts, ((b - (t * v_pdf)) / (t * s))); - stan::math::assign(term_1b, (v_pdf * Phi(b_A_tv_ts))); - stan::math::assign(term_2b, (s * stan::math::exp(normal_log(stan::math::fabs(b_A_tv_ts),0,1)))); - stan::math::assign(term_3b, (v_pdf * Phi(b_tv_ts))); - stan::math::assign(term_4b, (s * stan::math::exp(normal_log(stan::math::fabs(b_tv_ts),0,1)))); - stan::math::assign(pdf, ((1 / A) * (((-(term_1b) + term_2b) + term_3b) - term_4b))); - return stan::math::promote_scalar(pdf); - } - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} - - -struct lba_pdf_functor__ { - template - typename boost::math::tools::promote_args::type>::type - operator()(const T0__& t, - const T1__& b, - const T2__& A, - const T3__& v_pdf, - const T4__& s, std::ostream* pstream__) const { - return lba_pdf(t, b, A, v_pdf, s, pstream__); - } -}; - -template -typename boost::math::tools::promote_args::type>::type -lba_cdf(const T0__& t, - const T1__& b, - const T2__& A, - const T3__& v_cdf, 
- const T4__& s, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - (void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - { - local_scalar_t__ b_A_tv; - (void) b_A_tv; // dummy to suppress unused var warning - - stan::math::initialize(b_A_tv, DUMMY_VAR__); - stan::math::fill(b_A_tv,DUMMY_VAR__); - local_scalar_t__ b_tv; - (void) b_tv; // dummy to suppress unused var warning - - stan::math::initialize(b_tv, DUMMY_VAR__); - stan::math::fill(b_tv,DUMMY_VAR__); - local_scalar_t__ ts; - (void) ts; // dummy to suppress unused var warning - - stan::math::initialize(ts, DUMMY_VAR__); - stan::math::fill(ts,DUMMY_VAR__); - local_scalar_t__ term_1a; - (void) term_1a; // dummy to suppress unused var warning - - stan::math::initialize(term_1a, DUMMY_VAR__); - stan::math::fill(term_1a,DUMMY_VAR__); - local_scalar_t__ term_2a; - (void) term_2a; // dummy to suppress unused var warning - - stan::math::initialize(term_2a, DUMMY_VAR__); - stan::math::fill(term_2a,DUMMY_VAR__); - local_scalar_t__ term_3a; - (void) term_3a; // dummy to suppress unused var warning - - stan::math::initialize(term_3a, DUMMY_VAR__); - stan::math::fill(term_3a,DUMMY_VAR__); - local_scalar_t__ term_4a; - (void) term_4a; // dummy to suppress unused var warning - - stan::math::initialize(term_4a, DUMMY_VAR__); - stan::math::fill(term_4a,DUMMY_VAR__); - local_scalar_t__ cdf; - (void) cdf; // dummy to suppress unused var warning - - stan::math::initialize(cdf, DUMMY_VAR__); - stan::math::fill(cdf,DUMMY_VAR__); - - - stan::math::assign(b_A_tv, ((b - A) - (t * v_cdf))); - stan::math::assign(b_tv, (b - (t * v_cdf))); - stan::math::assign(ts, (t * s)); - stan::math::assign(term_1a, ((b_A_tv / A) * Phi((b_A_tv / ts)))); - 
stan::math::assign(term_2a, ((b_tv / A) * Phi((b_tv / ts)))); - stan::math::assign(term_3a, ((ts / A) * stan::math::exp(normal_log(stan::math::fabs((b_A_tv / ts)),0,1)))); - stan::math::assign(term_4a, ((ts / A) * stan::math::exp(normal_log(stan::math::fabs((b_tv / ts)),0,1)))); - stan::math::assign(cdf, ((((1 + term_1a) - term_2a) + term_3a) - term_4a)); - return stan::math::promote_scalar(cdf); - } - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} - - -struct lba_cdf_functor__ { - template - typename boost::math::tools::promote_args::type>::type - operator()(const T0__& t, - const T1__& b, - const T2__& A, - const T3__& v_cdf, - const T4__& s, std::ostream* pstream__) const { - return lba_cdf(t, b, A, v_cdf, s, pstream__); - } -}; - -template -typename boost::math::tools::promote_args::type>::type -lba_lpdf(const Eigen::Matrix& RT, - const T1__& d, - const T2__& A, - const Eigen::Matrix& v, - const T4__& s, - const T5__& tau, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - (void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - { - local_scalar_t__ t; - (void) t; // dummy to suppress unused var warning - - stan::math::initialize(t, DUMMY_VAR__); - stan::math::fill(t,DUMMY_VAR__); - local_scalar_t__ b; - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - local_scalar_t__ cdf; - (void) cdf; // dummy to suppress unused var warning - - stan::math::initialize(cdf, DUMMY_VAR__); - 
stan::math::fill(cdf,DUMMY_VAR__); - local_scalar_t__ pdf; - (void) pdf; // dummy to suppress unused var warning - - stan::math::initialize(pdf, DUMMY_VAR__); - stan::math::fill(pdf,DUMMY_VAR__); - validate_non_negative_index("prob", "cols(RT)", cols(RT)); - Eigen::Matrix prob(static_cast(cols(RT))); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - local_scalar_t__ out; - (void) out; // dummy to suppress unused var warning - - stan::math::initialize(out, DUMMY_VAR__); - stan::math::fill(out,DUMMY_VAR__); - local_scalar_t__ prob_neg; - (void) prob_neg; // dummy to suppress unused var warning - - stan::math::initialize(prob_neg, DUMMY_VAR__); - stan::math::fill(prob_neg,DUMMY_VAR__); - - - stan::math::assign(b, (A + d)); - for (int i = 1; i <= cols(RT); ++i) { - - stan::math::assign(t, (get_base1(RT,1,i,"RT",1) - tau)); - if (as_bool(logical_gt(t,0))) { - - stan::math::assign(cdf, 1); - for (int j = 1; j <= num_elements(v); ++j) { - - if (as_bool(logical_eq(get_base1(RT,2,i,"RT",1),j))) { - - stan::math::assign(pdf, lba_pdf(t,b,A,get_base1(v,j,"v",1),s, pstream__)); - } else { - - stan::math::assign(cdf, stan::model::deep_copy((lba_cdf(t,b,A,get_base1(v,j,"v",1),s, pstream__) * cdf))); - } - } - stan::math::assign(prob_neg, 1); - for (int j = 1; j <= num_elements(v); ++j) { - - stan::math::assign(prob_neg, stan::model::deep_copy((Phi((-(get_base1(v,j,"v",1)) / s)) * prob_neg))); - } - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (pdf * (1 - cdf)), - "assigning variable prob"); - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(prob,i,"prob",1) / (1 - prob_neg))), - "assigning variable prob"); - if (as_bool(logical_lt(get_base1(prob,i,"prob",1),1e-10))) { - - stan::model::assign(prob, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 1e-10, - "assigning variable prob"); - } - } else { - - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 1e-10, - "assigning variable prob"); - } - } - stan::math::assign(out, sum(stan::math::log(prob))); - return stan::math::promote_scalar(out); - } - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} -template -typename boost::math::tools::promote_args::type>::type -lba_lpdf(const Eigen::Matrix& RT, - const T1__& d, - const T2__& A, - const Eigen::Matrix& v, - const T4__& s, - const T5__& tau, std::ostream* pstream__) { - return lba_lpdf(RT,d,A,v,s,tau, pstream__); -} - - -struct lba_lpdf_functor__ { - template - typename boost::math::tools::promote_args::type>::type - operator()(const Eigen::Matrix& RT, - const T1__& d, - const T2__& A, - const Eigen::Matrix& v, - const T4__& s, - const T5__& tau, std::ostream* pstream__) const { - return lba_lpdf(RT, d, A, v, s, tau, pstream__); - } -}; - -template -Eigen::Matrix::type>::type, Eigen::Dynamic,1> -lba_rng(const T0__& d, - const T1__& A, - const Eigen::Matrix& v, - const T3__& s, - const T4__& tau, RNG& base_rng__, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - (void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - { - int get_pos_drift(0); - (void) get_pos_drift; // dummy to suppress unused var warning - - stan::math::fill(get_pos_drift, std::numeric_limits::min()); - int no_pos_drift(0); - 
(void) no_pos_drift; // dummy to suppress unused var warning - - stan::math::fill(no_pos_drift, std::numeric_limits::min()); - int get_first_pos(0); - (void) get_first_pos; // dummy to suppress unused var warning - - stan::math::fill(get_first_pos, std::numeric_limits::min()); - validate_non_negative_index("drift", "num_elements(v)", num_elements(v)); - Eigen::Matrix drift(static_cast(num_elements(v))); - (void) drift; // dummy to suppress unused var warning - - stan::math::initialize(drift, DUMMY_VAR__); - stan::math::fill(drift,DUMMY_VAR__); - int max_iter(0); - (void) max_iter; // dummy to suppress unused var warning - - stan::math::fill(max_iter, std::numeric_limits::min()); - int iter(0); - (void) iter; // dummy to suppress unused var warning - - stan::math::fill(iter, std::numeric_limits::min()); - validate_non_negative_index("start", "num_elements(v)", num_elements(v)); - vector start(num_elements(v)); - stan::math::initialize(start, DUMMY_VAR__); - stan::math::fill(start,DUMMY_VAR__); - validate_non_negative_index("ttf", "num_elements(v)", num_elements(v)); - vector ttf(num_elements(v)); - stan::math::initialize(ttf, DUMMY_VAR__); - stan::math::fill(ttf,DUMMY_VAR__); - validate_non_negative_index("resp", "num_elements(v)", num_elements(v)); - vector resp(num_elements(v), 0); - stan::math::fill(resp, std::numeric_limits::min()); - local_scalar_t__ rt; - (void) rt; // dummy to suppress unused var warning - - stan::math::initialize(rt, DUMMY_VAR__); - stan::math::fill(rt,DUMMY_VAR__); - validate_non_negative_index("pred", "2", 2); - Eigen::Matrix pred(static_cast(2)); - (void) pred; // dummy to suppress unused var warning - - stan::math::initialize(pred, DUMMY_VAR__); - stan::math::fill(pred,DUMMY_VAR__); - local_scalar_t__ b; - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - - - stan::math::assign(get_pos_drift, 1); - stan::math::assign(no_pos_drift, 0); - 
stan::math::assign(max_iter, 1000); - stan::math::assign(iter, 0); - while (as_bool(get_pos_drift)) { - - for (int j = 1; j <= num_elements(v); ++j) { - - stan::model::assign(drift, - stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), - normal_rng(get_base1(v,j,"v",1),s, base_rng__), - "assigning variable drift"); - if (as_bool(logical_gt(get_base1(drift,j,"drift",1),0))) { - - stan::math::assign(get_pos_drift, 0); - } - } - stan::math::assign(iter, stan::model::deep_copy((iter + 1))); - if (as_bool(logical_gt(iter,max_iter))) { - - stan::math::assign(get_pos_drift, 0); - stan::math::assign(no_pos_drift, 1); - } - } - if (as_bool(no_pos_drift)) { - - stan::model::assign(pred, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - -(1), - "assigning variable pred"); - stan::model::assign(pred, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - -(1), - "assigning variable pred"); - } else { - - stan::math::assign(b, (A + d)); - for (int i = 1; i <= num_elements(v); ++i) { - - stan::model::assign(start, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - uniform_rng(0,A, base_rng__), - "assigning variable start"); - stan::model::assign(ttf, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - ((b - get_base1(start,i,"start",1)) / get_base1(drift,i,"drift",1)), - "assigning variable ttf"); - } - stan::math::assign(resp, sort_indices_asc(ttf)); - stan::math::assign(ttf, stan::model::deep_copy(sort_asc(ttf))); - stan::math::assign(get_first_pos, 1); - stan::math::assign(iter, 1); - while (as_bool(get_first_pos)) { - - if (as_bool(logical_gt(get_base1(ttf,iter,"ttf",1),0))) { - - stan::model::assign(pred, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - get_base1(ttf,iter,"ttf",1), - "assigning variable pred"); - stan::model::assign(pred, - 
stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - get_base1(resp,iter,"resp",1), - "assigning variable pred"); - stan::math::assign(get_first_pos, 0); - } - stan::math::assign(iter, stan::model::deep_copy((iter + 1))); - } - } - return stan::math::promote_scalar(pred); - } - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} - - -struct lba_rng_functor__ { - template - Eigen::Matrix::type>::type, Eigen::Dynamic,1> - operator()(const T0__& d, - const T1__& A, - const Eigen::Matrix& v, - const T3__& s, - const T4__& tau, RNG& base_rng__, std::ostream* pstream__) const { - return lba_rng(d, A, v, s, tau, base_rng__, pstream__); - } -}; - -class model_choiceRT_lba : public prob_grad { -private: - int N; - int Max_tr; - int N_choices; - int N_cond; - vector > N_tr_cond; - vector > RT; -public: - model_choiceRT_lba(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_choiceRT_lba(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_choiceRT_lba_namespace::model_choiceRT_lba"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ 
DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "Max_tr", "int", context__.to_vec()); - Max_tr = int(0); - vals_i__ = context__.vals_i("Max_tr"); - pos__ = 0; - Max_tr = vals_i__[pos__++]; - context__.validate_dims("data initialization", "N_choices", "int", context__.to_vec()); - N_choices = int(0); - vals_i__ = context__.vals_i("N_choices"); - pos__ = 0; - N_choices = vals_i__[pos__++]; - context__.validate_dims("data initialization", "N_cond", "int", context__.to_vec()); - N_cond = int(0); - vals_i__ = context__.vals_i("N_cond"); - pos__ = 0; - N_cond = vals_i__[pos__++]; - validate_non_negative_index("N_tr_cond", "N", N); - validate_non_negative_index("N_tr_cond", "N_cond", N_cond); - context__.validate_dims("data initialization", "N_tr_cond", "int", context__.to_vec(N,N_cond)); - validate_non_negative_index("N_tr_cond", "N", N); - validate_non_negative_index("N_tr_cond", "N_cond", N_cond); - N_tr_cond = std::vector >(N,std::vector(N_cond,int(0))); - vals_i__ = context__.vals_i("N_tr_cond"); - pos__ = 0; - size_t N_tr_cond_limit_1__ = N_cond; - for (size_t i_1__ = 0; i_1__ < N_tr_cond_limit_1__; ++i_1__) { - size_t N_tr_cond_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < N_tr_cond_limit_0__; ++i_0__) { - N_tr_cond[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("RT", "N", N); - validate_non_negative_index("RT", "N_cond", N_cond); - validate_non_negative_index("RT", "2", 2); - validate_non_negative_index("RT", "Max_tr", Max_tr); - context__.validate_dims("data initialization", "RT", "matrix_d", context__.to_vec(N,N_cond,2,Max_tr)); - validate_non_negative_index("RT", "N", N); - validate_non_negative_index("RT", "N_cond", N_cond); 
- validate_non_negative_index("RT", "2", 2); - validate_non_negative_index("RT", "Max_tr", Max_tr); - RT = std::vector >(N,std::vector(N_cond,matrix_d(static_cast(2),static_cast(Max_tr)))); - vals_r__ = context__.vals_r("RT"); - pos__ = 0; - size_t RT_m_mat_lim__ = 2; - size_t RT_n_mat_lim__ = Max_tr; - for (size_t n_mat__ = 0; n_mat__ < RT_n_mat_lim__; ++n_mat__) { - for (size_t m_mat__ = 0; m_mat__ < RT_m_mat_lim__; ++m_mat__) { - size_t RT_limit_1__ = N_cond; - for (size_t i_1__ = 0; i_1__ < RT_limit_1__; ++i_1__) { - size_t RT_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < RT_limit_0__; ++i_0__) { - RT[i_0__][i_1__](m_mat__,n_mat__) = vals_r__[pos__++]; - } - } - } - } - - // validate, data variables - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - ++num_params_r__; - ++num_params_r__; - ++num_params_r__; - validate_non_negative_index("mu_v", "N_choices", N_choices); - validate_non_negative_index("mu_v", "N_cond", N_cond); - num_params_r__ += N_choices * N_cond; - ++num_params_r__; - ++num_params_r__; - ++num_params_r__; - validate_non_negative_index("sigma_v", "N_choices", N_choices); - validate_non_negative_index("sigma_v", "N_cond", N_cond); - num_params_r__ += N_choices * N_cond; - validate_non_negative_index("d", "N", N); - num_params_r__ += N; - validate_non_negative_index("A", "N", N); - num_params_r__ += N; - validate_non_negative_index("tau", "N", N); - num_params_r__ += N; - validate_non_negative_index("v", "N_choices", N_choices); - validate_non_negative_index("v", "N", N); - validate_non_negative_index("v", "N_cond", N_cond); - num_params_r__ += N_choices * N * N_cond; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - 
~model_choiceRT_lba() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_d"))) - throw std::runtime_error("variable mu_d missing"); - vals_r__ = context__.vals_r("mu_d"); - pos__ = 0U; - context__.validate_dims("initialization", "mu_d", "double", context__.to_vec()); - double mu_d(0); - mu_d = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,mu_d); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_d: ") + e.what()); - } - - if (!(context__.contains_r("mu_A"))) - throw std::runtime_error("variable mu_A missing"); - vals_r__ = context__.vals_r("mu_A"); - pos__ = 0U; - context__.validate_dims("initialization", "mu_A", "double", context__.to_vec()); - double mu_A(0); - mu_A = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,mu_A); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_A: ") + e.what()); - } - - if (!(context__.contains_r("mu_tau"))) - throw std::runtime_error("variable mu_tau missing"); - vals_r__ = context__.vals_r("mu_tau"); - pos__ = 0U; - context__.validate_dims("initialization", "mu_tau", "double", context__.to_vec()); - double mu_tau(0); - mu_tau = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,mu_tau); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_tau: ") + e.what()); - } - - if (!(context__.contains_r("mu_v"))) - throw std::runtime_error("variable mu_v missing"); - vals_r__ = context__.vals_r("mu_v"); - pos__ = 0U; - validate_non_negative_index("mu_v", "N_cond", N_cond); - validate_non_negative_index("mu_v", "N_choices", 
N_choices); - context__.validate_dims("initialization", "mu_v", "vector_d", context__.to_vec(N_cond,N_choices)); - std::vector mu_v(N_cond,vector_d(static_cast(N_choices))); - for (int j1__ = 0U; j1__ < N_choices; ++j1__) - for (int i0__ = 0U; i0__ < N_cond; ++i0__) - mu_v[i0__](j1__) = vals_r__[pos__++]; - for (int i0__ = 0U; i0__ < N_cond; ++i0__) - try { - writer__.vector_lb_unconstrain(0,mu_v[i0__]); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_v: ") + e.what()); - } - - if (!(context__.contains_r("sigma_d"))) - throw std::runtime_error("variable sigma_d missing"); - vals_r__ = context__.vals_r("sigma_d"); - pos__ = 0U; - context__.validate_dims("initialization", "sigma_d", "double", context__.to_vec()); - double sigma_d(0); - sigma_d = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,sigma_d); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma_d: ") + e.what()); - } - - if (!(context__.contains_r("sigma_A"))) - throw std::runtime_error("variable sigma_A missing"); - vals_r__ = context__.vals_r("sigma_A"); - pos__ = 0U; - context__.validate_dims("initialization", "sigma_A", "double", context__.to_vec()); - double sigma_A(0); - sigma_A = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,sigma_A); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma_A: ") + e.what()); - } - - if (!(context__.contains_r("sigma_tau"))) - throw std::runtime_error("variable sigma_tau missing"); - vals_r__ = context__.vals_r("sigma_tau"); - pos__ = 0U; - context__.validate_dims("initialization", "sigma_tau", "double", context__.to_vec()); - double sigma_tau(0); - sigma_tau = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,sigma_tau); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma_tau: ") + e.what()); - 
} - - if (!(context__.contains_r("sigma_v"))) - throw std::runtime_error("variable sigma_v missing"); - vals_r__ = context__.vals_r("sigma_v"); - pos__ = 0U; - validate_non_negative_index("sigma_v", "N_cond", N_cond); - validate_non_negative_index("sigma_v", "N_choices", N_choices); - context__.validate_dims("initialization", "sigma_v", "vector_d", context__.to_vec(N_cond,N_choices)); - std::vector sigma_v(N_cond,vector_d(static_cast(N_choices))); - for (int j1__ = 0U; j1__ < N_choices; ++j1__) - for (int i0__ = 0U; i0__ < N_cond; ++i0__) - sigma_v[i0__](j1__) = vals_r__[pos__++]; - for (int i0__ = 0U; i0__ < N_cond; ++i0__) - try { - writer__.vector_lb_unconstrain(0,sigma_v[i0__]); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma_v: ") + e.what()); - } - - if (!(context__.contains_r("d"))) - throw std::runtime_error("variable d missing"); - vals_r__ = context__.vals_r("d"); - pos__ = 0U; - validate_non_negative_index("d", "N", N); - context__.validate_dims("initialization", "d", "double", context__.to_vec(N)); - std::vector d(N,double(0)); - for (int i0__ = 0U; i0__ < N; ++i0__) - d[i0__] = vals_r__[pos__++]; - for (int i0__ = 0U; i0__ < N; ++i0__) - try { - writer__.scalar_lb_unconstrain(0,d[i0__]); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable d: ") + e.what()); - } - - if (!(context__.contains_r("A"))) - throw std::runtime_error("variable A missing"); - vals_r__ = context__.vals_r("A"); - pos__ = 0U; - validate_non_negative_index("A", "N", N); - context__.validate_dims("initialization", "A", "double", context__.to_vec(N)); - std::vector A(N,double(0)); - for (int i0__ = 0U; i0__ < N; ++i0__) - A[i0__] = vals_r__[pos__++]; - for (int i0__ = 0U; i0__ < N; ++i0__) - try { - writer__.scalar_lb_unconstrain(0,A[i0__]); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable A: ") + e.what()); - } 
- - if (!(context__.contains_r("tau"))) - throw std::runtime_error("variable tau missing"); - vals_r__ = context__.vals_r("tau"); - pos__ = 0U; - validate_non_negative_index("tau", "N", N); - context__.validate_dims("initialization", "tau", "double", context__.to_vec(N)); - std::vector tau(N,double(0)); - for (int i0__ = 0U; i0__ < N; ++i0__) - tau[i0__] = vals_r__[pos__++]; - for (int i0__ = 0U; i0__ < N; ++i0__) - try { - writer__.scalar_lb_unconstrain(0,tau[i0__]); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau: ") + e.what()); - } - - if (!(context__.contains_r("v"))) - throw std::runtime_error("variable v missing"); - vals_r__ = context__.vals_r("v"); - pos__ = 0U; - validate_non_negative_index("v", "N", N); - validate_non_negative_index("v", "N_cond", N_cond); - validate_non_negative_index("v", "N_choices", N_choices); - context__.validate_dims("initialization", "v", "vector_d", context__.to_vec(N,N_cond,N_choices)); - std::vector > v(N,std::vector(N_cond,vector_d(static_cast(N_choices)))); - for (int j1__ = 0U; j1__ < N_choices; ++j1__) - for (int i1__ = 0U; i1__ < N_cond; ++i1__) - for (int i0__ = 0U; i0__ < N; ++i0__) - v[i0__][i1__](j1__) = vals_r__[pos__++]; - for (int i0__ = 0U; i0__ < N; ++i0__) - for (int i1__ = 0U; i1__ < N_cond; ++i1__) - try { - writer__.vector_lb_unconstrain(0,v[i0__][i1__]); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable v: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ 
log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - local_scalar_t__ mu_d; - (void) mu_d; // dummy to suppress unused var warning - if (jacobian__) - mu_d = in__.scalar_lb_constrain(0,lp__); - else - mu_d = in__.scalar_lb_constrain(0); - - local_scalar_t__ mu_A; - (void) mu_A; // dummy to suppress unused var warning - if (jacobian__) - mu_A = in__.scalar_lb_constrain(0,lp__); - else - mu_A = in__.scalar_lb_constrain(0); - - local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to suppress unused var warning - if (jacobian__) - mu_tau = in__.scalar_lb_constrain(0,lp__); - else - mu_tau = in__.scalar_lb_constrain(0); - - vector > mu_v; - size_t dim_mu_v_0__ = N_cond; - mu_v.reserve(dim_mu_v_0__); - for (size_t k_0__ = 0; k_0__ < dim_mu_v_0__; ++k_0__) { - if (jacobian__) - mu_v.push_back(in__.vector_lb_constrain(0,N_choices,lp__)); - else - mu_v.push_back(in__.vector_lb_constrain(0,N_choices)); - } - - local_scalar_t__ sigma_d; - (void) sigma_d; // dummy to suppress unused var warning - if (jacobian__) - sigma_d = in__.scalar_lb_constrain(0,lp__); - else - sigma_d = in__.scalar_lb_constrain(0); - - local_scalar_t__ sigma_A; - (void) sigma_A; // dummy to suppress unused var warning - if (jacobian__) - sigma_A = in__.scalar_lb_constrain(0,lp__); - else - sigma_A = in__.scalar_lb_constrain(0); - - local_scalar_t__ sigma_tau; - (void) sigma_tau; // dummy to suppress unused var warning - if (jacobian__) - sigma_tau = in__.scalar_lb_constrain(0,lp__); - else - sigma_tau = in__.scalar_lb_constrain(0); - - vector > sigma_v; - size_t dim_sigma_v_0__ = N_cond; - sigma_v.reserve(dim_sigma_v_0__); - for (size_t k_0__ = 0; k_0__ < dim_sigma_v_0__; 
++k_0__) { - if (jacobian__) - sigma_v.push_back(in__.vector_lb_constrain(0,N_choices,lp__)); - else - sigma_v.push_back(in__.vector_lb_constrain(0,N_choices)); - } - - vector d; - size_t dim_d_0__ = N; - d.reserve(dim_d_0__); - for (size_t k_0__ = 0; k_0__ < dim_d_0__; ++k_0__) { - if (jacobian__) - d.push_back(in__.scalar_lb_constrain(0,lp__)); - else - d.push_back(in__.scalar_lb_constrain(0)); - } - - vector A; - size_t dim_A_0__ = N; - A.reserve(dim_A_0__); - for (size_t k_0__ = 0; k_0__ < dim_A_0__; ++k_0__) { - if (jacobian__) - A.push_back(in__.scalar_lb_constrain(0,lp__)); - else - A.push_back(in__.scalar_lb_constrain(0)); - } - - vector tau; - size_t dim_tau_0__ = N; - tau.reserve(dim_tau_0__); - for (size_t k_0__ = 0; k_0__ < dim_tau_0__; ++k_0__) { - if (jacobian__) - tau.push_back(in__.scalar_lb_constrain(0,lp__)); - else - tau.push_back(in__.scalar_lb_constrain(0)); - } - - vector > > v; - size_t dim_v_0__ = N; - v.resize(dim_v_0__); - for (size_t k_0__ = 0; k_0__ < dim_v_0__; ++k_0__) { - size_t dim_v_1__ = N_cond; - v[k_0__].reserve(dim_v_1__); - for (size_t k_1__ = 0; k_1__ < dim_v_1__; ++k_1__) { - if (jacobian__) - v[k_0__].push_back(in__.vector_lb_constrain(0,N_choices,lp__)); - else - v[k_0__].push_back(in__.vector_lb_constrain(0,N_choices)); - } - } - - - // transformed parameters - local_scalar_t__ s; - (void) s; // dummy to suppress unused var warning - - stan::math::initialize(s, DUMMY_VAR__); - stan::math::fill(s,DUMMY_VAR__); - - - stan::math::assign(s, 1); - - // validate transformed parameters - if (stan::math::is_uninitialized(s)) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: s"; - throw std::runtime_error(msg__.str()); - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - - // model body - - lp_accum__.add(normal_log(mu_d, 0.5, 1)); - if (mu_d < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else 
lp_accum__.add(-normal_ccdf_log(0, 0.5, 1)); - lp_accum__.add(normal_log(mu_A, 0.5, 1)); - if (mu_A < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, 0.5, 1)); - lp_accum__.add(normal_log(mu_tau, 0.5, 0.5)); - if (mu_tau < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, 0.5, 0.5)); - lp_accum__.add(gamma_log(sigma_d, 1, 1)); - lp_accum__.add(gamma_log(sigma_A, 1, 1)); - lp_accum__.add(gamma_log(sigma_tau, 1, 1)); - for (int j = 1; j <= N_cond; ++j) { - - for (int n = 1; n <= N_choices; ++n) { - - lp_accum__.add(normal_log(get_base1(get_base1(mu_v,j,"mu_v",1),n,"mu_v",2), 2, 1)); - if (get_base1(get_base1(mu_v,j,"mu_v",1),n,"mu_v",2) < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, 2, 1)); - lp_accum__.add(gamma_log(get_base1(get_base1(sigma_v,j,"sigma_v",1),n,"sigma_v",2), 1, 1)); - } - } - for (int i = 1; i <= N; ++i) { - { - int n_trials(0); - (void) n_trials; // dummy to suppress unused var warning - - stan::math::fill(n_trials, std::numeric_limits::min()); - - - lp_accum__.add(normal_log(get_base1(d,i,"d",1), mu_d, sigma_d)); - if (get_base1(d,i,"d",1) < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, mu_d, sigma_d)); - lp_accum__.add(normal_log(get_base1(A,i,"A",1), mu_A, sigma_A)); - if (get_base1(A,i,"A",1) < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, mu_A, sigma_A)); - lp_accum__.add(normal_log(get_base1(tau,i,"tau",1), mu_tau, sigma_tau)); - if (get_base1(tau,i,"tau",1) < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, mu_tau, sigma_tau)); - for (int j = 1; j <= N_cond; ++j) { - - stan::math::assign(n_trials, get_base1(get_base1(N_tr_cond,i,"N_tr_cond",1),j,"N_tr_cond",2)); - for (int n = 1; n <= N_choices; ++n) { - - 
lp_accum__.add(normal_log(get_base1(get_base1(get_base1(v,i,"v",1),j,"v",2),n,"v",3), get_base1(get_base1(mu_v,j,"mu_v",1),n,"mu_v",2), get_base1(get_base1(sigma_v,j,"sigma_v",1),n,"sigma_v",2))); - if (get_base1(get_base1(get_base1(v,i,"v",1),j,"v",2),n,"v",3) < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, get_base1(get_base1(mu_v,j,"mu_v",1),n,"mu_v",2), get_base1(get_base1(sigma_v,j,"sigma_v",1),n,"sigma_v",2))); - } - lp_accum__.add(lba_lpdf(stan::model::rvalue(RT, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_min_max(1, n_trials), stan::model::nil_index_list())))), "RT"), get_base1(d,i,"d",1), get_base1(A,i,"A",1), stan::model::rvalue(v, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list()))), "v"), s, get_base1(tau,i,"tau",1), pstream__)); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_d"); - names__.push_back("mu_A"); - names__.push_back("mu_tau"); - names__.push_back("mu_v"); - names__.push_back("sigma_d"); - 
names__.push_back("sigma_A"); - names__.push_back("sigma_tau"); - names__.push_back("sigma_v"); - names__.push_back("d"); - names__.push_back("A"); - names__.push_back("tau"); - names__.push_back("v"); - names__.push_back("s"); - names__.push_back("n_trials"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N_cond); - dims__.push_back(N_choices); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N_cond); - dims__.push_back(N_choices); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(N_cond); - dims__.push_back(N_choices); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(N_cond); - dims__.push_back(2); - dims__.push_back(Max_tr); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = 
"model_choiceRT_lba_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - double mu_d = in__.scalar_lb_constrain(0); - double mu_A = in__.scalar_lb_constrain(0); - double mu_tau = in__.scalar_lb_constrain(0); - vector mu_v; - size_t dim_mu_v_0__ = N_cond; - for (size_t k_0__ = 0; k_0__ < dim_mu_v_0__; ++k_0__) { - mu_v.push_back(in__.vector_lb_constrain(0,N_choices)); - } - double sigma_d = in__.scalar_lb_constrain(0); - double sigma_A = in__.scalar_lb_constrain(0); - double sigma_tau = in__.scalar_lb_constrain(0); - vector sigma_v; - size_t dim_sigma_v_0__ = N_cond; - for (size_t k_0__ = 0; k_0__ < dim_sigma_v_0__; ++k_0__) { - sigma_v.push_back(in__.vector_lb_constrain(0,N_choices)); - } - vector d; - size_t dim_d_0__ = N; - for (size_t k_0__ = 0; k_0__ < dim_d_0__; ++k_0__) { - d.push_back(in__.scalar_lb_constrain(0)); - } - vector A; - size_t dim_A_0__ = N; - for (size_t k_0__ = 0; k_0__ < dim_A_0__; ++k_0__) { - A.push_back(in__.scalar_lb_constrain(0)); - } - vector tau; - size_t dim_tau_0__ = N; - for (size_t k_0__ = 0; k_0__ < dim_tau_0__; ++k_0__) { - tau.push_back(in__.scalar_lb_constrain(0)); - } - vector > v; - size_t dim_v_0__ = N; - v.resize(dim_v_0__); - for (size_t k_0__ = 0; k_0__ < dim_v_0__; ++k_0__) { - size_t dim_v_1__ = N_cond; - for (size_t k_1__ = 0; k_1__ < dim_v_1__; ++k_1__) { - v[k_0__].push_back(in__.vector_lb_constrain(0,N_choices)); - } - } - vars__.push_back(mu_d); - vars__.push_back(mu_A); - vars__.push_back(mu_tau); - for (int k_1__ = 0; k_1__ < N_choices; ++k_1__) { - for (int k_0__ = 0; k_0__ < N_cond; ++k_0__) { - vars__.push_back(mu_v[k_0__][k_1__]); - } - } - vars__.push_back(sigma_d); - vars__.push_back(sigma_A); - vars__.push_back(sigma_tau); - for (int k_1__ = 0; k_1__ < N_choices; ++k_1__) { - for (int k_0__ = 0; k_0__ < N_cond; ++k_0__) { - vars__.push_back(sigma_v[k_0__][k_1__]); - } - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - 
vars__.push_back(d[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(A[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - for (int k_2__ = 0; k_2__ < N_choices; ++k_2__) { - for (int k_1__ = 0; k_1__ < N_cond; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(v[k_0__][k_1__][k_2__]); - } - } - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - local_scalar_t__ s; - (void) s; // dummy to suppress unused var warning - - stan::math::initialize(s, DUMMY_VAR__); - stan::math::fill(s,DUMMY_VAR__); - - - stan::math::assign(s, 1); - - // validate transformed parameters - - // write transformed parameters - if (include_tparams__) { - vars__.push_back(s); - } - if (!include_gqs__) return; - // declare and define generated quantities - int n_trials(0); - (void) n_trials; // dummy to suppress unused var warning - - stan::math::fill(n_trials, std::numeric_limits::min()); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "2", 2); - validate_non_negative_index("y_pred", "Max_tr", Max_tr); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "N_cond", N_cond); - vector > > y_pred(N, (vector >(N_cond, (Eigen::Matrix (static_cast(2),static_cast(Max_tr)))))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int j = 1; j <= N_cond; ++j) { - - for (int t = 1; t <= Max_tr; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), 
stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())))), - rep_vector(-(1),2), - "assigning variable y_pred"); - } - } - } - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int j = 1; j <= N_cond; ++j) { - - stan::math::assign(n_trials, get_base1(get_base1(N_tr_cond,i,"N_tr_cond",1),j,"N_tr_cond",2)); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + lba_lpdf(stan::model::rvalue(RT, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_min_max(1, n_trials), stan::model::nil_index_list())))), "RT"),get_base1(d,i,"d",1),get_base1(A,i,"A",1),stan::model::rvalue(v, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list()))), "v"),s,get_base1(tau,i,"tau",1), pstream__))), - "assigning variable log_lik"); - for (int t = 1; t <= n_trials; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())))), - lba_rng(get_base1(d,i,"d",1),get_base1(A,i,"A",1),stan::model::rvalue(v, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list()))), "v"),s,get_base1(tau,i,"tau",1), base_rng__, pstream__), - 
"assigning variable y_pred"); - } - } - } - - // validate generated quantities - - // write generated quantities - vars__.push_back(n_trials); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_3__ = 0; k_3__ < Max_tr; ++k_3__) { - for (int k_2__ = 0; k_2__ < 2; ++k_2__) { - for (int k_1__ = 0; k_1__ < N_cond; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__](k_2__, k_3__)); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_choiceRT_lba"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_d"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_1__ = 1; k_1__ <= N_choices; ++k_1__) { - 
for (int k_0__ = 1; k_0__ <= N_cond; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_v" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma_d"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_1__ = 1; k_1__ <= N_choices; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N_cond; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma_v" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "d" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_2__ = 1; k_2__ <= N_choices; ++k_2__) { - for (int k_1__ = 1; k_1__ <= N_cond; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "v" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "s"; - param_names__.push_back(param_name_stream__.str()); - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "n_trials"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_3__ = 1; k_3__ <= Max_tr; ++k_3__) { - for (int k_2__ = 1; k_2__ <= 2; ++k_2__) { - for (int k_1__ = 1; k_1__ <= N_cond; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__ << '.' << k_3__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_d"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_1__ = 1; k_1__ <= N_choices; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N_cond; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_v" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma_d"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_1__ = 1; k_1__ <= N_choices; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N_cond; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma_v" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "d" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_2__ = 1; k_2__ <= N_choices; ++k_2__) { - for (int k_1__ = 1; k_1__ <= N_cond; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "v" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "s"; - param_names__.push_back(param_name_stream__.str()); - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "n_trials"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_3__ = 1; k_3__ <= Max_tr; ++k_3__) { - for (int k_2__ = 1; k_2__ <= 2; ++k_2__) { - for (int k_1__ = 1; k_1__ <= N_cond; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__ << '.' << k_3__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_choiceRT_lba_single_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_choiceRT_lba_single"); - reader.add_event(234, 232, "end", "model_choiceRT_lba_single"); - return reader; -} - -template -typename boost::math::tools::promote_args::type>::type -lba_pdf(const T0__& t, - const T1__& b, - const T2__& A, - const T3__& v, - const T4__& s, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static 
bool propto__ = true; - (void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - { - local_scalar_t__ b_A_tv_ts; - (void) b_A_tv_ts; // dummy to suppress unused var warning - - stan::math::initialize(b_A_tv_ts, DUMMY_VAR__); - stan::math::fill(b_A_tv_ts,DUMMY_VAR__); - local_scalar_t__ b_tv_ts; - (void) b_tv_ts; // dummy to suppress unused var warning - - stan::math::initialize(b_tv_ts, DUMMY_VAR__); - stan::math::fill(b_tv_ts,DUMMY_VAR__); - local_scalar_t__ term_1; - (void) term_1; // dummy to suppress unused var warning - - stan::math::initialize(term_1, DUMMY_VAR__); - stan::math::fill(term_1,DUMMY_VAR__); - local_scalar_t__ term_2; - (void) term_2; // dummy to suppress unused var warning - - stan::math::initialize(term_2, DUMMY_VAR__); - stan::math::fill(term_2,DUMMY_VAR__); - local_scalar_t__ term_3; - (void) term_3; // dummy to suppress unused var warning - - stan::math::initialize(term_3, DUMMY_VAR__); - stan::math::fill(term_3,DUMMY_VAR__); - local_scalar_t__ term_4; - (void) term_4; // dummy to suppress unused var warning - - stan::math::initialize(term_4, DUMMY_VAR__); - stan::math::fill(term_4,DUMMY_VAR__); - local_scalar_t__ pdf; - (void) pdf; // dummy to suppress unused var warning - - stan::math::initialize(pdf, DUMMY_VAR__); - stan::math::fill(pdf,DUMMY_VAR__); - - - stan::math::assign(b_A_tv_ts, (((b - A) - (t * v)) / (t * s))); - stan::math::assign(b_tv_ts, ((b - (t * v)) / (t * s))); - stan::math::assign(term_1, (v * Phi(b_A_tv_ts))); - stan::math::assign(term_2, (s * stan::math::exp(normal_log(b_A_tv_ts,0,1)))); - stan::math::assign(term_3, (v * Phi(b_tv_ts))); - stan::math::assign(term_4, (s * stan::math::exp(normal_log(b_tv_ts,0,1)))); - stan::math::assign(pdf, ((1 / A) * (((-(term_1) + term_2) + term_3) - term_4))); - return stan::math::promote_scalar(pdf); - } - } catch (const std::exception& e) { - 
stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} - - -struct lba_pdf_functor__ { - template - typename boost::math::tools::promote_args::type>::type - operator()(const T0__& t, - const T1__& b, - const T2__& A, - const T3__& v, - const T4__& s, std::ostream* pstream__) const { - return lba_pdf(t, b, A, v, s, pstream__); - } -}; - -template -typename boost::math::tools::promote_args::type>::type -lba_cdf(const T0__& t, - const T1__& b, - const T2__& A, - const T3__& v, - const T4__& s, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - (void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - { - local_scalar_t__ b_A_tv; - (void) b_A_tv; // dummy to suppress unused var warning - - stan::math::initialize(b_A_tv, DUMMY_VAR__); - stan::math::fill(b_A_tv,DUMMY_VAR__); - local_scalar_t__ b_tv; - (void) b_tv; // dummy to suppress unused var warning - - stan::math::initialize(b_tv, DUMMY_VAR__); - stan::math::fill(b_tv,DUMMY_VAR__); - local_scalar_t__ ts; - (void) ts; // dummy to suppress unused var warning - - stan::math::initialize(ts, DUMMY_VAR__); - stan::math::fill(ts,DUMMY_VAR__); - local_scalar_t__ term_1; - (void) term_1; // dummy to suppress unused var warning - - stan::math::initialize(term_1, DUMMY_VAR__); - stan::math::fill(term_1,DUMMY_VAR__); - local_scalar_t__ term_2; - (void) term_2; // dummy to suppress unused var warning - - stan::math::initialize(term_2, DUMMY_VAR__); - stan::math::fill(term_2,DUMMY_VAR__); - local_scalar_t__ term_3; - (void) term_3; // dummy to suppress unused var warning - - 
stan::math::initialize(term_3, DUMMY_VAR__); - stan::math::fill(term_3,DUMMY_VAR__); - local_scalar_t__ term_4; - (void) term_4; // dummy to suppress unused var warning - - stan::math::initialize(term_4, DUMMY_VAR__); - stan::math::fill(term_4,DUMMY_VAR__); - local_scalar_t__ cdf; - (void) cdf; // dummy to suppress unused var warning - - stan::math::initialize(cdf, DUMMY_VAR__); - stan::math::fill(cdf,DUMMY_VAR__); - - - stan::math::assign(b_A_tv, ((b - A) - (t * v))); - stan::math::assign(b_tv, (b - (t * v))); - stan::math::assign(ts, (t * s)); - stan::math::assign(term_1, ((b_A_tv / A) * Phi((b_A_tv / ts)))); - stan::math::assign(term_2, ((b_tv / A) * Phi((b_tv / ts)))); - stan::math::assign(term_3, ((ts / A) * stan::math::exp(normal_log((b_A_tv / ts),0,1)))); - stan::math::assign(term_4, ((ts / A) * stan::math::exp(normal_log((b_tv / ts),0,1)))); - stan::math::assign(cdf, ((((1 + term_1) - term_2) + term_3) - term_4)); - return stan::math::promote_scalar(cdf); - } - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} - - -struct lba_cdf_functor__ { - template - typename boost::math::tools::promote_args::type>::type - operator()(const T0__& t, - const T1__& b, - const T2__& A, - const T3__& v, - const T4__& s, std::ostream* pstream__) const { - return lba_cdf(t, b, A, v, s, pstream__); - } -}; - -template -typename boost::math::tools::promote_args::type>::type -lba_lpdf(const Eigen::Matrix& RT, - const T1__& d, - const T2__& A, - const Eigen::Matrix& v, - const T4__& s, - const T5__& tau, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - (void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); 
- (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - { - local_scalar_t__ t; - (void) t; // dummy to suppress unused var warning - - stan::math::initialize(t, DUMMY_VAR__); - stan::math::fill(t,DUMMY_VAR__); - local_scalar_t__ b; - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - local_scalar_t__ cdf; - (void) cdf; // dummy to suppress unused var warning - - stan::math::initialize(cdf, DUMMY_VAR__); - stan::math::fill(cdf,DUMMY_VAR__); - local_scalar_t__ pdf; - (void) pdf; // dummy to suppress unused var warning - - stan::math::initialize(pdf, DUMMY_VAR__); - stan::math::fill(pdf,DUMMY_VAR__); - validate_non_negative_index("prob", "rows(RT)", rows(RT)); - Eigen::Matrix prob(static_cast(rows(RT))); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - local_scalar_t__ out; - (void) out; // dummy to suppress unused var warning - - stan::math::initialize(out, DUMMY_VAR__); - stan::math::fill(out,DUMMY_VAR__); - local_scalar_t__ prob_neg; - (void) prob_neg; // dummy to suppress unused var warning - - stan::math::initialize(prob_neg, DUMMY_VAR__); - stan::math::fill(prob_neg,DUMMY_VAR__); - - - stan::math::assign(b, (A + d)); - for (int i = 1; i <= rows(RT); ++i) { - - stan::math::assign(t, (get_base1(RT,1,i,"RT",1) - tau)); - if (as_bool(logical_gt(t,0))) { - - stan::math::assign(cdf, 1); - for (int j = 1; j <= num_elements(v); ++j) { - - if (as_bool(logical_eq(get_base1(RT,2,i,"RT",1),j))) { - - stan::math::assign(pdf, lba_pdf(t,b,A,get_base1(v,j,"v",1),s, pstream__)); - } else { - - stan::math::assign(cdf, stan::model::deep_copy(((1 - lba_cdf(t,b,A,get_base1(v,j,"v",1),s, pstream__)) * cdf))); - } - } - stan::math::assign(prob_neg, 1); - for (int j = 1; j <= num_elements(v); ++j) { - - stan::math::assign(prob_neg, 
stan::model::deep_copy((Phi((-(get_base1(v,j,"v",1)) / s)) * prob_neg))); - } - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (pdf * cdf), - "assigning variable prob"); - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(prob,i,"prob",1) / (1 - prob_neg))), - "assigning variable prob"); - if (as_bool(logical_lt(get_base1(prob,i,"prob",1),1e-10))) { - - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 1e-10, - "assigning variable prob"); - } - } else { - - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 1e-10, - "assigning variable prob"); - } - } - stan::math::assign(out, sum(stan::math::log(prob))); - return stan::math::promote_scalar(out); - } - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} -template -typename boost::math::tools::promote_args::type>::type -lba_lpdf(const Eigen::Matrix& RT, - const T1__& d, - const T2__& A, - const Eigen::Matrix& v, - const T4__& s, - const T5__& tau, std::ostream* pstream__) { - return lba_lpdf(RT,d,A,v,s,tau, pstream__); -} - - -struct lba_lpdf_functor__ { - template - typename boost::math::tools::promote_args::type>::type - operator()(const Eigen::Matrix& RT, - const T1__& d, - const T2__& A, - const Eigen::Matrix& v, - const T4__& s, - const T5__& tau, std::ostream* pstream__) const { - return lba_lpdf(RT, d, A, v, s, tau, pstream__); - } -}; - -template -Eigen::Matrix::type>::type, Eigen::Dynamic,1> -lba_rng(const T0__& d, - const T1__& A, - const Eigen::Matrix& v, - const T3__& s, - const T4__& tau, RNG& base_rng__, std::ostream* 
pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - (void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - { - int get_pos_drift(0); - (void) get_pos_drift; // dummy to suppress unused var warning - - stan::math::fill(get_pos_drift, std::numeric_limits::min()); - int no_pos_drift(0); - (void) no_pos_drift; // dummy to suppress unused var warning - - stan::math::fill(no_pos_drift, std::numeric_limits::min()); - int get_first_pos(0); - (void) get_first_pos; // dummy to suppress unused var warning - - stan::math::fill(get_first_pos, std::numeric_limits::min()); - validate_non_negative_index("drift", "num_elements(v)", num_elements(v)); - Eigen::Matrix drift(static_cast(num_elements(v))); - (void) drift; // dummy to suppress unused var warning - - stan::math::initialize(drift, DUMMY_VAR__); - stan::math::fill(drift,DUMMY_VAR__); - int max_iter(0); - (void) max_iter; // dummy to suppress unused var warning - - stan::math::fill(max_iter, std::numeric_limits::min()); - int iter(0); - (void) iter; // dummy to suppress unused var warning - - stan::math::fill(iter, std::numeric_limits::min()); - validate_non_negative_index("start", "num_elements(v)", num_elements(v)); - vector start(num_elements(v)); - stan::math::initialize(start, DUMMY_VAR__); - stan::math::fill(start,DUMMY_VAR__); - validate_non_negative_index("ttf", "num_elements(v)", num_elements(v)); - vector ttf(num_elements(v)); - stan::math::initialize(ttf, DUMMY_VAR__); - stan::math::fill(ttf,DUMMY_VAR__); - validate_non_negative_index("resp", "num_elements(v)", num_elements(v)); - vector resp(num_elements(v), 0); - stan::math::fill(resp, std::numeric_limits::min()); - local_scalar_t__ rt; - (void) rt; // dummy to suppress unused var warning - - 
stan::math::initialize(rt, DUMMY_VAR__); - stan::math::fill(rt,DUMMY_VAR__); - validate_non_negative_index("pred", "2", 2); - Eigen::Matrix pred(static_cast(2)); - (void) pred; // dummy to suppress unused var warning - - stan::math::initialize(pred, DUMMY_VAR__); - stan::math::fill(pred,DUMMY_VAR__); - local_scalar_t__ b; - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - - - stan::math::assign(get_pos_drift, 1); - stan::math::assign(no_pos_drift, 0); - stan::math::assign(max_iter, 1000); - stan::math::assign(iter, 0); - while (as_bool(get_pos_drift)) { - - for (int j = 1; j <= num_elements(v); ++j) { - - stan::model::assign(drift, - stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), - normal_rng(get_base1(v,j,"v",1),s, base_rng__), - "assigning variable drift"); - if (as_bool(logical_gt(get_base1(drift,j,"drift",1),0))) { - - stan::math::assign(get_pos_drift, 0); - } - } - stan::math::assign(iter, stan::model::deep_copy((iter + 1))); - if (as_bool(logical_gt(iter,max_iter))) { - - stan::math::assign(get_pos_drift, 0); - stan::math::assign(no_pos_drift, 1); - } - } - if (as_bool(no_pos_drift)) { - - stan::model::assign(pred, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - -(1), - "assigning variable pred"); - stan::model::assign(pred, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - -(1), - "assigning variable pred"); - } else { - - stan::math::assign(b, (A + d)); - for (int i = 1; i <= num_elements(v); ++i) { - - stan::model::assign(start, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - uniform_rng(0,A, base_rng__), - "assigning variable start"); - stan::model::assign(ttf, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - ((b - get_base1(start,i,"start",1)) / get_base1(drift,i,"drift",1)), - "assigning 
variable ttf"); - } - stan::math::assign(resp, sort_indices_asc(ttf)); - stan::math::assign(ttf, stan::model::deep_copy(sort_asc(ttf))); - stan::math::assign(get_first_pos, 1); - stan::math::assign(iter, 1); - while (as_bool(get_first_pos)) { - - if (as_bool(logical_gt(get_base1(ttf,iter,"ttf",1),0))) { - - stan::model::assign(pred, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (get_base1(ttf,iter,"ttf",1) + tau), - "assigning variable pred"); - stan::model::assign(pred, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - get_base1(resp,iter,"resp",1), - "assigning variable pred"); - stan::math::assign(get_first_pos, 0); - } - stan::math::assign(iter, stan::model::deep_copy((iter + 1))); - } - } - return stan::math::promote_scalar(pred); - } - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} - - -struct lba_rng_functor__ { - template - Eigen::Matrix::type>::type, Eigen::Dynamic,1> - operator()(const T0__& d, - const T1__& A, - const Eigen::Matrix& v, - const T3__& s, - const T4__& tau, RNG& base_rng__, std::ostream* pstream__) const { - return lba_rng(d, A, v, s, tau, base_rng__, pstream__); - } -}; - -class model_choiceRT_lba_single : public prob_grad { -private: - int Max_tr; - int N_choices; - int N_cond; - vector N_tr_cond; - vector RT; -public: - model_choiceRT_lba_single(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_choiceRT_lba_single(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* 
pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_choiceRT_lba_single_namespace::model_choiceRT_lba_single"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "Max_tr", "int", context__.to_vec()); - Max_tr = int(0); - vals_i__ = context__.vals_i("Max_tr"); - pos__ = 0; - Max_tr = vals_i__[pos__++]; - context__.validate_dims("data initialization", "N_choices", "int", context__.to_vec()); - N_choices = int(0); - vals_i__ = context__.vals_i("N_choices"); - pos__ = 0; - N_choices = vals_i__[pos__++]; - context__.validate_dims("data initialization", "N_cond", "int", context__.to_vec()); - N_cond = int(0); - vals_i__ = context__.vals_i("N_cond"); - pos__ = 0; - N_cond = vals_i__[pos__++]; - validate_non_negative_index("N_tr_cond", "N_cond", N_cond); - context__.validate_dims("data initialization", "N_tr_cond", "int", context__.to_vec(N_cond)); - validate_non_negative_index("N_tr_cond", "N_cond", N_cond); - N_tr_cond = std::vector(N_cond,int(0)); - vals_i__ = context__.vals_i("N_tr_cond"); - pos__ = 0; - size_t N_tr_cond_limit_0__ = N_cond; - for (size_t i_0__ = 0; i_0__ < N_tr_cond_limit_0__; ++i_0__) { - N_tr_cond[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("RT", "N_cond", N_cond); - validate_non_negative_index("RT", "2", 2); - validate_non_negative_index("RT", "Max_tr", Max_tr); - context__.validate_dims("data initialization", "RT", "matrix_d", context__.to_vec(N_cond,2,Max_tr)); - 
validate_non_negative_index("RT", "N_cond", N_cond); - validate_non_negative_index("RT", "2", 2); - validate_non_negative_index("RT", "Max_tr", Max_tr); - RT = std::vector(N_cond,matrix_d(static_cast(2),static_cast(Max_tr))); - vals_r__ = context__.vals_r("RT"); - pos__ = 0; - size_t RT_m_mat_lim__ = 2; - size_t RT_n_mat_lim__ = Max_tr; - for (size_t n_mat__ = 0; n_mat__ < RT_n_mat_lim__; ++n_mat__) { - for (size_t m_mat__ = 0; m_mat__ < RT_m_mat_lim__; ++m_mat__) { - size_t RT_limit_0__ = N_cond; - for (size_t i_0__ = 0; i_0__ < RT_limit_0__; ++i_0__) { - RT[i_0__](m_mat__,n_mat__) = vals_r__[pos__++]; - } - } - } - - // validate, data variables - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - ++num_params_r__; - ++num_params_r__; - ++num_params_r__; - validate_non_negative_index("v", "N_choices", N_choices); - validate_non_negative_index("v", "N_cond", N_cond); - num_params_r__ += N_choices * N_cond; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_choiceRT_lba_single() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("d"))) - throw std::runtime_error("variable d missing"); - vals_r__ = context__.vals_r("d"); - pos__ = 0U; - context__.validate_dims("initialization", "d", "double", context__.to_vec()); - double d(0); - d = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,d); - } catch (const std::exception& e) { - throw 
std::runtime_error(std::string("Error transforming variable d: ") + e.what()); - } - - if (!(context__.contains_r("A"))) - throw std::runtime_error("variable A missing"); - vals_r__ = context__.vals_r("A"); - pos__ = 0U; - context__.validate_dims("initialization", "A", "double", context__.to_vec()); - double A(0); - A = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,A); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable A: ") + e.what()); - } - - if (!(context__.contains_r("tau"))) - throw std::runtime_error("variable tau missing"); - vals_r__ = context__.vals_r("tau"); - pos__ = 0U; - context__.validate_dims("initialization", "tau", "double", context__.to_vec()); - double tau(0); - tau = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,tau); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau: ") + e.what()); - } - - if (!(context__.contains_r("v"))) - throw std::runtime_error("variable v missing"); - vals_r__ = context__.vals_r("v"); - pos__ = 0U; - validate_non_negative_index("v", "N_cond", N_cond); - validate_non_negative_index("v", "N_choices", N_choices); - context__.validate_dims("initialization", "v", "vector_d", context__.to_vec(N_cond,N_choices)); - std::vector v(N_cond,vector_d(static_cast(N_choices))); - for (int j1__ = 0U; j1__ < N_choices; ++j1__) - for (int i0__ = 0U; i0__ < N_cond; ++i0__) - v[i0__](j1__) = vals_r__[pos__++]; - for (int i0__ = 0U; i0__ < N_cond; ++i0__) - try { - writer__.vector_lb_unconstrain(0,v[i0__]); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable v: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - 
transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - local_scalar_t__ d; - (void) d; // dummy to suppress unused var warning - if (jacobian__) - d = in__.scalar_lb_constrain(0,lp__); - else - d = in__.scalar_lb_constrain(0); - - local_scalar_t__ A; - (void) A; // dummy to suppress unused var warning - if (jacobian__) - A = in__.scalar_lb_constrain(0,lp__); - else - A = in__.scalar_lb_constrain(0); - - local_scalar_t__ tau; - (void) tau; // dummy to suppress unused var warning - if (jacobian__) - tau = in__.scalar_lb_constrain(0,lp__); - else - tau = in__.scalar_lb_constrain(0); - - vector > v; - size_t dim_v_0__ = N_cond; - v.reserve(dim_v_0__); - for (size_t k_0__ = 0; k_0__ < dim_v_0__; ++k_0__) { - if (jacobian__) - v.push_back(in__.vector_lb_constrain(0,N_choices,lp__)); - else - v.push_back(in__.vector_lb_constrain(0,N_choices)); - } - - - // transformed parameters - local_scalar_t__ s; - (void) s; // dummy to suppress unused var warning - - stan::math::initialize(s, DUMMY_VAR__); - stan::math::fill(s,DUMMY_VAR__); - - - stan::math::assign(s, 1); - - // validate transformed parameters - if (stan::math::is_uninitialized(s)) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: s"; - throw std::runtime_error(msg__.str()); - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - - // model body - { - int n_trials(0); - (void) n_trials; // dummy 
to suppress unused var warning - - stan::math::fill(n_trials, std::numeric_limits::min()); - - - lp_accum__.add(normal_log(d, 0.5, 1)); - if (d < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, 0.5, 1)); - lp_accum__.add(normal_log(A, 0.5, 1)); - if (A < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, 0.5, 1)); - lp_accum__.add(normal_log(tau, 0.5, 0.5)); - if (tau < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, 0.5, 0.5)); - for (int j = 1; j <= N_cond; ++j) { - - stan::math::assign(n_trials, get_base1(N_tr_cond,j,"N_tr_cond",1)); - for (int n = 1; n <= N_choices; ++n) { - - lp_accum__.add(normal_log(get_base1(get_base1(v,j,"v",1),n,"v",2), 2, 1)); - if (get_base1(get_base1(v,j,"v",1),n,"v",2) < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, 2, 1)); - } - lp_accum__.add(lba_lpdf(stan::model::rvalue(RT, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_min_max(1, n_trials), stan::model::nil_index_list()))), "RT"), d, A, stan::model::rvalue(v, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "v"), s, tau, pstream__)); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector 
vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("d"); - names__.push_back("A"); - names__.push_back("tau"); - names__.push_back("v"); - names__.push_back("s"); - names__.push_back("n_trials"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N_cond); - dims__.push_back(N_choices); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N_cond); - dims__.push_back(2); - dims__.push_back(Max_tr); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_choiceRT_lba_single_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - double d = in__.scalar_lb_constrain(0); - double A = in__.scalar_lb_constrain(0); - double tau = in__.scalar_lb_constrain(0); - vector v; - size_t dim_v_0__ = N_cond; - for (size_t k_0__ = 0; k_0__ < dim_v_0__; ++k_0__) { - v.push_back(in__.vector_lb_constrain(0,N_choices)); - } - vars__.push_back(d); - vars__.push_back(A); - vars__.push_back(tau); - for (int k_1__ = 0; k_1__ < N_choices; ++k_1__) { - for (int k_0__ = 0; k_0__ < N_cond; ++k_0__) { - 
vars__.push_back(v[k_0__][k_1__]); - } - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - local_scalar_t__ s; - (void) s; // dummy to suppress unused var warning - - stan::math::initialize(s, DUMMY_VAR__); - stan::math::fill(s,DUMMY_VAR__); - - - stan::math::assign(s, 1); - - // validate transformed parameters - - // write transformed parameters - if (include_tparams__) { - vars__.push_back(s); - } - if (!include_gqs__) return; - // declare and define generated quantities - int n_trials(0); - (void) n_trials; // dummy to suppress unused var warning - - stan::math::fill(n_trials, std::numeric_limits::min()); - local_scalar_t__ log_lik; - (void) log_lik; // dummy to suppress unused var warning - - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "2", 2); - validate_non_negative_index("y_pred", "Max_tr", Max_tr); - validate_non_negative_index("y_pred", "N_cond", N_cond); - vector > y_pred(N_cond, (Eigen::Matrix (static_cast(2),static_cast(Max_tr)))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int j = 1; j <= N_cond; ++j) { - - for (int t = 1; t <= Max_tr; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - rep_vector(-(1),2), - "assigning variable y_pred"); - } - } - stan::math::assign(log_lik, 0); - - for (int j = 1; j <= N_cond; ++j) { - - stan::math::assign(n_trials, get_base1(N_tr_cond,j,"N_tr_cond",1)); - stan::math::assign(log_lik, stan::model::deep_copy((log_lik + lba_lpdf(stan::model::rvalue(RT, 
stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_min_max(1, n_trials), stan::model::nil_index_list()))), "RT"),d,A,stan::model::rvalue(v, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "v"),s,tau, pstream__)))); - for (int t = 1; t <= n_trials; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - lba_rng(d,A,stan::model::rvalue(v, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "v"),s,tau, base_rng__, pstream__), - "assigning variable y_pred"); - } - } - - // validate generated quantities - - // write generated quantities - vars__.push_back(n_trials); - vars__.push_back(log_lik); - for (int k_2__ = 0; k_2__ < Max_tr; ++k_2__) { - for (int k_1__ = 0; k_1__ < 2; ++k_1__) { - for (int k_0__ = 0; k_0__ < N_cond; ++k_0__) { - vars__.push_back(y_pred[k_0__](k_1__, k_2__)); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; 
i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_choiceRT_lba_single"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "d"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_1__ = 1; k_1__ <= N_choices; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N_cond; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "v" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "s"; - param_names__.push_back(param_name_stream__.str()); - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "n_trials"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - for (int k_2__ = 1; k_2__ <= Max_tr; ++k_2__) { - for (int k_1__ = 1; k_1__ <= 2; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N_cond; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "d"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_1__ = 1; k_1__ <= N_choices; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N_cond; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "v" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "s"; - param_names__.push_back(param_name_stream__.str()); - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "n_trials"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - for (int k_2__ = 1; k_2__ <= Max_tr; ++k_2__) { - for (int k_1__ = 1; k_1__ <= 2; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N_cond; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_cra_exp_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_cra_exp"); - reader.add_event(120, 118, "end", "model_cra_exp"); - return reader; -} - -template -typename boost::math::tools::promote_args::type>::type -subjective_value(const T0__& alpha, - const T1__& beta, - const T2__& p, - const T3__& a, - const T4__& v, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - (void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - - return stan::math::promote_scalar((pow(p,(1 + (beta * a))) * pow(v,alpha))); - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} - - -struct subjective_value_functor__ { - template - typename boost::math::tools::promote_args::type>::type - operator()(const T0__& alpha, - const T1__& beta, - const T2__& p, - const T3__& a, - const T4__& v, std::ostream* pstream__) const { - return subjective_value(alpha, beta, p, a, v, pstream__); - } -}; - -class model_cra_exp : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > prob; - vector > ambig; 
- vector > reward_var; - vector > reward_fix; -public: - model_cra_exp(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_cra_exp(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_cra_exp_namespace::model_cra_exp"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data 
initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("prob", "N", N); - validate_non_negative_index("prob", "T", T); - context__.validate_dims("data initialization", "prob", "double", context__.to_vec(N,T)); - validate_non_negative_index("prob", "N", N); - validate_non_negative_index("prob", "T", T); - prob = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("prob"); - pos__ = 0; - size_t prob_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < prob_limit_1__; ++i_1__) { - size_t prob_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < prob_limit_0__; ++i_0__) { - prob[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("ambig", "N", N); - validate_non_negative_index("ambig", "T", T); - context__.validate_dims("data initialization", "ambig", "double", context__.to_vec(N,T)); - validate_non_negative_index("ambig", "N", N); - validate_non_negative_index("ambig", "T", T); - ambig = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("ambig"); - pos__ = 0; - size_t ambig_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < ambig_limit_1__; ++i_1__) { - size_t ambig_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < ambig_limit_0__; ++i_0__) { - ambig[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("reward_var", "N", N); - validate_non_negative_index("reward_var", "T", T); - context__.validate_dims("data initialization", "reward_var", "double", context__.to_vec(N,T)); - validate_non_negative_index("reward_var", "N", N); - 
validate_non_negative_index("reward_var", "T", T); - reward_var = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("reward_var"); - pos__ = 0; - size_t reward_var_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < reward_var_limit_1__; ++i_1__) { - size_t reward_var_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < reward_var_limit_0__; ++i_0__) { - reward_var[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("reward_fix", "N", N); - validate_non_negative_index("reward_fix", "T", T); - context__.validate_dims("data initialization", "reward_fix", "double", context__.to_vec(N,T)); - validate_non_negative_index("reward_fix", "N", N); - validate_non_negative_index("reward_fix", "T", T); - reward_fix = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("reward_fix"); - pos__ = 0; - size_t reward_fix_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < reward_fix_limit_1__; ++i_1__) { - size_t reward_fix_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < reward_fix_limit_0__; ++i_0__) { - reward_fix[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],0); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"prob[k0__][k1__]",prob[k0__][k1__],0); - check_less_or_equal(function__,"prob[k0__][k1__]",prob[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - 
check_greater_or_equal(function__,"ambig[k0__][k1__]",ambig[k0__][k1__],0); - check_less_or_equal(function__,"ambig[k0__][k1__]",ambig[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"reward_var[k0__][k1__]",reward_var[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"reward_fix[k0__][k1__]",reward_fix[k0__][k1__],0); - } - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("gamma_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_cra_exp() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - 
mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - if 
(!(context__.contains_r("gamma_pr"))) - throw std::runtime_error("variable gamma_pr missing"); - vals_r__ = context__.vals_r("gamma_pr"); - pos__ = 0U; - validate_non_negative_index("gamma_pr", "N", N); - context__.validate_dims("initialization", "gamma_pr", "vector_d", context__.to_vec(N)); - vector_d gamma_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - gamma_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(gamma_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable gamma_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = 
in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - Eigen::Matrix gamma_pr; - (void) gamma_pr; // dummy to suppress unused var warning - if (jacobian__) - gamma_pr = in__.vector_constrain(N,lp__); - else - gamma_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - validate_non_negative_index("gamma", "N", N); - Eigen::Matrix gamma(static_cast(N)); - (void) gamma; // dummy to suppress unused var warning - - stan::math::initialize(gamma, DUMMY_VAR__); - stan::math::fill(gamma,DUMMY_VAR__); - - - stan::math::assign(alpha, multiply(Phi_approx(add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),alpha_pr))),2)); - stan::math::assign(beta, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),beta_pr))); - stan::math::assign(gamma, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),gamma_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw 
std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(gamma(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: gamma" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - check_greater_or_equal(function__,"gamma",gamma,0); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 5)); - lp_accum__.add(normal_log(alpha_pr, 0, 1)); - lp_accum__.add(normal_log(beta_pr, 0, 1)); - lp_accum__.add(normal_log(gamma_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - local_scalar_t__ u_fix; - (void) u_fix; // dummy to suppress unused var warning - - stan::math::initialize(u_fix, DUMMY_VAR__); - stan::math::fill(u_fix,DUMMY_VAR__); - local_scalar_t__ u_var; - (void) u_var; // dummy to suppress unused var warning - - stan::math::initialize(u_var, DUMMY_VAR__); - stan::math::fill(u_var,DUMMY_VAR__); - local_scalar_t__ p_var; - (void) p_var; // dummy to suppress unused var warning - - stan::math::initialize(p_var, DUMMY_VAR__); - stan::math::fill(p_var,DUMMY_VAR__); - - - stan::math::assign(u_fix, subjective_value(get_base1(alpha,i,"alpha",1),get_base1(beta,i,"beta",1),0.5,0,get_base1(get_base1(reward_fix,i,"reward_fix",1),t,"reward_fix",2), pstream__)); - stan::math::assign(u_var, subjective_value(get_base1(alpha,i,"alpha",1),get_base1(beta,i,"beta",1),get_base1(get_base1(prob,i,"prob",1),t,"prob",2),get_base1(get_base1(ambig,i,"ambig",1),t,"ambig",2),get_base1(get_base1(reward_var,i,"reward_var",1),t,"reward_var",2), pstream__)); - stan::math::assign(p_var, inv_logit((get_base1(gamma,i,"gamma",1) * (u_var - u_fix)))); - 
lp_accum__.add(bernoulli_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),p_var)); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("alpha_pr"); - names__.push_back("beta_pr"); - names__.push_back("gamma_pr"); - names__.push_back("alpha"); - names__.push_back("beta"); - names__.push_back("gamma"); - names__.push_back("mu_alpha"); - names__.push_back("mu_beta"); - names__.push_back("mu_gamma"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - 
dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_cra_exp_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - vector_d gamma_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(gamma_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix 
beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - validate_non_negative_index("gamma", "N", N); - Eigen::Matrix gamma(static_cast(N)); - (void) gamma; // dummy to suppress unused var warning - - stan::math::initialize(gamma, DUMMY_VAR__); - stan::math::fill(gamma,DUMMY_VAR__); - - - stan::math::assign(alpha, multiply(Phi_approx(add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),alpha_pr))),2)); - stan::math::assign(beta, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),beta_pr))); - stan::math::assign(gamma, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),gamma_pr)))); - - // validate transformed parameters - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - check_greater_or_equal(function__,"gamma",gamma,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(gamma[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - local_scalar_t__ mu_gamma; - (void) mu_gamma; // dummy to suppress unused var warning - - stan::math::initialize(mu_gamma, DUMMY_VAR__); - stan::math::fill(mu_gamma,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, 
DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_alpha, (Phi_approx(get_base1(mu_p,1,"mu_p",1)) * 2)); - stan::math::assign(mu_beta, get_base1(mu_p,2,"mu_p",1)); - stan::math::assign(mu_gamma, stan::math::exp(get_base1(mu_p,3,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - local_scalar_t__ u_fix; - (void) u_fix; // dummy to suppress unused var warning - - stan::math::initialize(u_fix, DUMMY_VAR__); - stan::math::fill(u_fix,DUMMY_VAR__); - local_scalar_t__ u_var; - (void) u_var; // dummy to suppress unused var warning - - stan::math::initialize(u_var, DUMMY_VAR__); - stan::math::fill(u_var,DUMMY_VAR__); - local_scalar_t__ p_var; - (void) p_var; // dummy to suppress unused var warning - - stan::math::initialize(p_var, DUMMY_VAR__); - stan::math::fill(p_var,DUMMY_VAR__); - - - stan::math::assign(u_fix, subjective_value(get_base1(alpha,i,"alpha",1),get_base1(beta,i,"beta",1),0.5,0,get_base1(get_base1(reward_fix,i,"reward_fix",1),t,"reward_fix",2), pstream__)); - stan::math::assign(u_var, subjective_value(get_base1(alpha,i,"alpha",1),get_base1(beta,i,"beta",1),get_base1(get_base1(prob,i,"prob",1),t,"prob",2),get_base1(get_base1(ambig,i,"ambig",1),t,"ambig",2),get_base1(get_base1(reward_var,i,"reward_var",1),t,"reward_var",2), 
pstream__)); - stan::math::assign(p_var, inv_logit((get_base1(gamma,i,"gamma",1) * (u_var - u_fix)))); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + bernoulli_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),p_var)), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(p_var, base_rng__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_alpha",mu_alpha,0); - check_less_or_equal(function__,"mu_alpha",mu_alpha,2); - check_greater_or_equal(function__,"mu_gamma",mu_gamma,0); - - // write generated quantities - vars__.push_back(mu_alpha); - vars__.push_back(mu_beta); - vars__.push_back(mu_gamma); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - 
vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_cra_exp"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gamma_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gamma" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_gamma"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gamma_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gamma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_gamma"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_cra_linear_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_cra_linear"); - reader.add_event(120, 118, "end", "model_cra_linear"); - return reader; -} - -template -typename boost::math::tools::promote_args::type>::type -subjective_value(const T0__& alpha, - const T1__& beta, - const T2__& p, - const T3__& a, - const T4__& v, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - (void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - - return stan::math::promote_scalar(((p - ((beta * a) / 2)) * pow(v,alpha))); - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} - - -struct subjective_value_functor__ { - template - typename boost::math::tools::promote_args::type>::type - operator()(const T0__& alpha, - const T1__& beta, - const T2__& p, - const T3__& a, - const T4__& v, std::ostream* pstream__) const { - return subjective_value(alpha, beta, p, a, v, pstream__); - } -}; - -class model_cra_linear : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > prob; - vector > 
ambig; - vector > reward_var; - vector > reward_fix; -public: - model_cra_linear(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_cra_linear(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_cra_linear_namespace::model_cra_linear"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - 
context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("prob", "N", N); - validate_non_negative_index("prob", "T", T); - context__.validate_dims("data initialization", "prob", "double", context__.to_vec(N,T)); - validate_non_negative_index("prob", "N", N); - validate_non_negative_index("prob", "T", T); - prob = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("prob"); - pos__ = 0; - size_t prob_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < prob_limit_1__; ++i_1__) { - size_t prob_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < prob_limit_0__; ++i_0__) { - prob[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("ambig", "N", N); - validate_non_negative_index("ambig", "T", T); - context__.validate_dims("data initialization", "ambig", "double", context__.to_vec(N,T)); - validate_non_negative_index("ambig", "N", N); - validate_non_negative_index("ambig", "T", T); - ambig = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("ambig"); - pos__ = 0; - size_t ambig_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < ambig_limit_1__; ++i_1__) { - size_t ambig_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < ambig_limit_0__; ++i_0__) { - ambig[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("reward_var", "N", N); - validate_non_negative_index("reward_var", "T", T); - context__.validate_dims("data initialization", "reward_var", "double", context__.to_vec(N,T)); - 
validate_non_negative_index("reward_var", "N", N); - validate_non_negative_index("reward_var", "T", T); - reward_var = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("reward_var"); - pos__ = 0; - size_t reward_var_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < reward_var_limit_1__; ++i_1__) { - size_t reward_var_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < reward_var_limit_0__; ++i_0__) { - reward_var[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("reward_fix", "N", N); - validate_non_negative_index("reward_fix", "T", T); - context__.validate_dims("data initialization", "reward_fix", "double", context__.to_vec(N,T)); - validate_non_negative_index("reward_fix", "N", N); - validate_non_negative_index("reward_fix", "T", T); - reward_fix = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("reward_fix"); - pos__ = 0; - size_t reward_fix_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < reward_fix_limit_1__; ++i_1__) { - size_t reward_fix_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < reward_fix_limit_0__; ++i_0__) { - reward_fix[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],0); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"prob[k0__][k1__]",prob[k0__][k1__],0); - check_less_or_equal(function__,"prob[k0__][k1__]",prob[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int 
k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"ambig[k0__][k1__]",ambig[k0__][k1__],0); - check_less_or_equal(function__,"ambig[k0__][k1__]",ambig[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"reward_var[k0__][k1__]",reward_var[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"reward_fix[k0__][k1__]",reward_fix[k0__][k1__],0); - } - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("gamma_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_cra_linear() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ 
= 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); 
- } - - if (!(context__.contains_r("gamma_pr"))) - throw std::runtime_error("variable gamma_pr missing"); - vals_r__ = context__.vals_r("gamma_pr"); - pos__ = 0U; - validate_non_negative_index("gamma_pr", "N", N); - context__.validate_dims("initialization", "gamma_pr", "vector_d", context__.to_vec(N)); - vector_d gamma_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - gamma_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(gamma_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable gamma_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = 
in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - Eigen::Matrix gamma_pr; - (void) gamma_pr; // dummy to suppress unused var warning - if (jacobian__) - gamma_pr = in__.vector_constrain(N,lp__); - else - gamma_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - validate_non_negative_index("gamma", "N", N); - Eigen::Matrix gamma(static_cast(N)); - (void) gamma; // dummy to suppress unused var warning - - stan::math::initialize(gamma, DUMMY_VAR__); - stan::math::fill(gamma,DUMMY_VAR__); - - - stan::math::assign(alpha, multiply(Phi_approx(add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),alpha_pr))),2)); - stan::math::assign(beta, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),beta_pr))); - stan::math::assign(gamma, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),gamma_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw 
std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(gamma(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: gamma" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - check_greater_or_equal(function__,"gamma",gamma,0); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 5)); - lp_accum__.add(normal_log(alpha_pr, 0, 1)); - lp_accum__.add(normal_log(beta_pr, 0, 1)); - lp_accum__.add(normal_log(gamma_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - local_scalar_t__ u_fix; - (void) u_fix; // dummy to suppress unused var warning - - stan::math::initialize(u_fix, DUMMY_VAR__); - stan::math::fill(u_fix,DUMMY_VAR__); - local_scalar_t__ u_var; - (void) u_var; // dummy to suppress unused var warning - - stan::math::initialize(u_var, DUMMY_VAR__); - stan::math::fill(u_var,DUMMY_VAR__); - local_scalar_t__ p_var; - (void) p_var; // dummy to suppress unused var warning - - stan::math::initialize(p_var, DUMMY_VAR__); - stan::math::fill(p_var,DUMMY_VAR__); - - - stan::math::assign(u_fix, subjective_value(get_base1(alpha,i,"alpha",1),get_base1(beta,i,"beta",1),0.5,0,get_base1(get_base1(reward_fix,i,"reward_fix",1),t,"reward_fix",2), pstream__)); - stan::math::assign(u_var, subjective_value(get_base1(alpha,i,"alpha",1),get_base1(beta,i,"beta",1),get_base1(get_base1(prob,i,"prob",1),t,"prob",2),get_base1(get_base1(ambig,i,"ambig",1),t,"ambig",2),get_base1(get_base1(reward_var,i,"reward_var",1),t,"reward_var",2), pstream__)); - stan::math::assign(p_var, inv_logit((get_base1(gamma,i,"gamma",1) * (u_var - u_fix)))); - 
lp_accum__.add(bernoulli_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),p_var)); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("alpha_pr"); - names__.push_back("beta_pr"); - names__.push_back("gamma_pr"); - names__.push_back("alpha"); - names__.push_back("beta"); - names__.push_back("gamma"); - names__.push_back("mu_alpha"); - names__.push_back("mu_beta"); - names__.push_back("mu_gamma"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - 
dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_cra_linear_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - vector_d gamma_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(gamma_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix 
beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - validate_non_negative_index("gamma", "N", N); - Eigen::Matrix gamma(static_cast(N)); - (void) gamma; // dummy to suppress unused var warning - - stan::math::initialize(gamma, DUMMY_VAR__); - stan::math::fill(gamma,DUMMY_VAR__); - - - stan::math::assign(alpha, multiply(Phi_approx(add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),alpha_pr))),2)); - stan::math::assign(beta, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),beta_pr))); - stan::math::assign(gamma, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),gamma_pr)))); - - // validate transformed parameters - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - check_greater_or_equal(function__,"gamma",gamma,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(gamma[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - local_scalar_t__ mu_gamma; - (void) mu_gamma; // dummy to suppress unused var warning - - stan::math::initialize(mu_gamma, DUMMY_VAR__); - stan::math::fill(mu_gamma,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, 
DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_alpha, (Phi_approx(get_base1(mu_p,1,"mu_p",1)) * 2)); - stan::math::assign(mu_beta, get_base1(mu_p,2,"mu_p",1)); - stan::math::assign(mu_gamma, stan::math::exp(get_base1(mu_p,3,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - local_scalar_t__ u_fix; - (void) u_fix; // dummy to suppress unused var warning - - stan::math::initialize(u_fix, DUMMY_VAR__); - stan::math::fill(u_fix,DUMMY_VAR__); - local_scalar_t__ u_var; - (void) u_var; // dummy to suppress unused var warning - - stan::math::initialize(u_var, DUMMY_VAR__); - stan::math::fill(u_var,DUMMY_VAR__); - local_scalar_t__ p_var; - (void) p_var; // dummy to suppress unused var warning - - stan::math::initialize(p_var, DUMMY_VAR__); - stan::math::fill(p_var,DUMMY_VAR__); - - - stan::math::assign(u_fix, subjective_value(get_base1(alpha,i,"alpha",1),get_base1(beta,i,"beta",1),0.5,0,get_base1(get_base1(reward_fix,i,"reward_fix",1),t,"reward_fix",2), pstream__)); - stan::math::assign(u_var, subjective_value(get_base1(alpha,i,"alpha",1),get_base1(beta,i,"beta",1),get_base1(get_base1(prob,i,"prob",1),t,"prob",2),get_base1(get_base1(ambig,i,"ambig",1),t,"ambig",2),get_base1(get_base1(reward_var,i,"reward_var",1),t,"reward_var",2), 
pstream__)); - stan::math::assign(p_var, inv_logit((get_base1(gamma,i,"gamma",1) * (u_var - u_fix)))); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + bernoulli_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),p_var)), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(p_var, base_rng__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_alpha",mu_alpha,0); - check_less_or_equal(function__,"mu_alpha",mu_alpha,2); - check_greater_or_equal(function__,"mu_gamma",mu_gamma,0); - - // write generated quantities - vars__.push_back(mu_alpha); - vars__.push_back(mu_beta); - vars__.push_back(mu_gamma); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - 
vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_cra_linear"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gamma_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gamma" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_gamma"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gamma_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gamma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_gamma"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_dd_cs_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_dd_cs"); - reader.add_event(106, 104, "end", "model_dd_cs"); - return reader; -} - -class model_dd_cs : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > delay_later; - vector > amount_later; - vector > delay_sooner; - vector > amount_sooner; - vector > choice; -public: - model_dd_cs(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_dd_cs(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_dd_cs_namespace::model_dd_cs"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); 
- N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("delay_later", "N", N); - validate_non_negative_index("delay_later", "T", T); - context__.validate_dims("data initialization", "delay_later", "double", context__.to_vec(N,T)); - validate_non_negative_index("delay_later", "N", N); - validate_non_negative_index("delay_later", "T", T); - delay_later = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("delay_later"); - pos__ = 0; - size_t delay_later_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < delay_later_limit_1__; ++i_1__) { - size_t delay_later_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < delay_later_limit_0__; ++i_0__) { - delay_later[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("amount_later", "N", N); - validate_non_negative_index("amount_later", "T", T); - context__.validate_dims("data initialization", "amount_later", "double", context__.to_vec(N,T)); - validate_non_negative_index("amount_later", "N", N); - validate_non_negative_index("amount_later", "T", T); - amount_later = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("amount_later"); - pos__ = 0; - size_t amount_later_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < amount_later_limit_1__; ++i_1__) { - size_t amount_later_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < amount_later_limit_0__; ++i_0__) 
{ - amount_later[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("delay_sooner", "N", N); - validate_non_negative_index("delay_sooner", "T", T); - context__.validate_dims("data initialization", "delay_sooner", "double", context__.to_vec(N,T)); - validate_non_negative_index("delay_sooner", "N", N); - validate_non_negative_index("delay_sooner", "T", T); - delay_sooner = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("delay_sooner"); - pos__ = 0; - size_t delay_sooner_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < delay_sooner_limit_1__; ++i_1__) { - size_t delay_sooner_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < delay_sooner_limit_0__; ++i_0__) { - delay_sooner[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("amount_sooner", "N", N); - validate_non_negative_index("amount_sooner", "T", T); - context__.validate_dims("data initialization", "amount_sooner", "double", context__.to_vec(N,T)); - validate_non_negative_index("amount_sooner", "N", N); - validate_non_negative_index("amount_sooner", "T", T); - amount_sooner = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("amount_sooner"); - pos__ = 0; - size_t amount_sooner_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < amount_sooner_limit_1__; ++i_1__) { - size_t amount_sooner_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < amount_sooner_limit_0__; ++i_0__) { - amount_sooner[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = 
N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"delay_later[k0__][k1__]",delay_later[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"amount_later[k0__][k1__]",amount_later[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"delay_sooner[k0__][k1__]",delay_sooner[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"amount_sooner[k0__][k1__]",amount_sooner[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],1); - } - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("r_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("s_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, 
prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_dd_cs() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("r_pr"))) - throw std::runtime_error("variable r_pr missing"); - vals_r__ = context__.vals_r("r_pr"); - pos__ = 0U; - validate_non_negative_index("r_pr", "N", N); - context__.validate_dims("initialization", "r_pr", "vector_d", context__.to_vec(N)); - vector_d r_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - 
r_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(r_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable r_pr: ") + e.what()); - } - - if (!(context__.contains_r("s_pr"))) - throw std::runtime_error("variable s_pr missing"); - vals_r__ = context__.vals_r("s_pr"); - pos__ = 0U; - validate_non_negative_index("s_pr", "N", N); - context__.validate_dims("initialization", "s_pr", "vector_d", context__.to_vec(N)); - vector_d s_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - s_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(s_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable s_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ 
DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix r_pr; - (void) r_pr; // dummy to suppress unused var warning - if (jacobian__) - r_pr = in__.vector_constrain(N,lp__); - else - r_pr = in__.vector_constrain(N); - - Eigen::Matrix s_pr; - (void) s_pr; // dummy to suppress unused var warning - if (jacobian__) - s_pr = in__.vector_constrain(N,lp__); - else - s_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("r", "N", N); - Eigen::Matrix r(static_cast(N)); - (void) r; // dummy to suppress unused var warning - - stan::math::initialize(r, DUMMY_VAR__); - stan::math::fill(r,DUMMY_VAR__); - validate_non_negative_index("s", "N", N); - Eigen::Matrix s(static_cast(N)); - (void) s; // dummy to suppress unused var warning - - stan::math::initialize(s, DUMMY_VAR__); - stan::math::fill(s,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(r, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 
Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(r_pr,i,"r_pr",1)))), - "assigning variable r"); - stan::model::assign(s, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(s_pr,i,"s_pr",1)))) * 10), - "assigning variable s"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(r(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: r" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(s(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: s" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"r",r,0); - check_less_or_equal(function__,"r",r,1); - check_greater_or_equal(function__,"s",s,0); - check_less_or_equal(function__,"s",s,10); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(r_pr, 0, 1)); - lp_accum__.add(normal_log(s_pr, 0, 
1)); - lp_accum__.add(normal_log(beta_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - { - local_scalar_t__ ev_later; - (void) ev_later; // dummy to suppress unused var warning - - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - local_scalar_t__ ev_sooner; - (void) ev_sooner; // dummy to suppress unused var warning - - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::math::assign(ev_later, (get_base1(get_base1(amount_later,i,"amount_later",1),t,"amount_later",2) * stan::math::exp((-(1) * pow((get_base1(r,i,"r",1) * get_base1(get_base1(delay_later,i,"delay_later",1),t,"delay_later",2)),get_base1(s,i,"s",1)))))); - stan::math::assign(ev_sooner, (get_base1(get_base1(amount_sooner,i,"amount_sooner",1),t,"amount_sooner",2) * stan::math::exp((-(1) * pow((get_base1(r,i,"r",1) * get_base1(get_base1(delay_sooner,i,"delay_sooner",1),t,"delay_sooner",2)),get_base1(s,i,"s",1)))))); - lp_accum__.add(bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), (get_base1(beta,i,"beta",1) * (ev_later - ev_sooner)))); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - 
names__.push_back("r_pr"); - names__.push_back("s_pr"); - names__.push_back("beta_pr"); - names__.push_back("r"); - names__.push_back("s"); - names__.push_back("beta"); - names__.push_back("mu_r"); - names__.push_back("mu_s"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_dd_cs_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d r_pr = in__.vector_constrain(N); - vector_d s_pr = in__.vector_constrain(N); 
- vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(r_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(s_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("r", "N", N); - Eigen::Matrix r(static_cast(N)); - (void) r; // dummy to suppress unused var warning - - stan::math::initialize(r, DUMMY_VAR__); - stan::math::fill(r,DUMMY_VAR__); - validate_non_negative_index("s", "N", N); - Eigen::Matrix s(static_cast(N)); - (void) s; // dummy to suppress unused var warning - - stan::math::initialize(s, DUMMY_VAR__); - stan::math::fill(s,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(r, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(r_pr,i,"r_pr",1)))), - "assigning variable r"); - stan::model::assign(s, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(s_pr,i,"s_pr",1)))) * 10), - "assigning variable s"); - stan::model::assign(beta, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - check_greater_or_equal(function__,"r",r,0); - check_less_or_equal(function__,"r",r,1); - check_greater_or_equal(function__,"s",s,0); - check_less_or_equal(function__,"s",s,10); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(r[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(s[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_r; - (void) mu_r; // dummy to suppress unused var warning - - stan::math::initialize(mu_r, DUMMY_VAR__); - stan::math::fill(mu_r,DUMMY_VAR__); - local_scalar_t__ mu_s; - (void) mu_s; // dummy to suppress unused var warning - - stan::math::initialize(mu_s, DUMMY_VAR__); - stan::math::fill(mu_s,DUMMY_VAR__); - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), 
stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_r, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_s, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 10)); - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 5)); - - for (int i = 1; i <= N; ++i) { - { - local_scalar_t__ ev_later; - (void) ev_later; // dummy to suppress unused var warning - - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - local_scalar_t__ ev_sooner; - (void) ev_sooner; // dummy to suppress unused var warning - - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::math::assign(ev_later, (get_base1(get_base1(amount_later,i,"amount_later",1),t,"amount_later",2) * stan::math::exp((-(1) * pow((get_base1(r,i,"r",1) * get_base1(get_base1(delay_later,i,"delay_later",1),t,"delay_later",2)),get_base1(s,i,"s",1)))))); - stan::math::assign(ev_sooner, (get_base1(get_base1(amount_sooner,i,"amount_sooner",1),t,"amount_sooner",2) * stan::math::exp((-(1) * pow((get_base1(r,i,"r",1) * get_base1(get_base1(delay_sooner,i,"delay_sooner",1),t,"delay_sooner",2)),get_base1(s,i,"s",1)))))); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),(get_base1(beta,i,"beta",1) * (ev_later - ev_sooner))))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), 
stan::model::nil_index_list())), - bernoulli_rng(inv_logit((get_base1(beta,i,"beta",1) * (ev_later - ev_sooner))), base_rng__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_r",mu_r,0); - check_less_or_equal(function__,"mu_r",mu_r,1); - check_greater_or_equal(function__,"mu_s",mu_s,0); - check_less_or_equal(function__,"mu_s",mu_s,10); - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,5); - - // write generated quantities - vars__.push_back(mu_r); - vars__.push_back(mu_s); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_dd_cs"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - 
param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "s_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "s" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_r"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_s"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "s_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "s" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_r"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_s"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_dd_cs_single_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_dd_cs_single"); - reader.add_event(62, 60, "end", "model_dd_cs_single"); - return reader; -} - -class model_dd_cs_single : public prob_grad { -private: - int Tsubj; - vector delay_later; - vector amount_later; - vector delay_sooner; - vector amount_sooner; - vector choice; -public: - model_dd_cs_single(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_dd_cs_single(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_dd_cs_single_namespace::model_dd_cs_single"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "Tsubj", 
"int", context__.to_vec()); - Tsubj = int(0); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - Tsubj = vals_i__[pos__++]; - validate_non_negative_index("delay_later", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "delay_later", "double", context__.to_vec(Tsubj)); - validate_non_negative_index("delay_later", "Tsubj", Tsubj); - delay_later = std::vector(Tsubj,double(0)); - vals_r__ = context__.vals_r("delay_later"); - pos__ = 0; - size_t delay_later_limit_0__ = Tsubj; - for (size_t i_0__ = 0; i_0__ < delay_later_limit_0__; ++i_0__) { - delay_later[i_0__] = vals_r__[pos__++]; - } - validate_non_negative_index("amount_later", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "amount_later", "double", context__.to_vec(Tsubj)); - validate_non_negative_index("amount_later", "Tsubj", Tsubj); - amount_later = std::vector(Tsubj,double(0)); - vals_r__ = context__.vals_r("amount_later"); - pos__ = 0; - size_t amount_later_limit_0__ = Tsubj; - for (size_t i_0__ = 0; i_0__ < amount_later_limit_0__; ++i_0__) { - amount_later[i_0__] = vals_r__[pos__++]; - } - validate_non_negative_index("delay_sooner", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "delay_sooner", "double", context__.to_vec(Tsubj)); - validate_non_negative_index("delay_sooner", "Tsubj", Tsubj); - delay_sooner = std::vector(Tsubj,double(0)); - vals_r__ = context__.vals_r("delay_sooner"); - pos__ = 0; - size_t delay_sooner_limit_0__ = Tsubj; - for (size_t i_0__ = 0; i_0__ < delay_sooner_limit_0__; ++i_0__) { - delay_sooner[i_0__] = vals_r__[pos__++]; - } - validate_non_negative_index("amount_sooner", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "amount_sooner", "double", context__.to_vec(Tsubj)); - validate_non_negative_index("amount_sooner", "Tsubj", Tsubj); - amount_sooner = std::vector(Tsubj,double(0)); - vals_r__ = context__.vals_r("amount_sooner"); - pos__ = 0; - size_t amount_sooner_limit_0__ = Tsubj; - for (size_t i_0__ = 0; 
i_0__ < amount_sooner_limit_0__; ++i_0__) { - amount_sooner[i_0__] = vals_r__[pos__++]; - } - validate_non_negative_index("choice", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(Tsubj)); - validate_non_negative_index("choice", "Tsubj", Tsubj); - choice = std::vector(Tsubj,int(0)); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_0__ = Tsubj; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__] = vals_i__[pos__++]; - } - - // validate, data variables - check_greater_or_equal(function__,"Tsubj",Tsubj,1); - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"delay_later[k0__]",delay_later[k0__],0); - } - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"amount_later[k0__]",amount_later[k0__],0); - } - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"delay_sooner[k0__]",delay_sooner[k0__],0); - } - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"amount_sooner[k0__]",amount_sooner[k0__],0); - } - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"choice[k0__]",choice[k0__],-(1)); - check_less_or_equal(function__,"choice[k0__]",choice[k0__],1); - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - ++num_params_r__; - ++num_params_r__; - ++num_params_r__; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_dd_cs_single() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer 
writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("r"))) - throw std::runtime_error("variable r missing"); - vals_r__ = context__.vals_r("r"); - pos__ = 0U; - context__.validate_dims("initialization", "r", "double", context__.to_vec()); - double r(0); - r = vals_r__[pos__++]; - try { - writer__.scalar_lub_unconstrain(0,1,r); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable r: ") + e.what()); - } - - if (!(context__.contains_r("s"))) - throw std::runtime_error("variable s missing"); - vals_r__ = context__.vals_r("s"); - pos__ = 0U; - context__.validate_dims("initialization", "s", "double", context__.to_vec()); - double s(0); - s = vals_r__[pos__++]; - try { - writer__.scalar_lub_unconstrain(0,10,s); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable s: ") + e.what()); - } - - if (!(context__.contains_r("beta"))) - throw std::runtime_error("variable beta missing"); - vals_r__ = context__.vals_r("beta"); - pos__ = 0U; - context__.validate_dims("initialization", "beta", "double", context__.to_vec()); - double beta(0); - beta = vals_r__[pos__++]; - try { - writer__.scalar_lub_unconstrain(0,5,beta); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - 
std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - local_scalar_t__ r; - (void) r; // dummy to suppress unused var warning - if (jacobian__) - r = in__.scalar_lub_constrain(0,1,lp__); - else - r = in__.scalar_lub_constrain(0,1); - - local_scalar_t__ s; - (void) s; // dummy to suppress unused var warning - if (jacobian__) - s = in__.scalar_lub_constrain(0,10,lp__); - else - s = in__.scalar_lub_constrain(0,10); - - local_scalar_t__ beta; - (void) beta; // dummy to suppress unused var warning - if (jacobian__) - beta = in__.scalar_lub_constrain(0,5,lp__); - else - beta = in__.scalar_lub_constrain(0,5); - - - // transformed parameters - validate_non_negative_index("ev_later", "Tsubj", Tsubj); - vector ev_later(Tsubj); - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - validate_non_negative_index("ev_sooner", "Tsubj", Tsubj); - vector ev_sooner(Tsubj); - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - for (int t = 1; t <= Tsubj; ++t) { - - stan::model::assign(ev_later, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - (get_base1(amount_later,t,"amount_later",1) * stan::math::exp((-(1) * pow((r * get_base1(delay_later,t,"delay_later",1)),s)))), - "assigning variable ev_later"); - stan::model::assign(ev_sooner, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - (get_base1(amount_sooner,t,"amount_sooner",1) * stan::math::exp((-(1) * pow((r * get_base1(delay_sooner,t,"delay_sooner",1)),s)))), - "assigning variable ev_sooner"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < Tsubj; ++i0__) { - if 
(stan::math::is_uninitialized(ev_later[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ev_later" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < Tsubj; ++i0__) { - if (stan::math::is_uninitialized(ev_sooner[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ev_sooner" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - - // model body - - lp_accum__.add(uniform_log(r, 0, 1)); - lp_accum__.add(uniform_log(s, 0, 10)); - lp_accum__.add(uniform_log(beta, 0, 5)); - for (int t = 1; t <= Tsubj; ++t) { - - lp_accum__.add(bernoulli_logit_log(get_base1(choice,t,"choice",1), (beta * (get_base1(ev_later,t,"ev_later",1) - get_base1(ev_sooner,t,"ev_sooner",1))))); - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("r"); - names__.push_back("s"); - names__.push_back("beta"); - names__.push_back("ev_later"); - names__.push_back("ev_sooner"); - names__.push_back("logR"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - 
dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(Tsubj); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(Tsubj); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(Tsubj); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_dd_cs_single_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - double r = in__.scalar_lub_constrain(0,1); - double s = in__.scalar_lub_constrain(0,10); - double beta = in__.scalar_lub_constrain(0,5); - vars__.push_back(r); - vars__.push_back(s); - vars__.push_back(beta); - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("ev_later", "Tsubj", Tsubj); - vector ev_later(Tsubj); - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - validate_non_negative_index("ev_sooner", "Tsubj", Tsubj); - vector ev_sooner(Tsubj); - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - for (int t = 1; t <= Tsubj; ++t) { - - stan::model::assign(ev_later, - stan::model::cons_list(stan::model::index_uni(t), 
stan::model::nil_index_list()), - (get_base1(amount_later,t,"amount_later",1) * stan::math::exp((-(1) * pow((r * get_base1(delay_later,t,"delay_later",1)),s)))), - "assigning variable ev_later"); - stan::model::assign(ev_sooner, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - (get_base1(amount_sooner,t,"amount_sooner",1) * stan::math::exp((-(1) * pow((r * get_base1(delay_sooner,t,"delay_sooner",1)),s)))), - "assigning variable ev_sooner"); - } - - // validate transformed parameters - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < Tsubj; ++k_0__) { - vars__.push_back(ev_later[k_0__]); - } - for (int k_0__ = 0; k_0__ < Tsubj; ++k_0__) { - vars__.push_back(ev_sooner[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ logR; - (void) logR; // dummy to suppress unused var warning - - stan::math::initialize(logR, DUMMY_VAR__); - stan::math::fill(logR,DUMMY_VAR__); - local_scalar_t__ log_lik; - (void) log_lik; // dummy to suppress unused var warning - - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "Tsubj", Tsubj); - vector y_pred(Tsubj); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - stan::math::assign(logR, stan::math::log(r)); - - stan::math::assign(log_lik, 0); - for (int t = 1; t <= Tsubj; ++t) { - - stan::math::assign(log_lik, stan::model::deep_copy((log_lik + bernoulli_logit_log(get_base1(choice,t,"choice",1),(beta * (get_base1(ev_later,t,"ev_later",1) - get_base1(ev_sooner,t,"ev_sooner",1))))))); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - bernoulli_rng(inv_logit((beta * (get_base1(ev_later,t,"ev_later",1) - get_base1(ev_sooner,t,"ev_sooner",1)))), base_rng__), - "assigning variable y_pred"); - } - - // validate generated 
quantities - - // write generated quantities - vars__.push_back(logR); - vars__.push_back(log_lik); - for (int k_0__ = 0; k_0__ < Tsubj; ++k_0__) { - vars__.push_back(y_pred[k_0__]); - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_dd_cs_single"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "r"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "s"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "beta"; - param_names__.push_back(param_name_stream__.str()); - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ev_later" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ev_sooner" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "logR"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "r"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "s"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "beta"; - param_names__.push_back(param_name_stream__.str()); - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ev_later" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ev_sooner" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "logR"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_dd_exp_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_dd_exp"); - reader.add_event(100, 98, "end", "model_dd_exp"); - return reader; -} - -class model_dd_exp : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > delay_later; - vector > amount_later; - vector > delay_sooner; - vector > amount_sooner; - vector > choice; -public: - model_dd_exp(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_dd_exp(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; 
- - static const char* function__ = "model_dd_exp_namespace::model_dd_exp"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("delay_later", "N", N); - validate_non_negative_index("delay_later", "T", T); - context__.validate_dims("data initialization", "delay_later", "double", context__.to_vec(N,T)); - validate_non_negative_index("delay_later", "N", N); - validate_non_negative_index("delay_later", "T", T); - delay_later = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("delay_later"); - pos__ = 0; - size_t delay_later_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < delay_later_limit_1__; ++i_1__) { - size_t delay_later_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < delay_later_limit_0__; ++i_0__) { - delay_later[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("amount_later", "N", N); - validate_non_negative_index("amount_later", "T", T); - context__.validate_dims("data 
initialization", "amount_later", "double", context__.to_vec(N,T)); - validate_non_negative_index("amount_later", "N", N); - validate_non_negative_index("amount_later", "T", T); - amount_later = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("amount_later"); - pos__ = 0; - size_t amount_later_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < amount_later_limit_1__; ++i_1__) { - size_t amount_later_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < amount_later_limit_0__; ++i_0__) { - amount_later[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("delay_sooner", "N", N); - validate_non_negative_index("delay_sooner", "T", T); - context__.validate_dims("data initialization", "delay_sooner", "double", context__.to_vec(N,T)); - validate_non_negative_index("delay_sooner", "N", N); - validate_non_negative_index("delay_sooner", "T", T); - delay_sooner = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("delay_sooner"); - pos__ = 0; - size_t delay_sooner_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < delay_sooner_limit_1__; ++i_1__) { - size_t delay_sooner_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < delay_sooner_limit_0__; ++i_0__) { - delay_sooner[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("amount_sooner", "N", N); - validate_non_negative_index("amount_sooner", "T", T); - context__.validate_dims("data initialization", "amount_sooner", "double", context__.to_vec(N,T)); - validate_non_negative_index("amount_sooner", "N", N); - validate_non_negative_index("amount_sooner", "T", T); - amount_sooner = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("amount_sooner"); - pos__ = 0; - size_t amount_sooner_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < amount_sooner_limit_1__; ++i_1__) { - size_t amount_sooner_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < amount_sooner_limit_0__; ++i_0__) { - amount_sooner[i_0__][i_1__] = vals_r__[pos__++]; - } - } - 
validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"delay_later[k0__][k1__]",delay_later[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"amount_later[k0__][k1__]",amount_later[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"delay_sooner[k0__][k1__]",delay_sooner[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"amount_sooner[k0__][k1__]",amount_sooner[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],1); - } - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter 
ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "2", 2); - num_params_r__ += 2; - validate_non_negative_index("sigma", "2", 2); - num_params_r__ += 2; - validate_non_negative_index("r_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_dd_exp() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "2", 2); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(2)); - vector_d mu_p(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "2", 2); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(2)); - vector_d sigma(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw 
std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("r_pr"))) - throw std::runtime_error("variable r_pr missing"); - vals_r__ = context__.vals_r("r_pr"); - pos__ = 0U; - validate_non_negative_index("r_pr", "N", N); - context__.validate_dims("initialization", "r_pr", "vector_d", context__.to_vec(N)); - vector_d r_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - r_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(r_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable r_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator 
lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(2,lp__); - else - mu_p = in__.vector_constrain(2); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,2,lp__); - else - sigma = in__.vector_lb_constrain(0,2); - - Eigen::Matrix r_pr; - (void) r_pr; // dummy to suppress unused var warning - if (jacobian__) - r_pr = in__.vector_constrain(N,lp__); - else - r_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("r", "N", N); - Eigen::Matrix r(static_cast(N)); - (void) r; // dummy to suppress unused var warning - - stan::math::initialize(r, DUMMY_VAR__); - stan::math::fill(r,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(r, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(r_pr,i,"r_pr",1)))), - "assigning variable r"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(r(i0__))) { - std::stringstream msg__; 
- msg__ << "Undefined transformed parameter: r" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"r",r,0); - check_less_or_equal(function__,"r",r,1); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(r_pr, 0, 1)); - lp_accum__.add(normal_log(beta_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - { - local_scalar_t__ ev_later; - (void) ev_later; // dummy to suppress unused var warning - - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - local_scalar_t__ ev_sooner; - (void) ev_sooner; // dummy to suppress unused var warning - - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::math::assign(ev_later, (get_base1(get_base1(amount_later,i,"amount_later",1),t,"amount_later",2) * stan::math::exp(((-(1) * get_base1(r,i,"r",1)) * get_base1(get_base1(delay_later,i,"delay_later",1),t,"delay_later",2))))); - stan::math::assign(ev_sooner, (get_base1(get_base1(amount_sooner,i,"amount_sooner",1),t,"amount_sooner",2) * stan::math::exp(((-(1) * get_base1(r,i,"r",1)) * get_base1(get_base1(delay_sooner,i,"delay_sooner",1),t,"delay_sooner",2))))); - lp_accum__.add(bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), (get_base1(beta,i,"beta",1) * (ev_later - ev_sooner)))); - } - } - } - - } catch 
(const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("r_pr"); - names__.push_back("beta_pr"); - names__.push_back("r"); - names__.push_back("beta"); - names__.push_back("mu_r"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool 
include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_dd_exp_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(2); - vector_d sigma = in__.vector_lb_constrain(0,2); - vector_d r_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(r_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("r", "N", N); - Eigen::Matrix r(static_cast(N)); - (void) r; // dummy to suppress unused var warning - - stan::math::initialize(r, DUMMY_VAR__); - stan::math::fill(r,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(r, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(r_pr,i,"r_pr",1)))), - "assigning variable r"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 
(Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - check_greater_or_equal(function__,"r",r,0); - check_less_or_equal(function__,"r",r,1); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(r[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_r; - (void) mu_r; // dummy to suppress unused var warning - - stan::math::initialize(mu_r, DUMMY_VAR__); - stan::math::fill(mu_r,DUMMY_VAR__); - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_r, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 5)); - - for (int i = 1; i <= N; ++i) { - { - local_scalar_t__ ev_later; - (void) ev_later; // dummy to suppress unused var warning - - stan::math::initialize(ev_later, 
DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - local_scalar_t__ ev_sooner; - (void) ev_sooner; // dummy to suppress unused var warning - - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::math::assign(ev_later, (get_base1(get_base1(amount_later,i,"amount_later",1),t,"amount_later",2) * stan::math::exp(((-(1) * get_base1(r,i,"r",1)) * get_base1(get_base1(delay_later,i,"delay_later",1),t,"delay_later",2))))); - stan::math::assign(ev_sooner, (get_base1(get_base1(amount_sooner,i,"amount_sooner",1),t,"amount_sooner",2) * stan::math::exp(((-(1) * get_base1(r,i,"r",1)) * get_base1(get_base1(delay_sooner,i,"delay_sooner",1),t,"delay_sooner",2))))); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),(get_base1(beta,i,"beta",1) * (ev_later - ev_sooner))))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(inv_logit((get_base1(beta,i,"beta",1) * (ev_later - ev_sooner))), base_rng__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_r",mu_r,0); - check_less_or_equal(function__,"mu_r",mu_r,1); - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,5); - - // write generated quantities - vars__.push_back(mu_r); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } 
- for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_dd_exp"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_r"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_r"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_dd_hyperbolic_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_dd_hyperbolic"); - reader.add_event(100, 98, "end", "model_dd_hyperbolic"); - return reader; -} - -class model_dd_hyperbolic : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > delay_later; - vector > amount_later; - vector > delay_sooner; - vector > amount_sooner; - vector > choice; -public: - model_dd_hyperbolic(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_dd_hyperbolic(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_dd_hyperbolic_namespace::model_dd_hyperbolic"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - 
context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("delay_later", "N", N); - validate_non_negative_index("delay_later", "T", T); - context__.validate_dims("data initialization", "delay_later", "double", context__.to_vec(N,T)); - validate_non_negative_index("delay_later", "N", N); - validate_non_negative_index("delay_later", "T", T); - delay_later = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("delay_later"); - pos__ = 0; - size_t delay_later_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < delay_later_limit_1__; ++i_1__) { - size_t delay_later_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < delay_later_limit_0__; ++i_0__) { - delay_later[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("amount_later", "N", N); - validate_non_negative_index("amount_later", "T", T); - context__.validate_dims("data initialization", "amount_later", "double", context__.to_vec(N,T)); - validate_non_negative_index("amount_later", "N", N); - validate_non_negative_index("amount_later", "T", T); - amount_later = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("amount_later"); - pos__ = 0; - size_t amount_later_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < amount_later_limit_1__; ++i_1__) { - size_t 
amount_later_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < amount_later_limit_0__; ++i_0__) { - amount_later[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("delay_sooner", "N", N); - validate_non_negative_index("delay_sooner", "T", T); - context__.validate_dims("data initialization", "delay_sooner", "double", context__.to_vec(N,T)); - validate_non_negative_index("delay_sooner", "N", N); - validate_non_negative_index("delay_sooner", "T", T); - delay_sooner = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("delay_sooner"); - pos__ = 0; - size_t delay_sooner_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < delay_sooner_limit_1__; ++i_1__) { - size_t delay_sooner_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < delay_sooner_limit_0__; ++i_0__) { - delay_sooner[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("amount_sooner", "N", N); - validate_non_negative_index("amount_sooner", "T", T); - context__.validate_dims("data initialization", "amount_sooner", "double", context__.to_vec(N,T)); - validate_non_negative_index("amount_sooner", "N", N); - validate_non_negative_index("amount_sooner", "T", T); - amount_sooner = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("amount_sooner"); - pos__ = 0; - size_t amount_sooner_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < amount_sooner_limit_1__; ++i_1__) { - size_t amount_sooner_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < amount_sooner_limit_0__; ++i_0__) { - amount_sooner[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = 
T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"delay_later[k0__][k1__]",delay_later[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"amount_later[k0__][k1__]",amount_later[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"delay_sooner[k0__][k1__]",delay_sooner[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"amount_sooner[k0__][k1__]",amount_sooner[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],1); - } - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "2", 2); - num_params_r__ += 2; - validate_non_negative_index("sigma", "2", 2); - num_params_r__ += 2; - validate_non_negative_index("k_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, 
current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_dd_hyperbolic() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "2", 2); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(2)); - vector_d mu_p(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "2", 2); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(2)); - vector_d sigma(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("k_pr"))) - throw std::runtime_error("variable k_pr missing"); - vals_r__ = context__.vals_r("k_pr"); - pos__ = 0U; - validate_non_negative_index("k_pr", "N", N); - context__.validate_dims("initialization", "k_pr", "vector_d", context__.to_vec(N)); - vector_d k_pr(static_cast(N)); - for 
(int j1__ = 0U; j1__ < N; ++j1__) - k_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(k_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable k_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(2,lp__); - else - mu_p = in__.vector_constrain(2); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = 
in__.vector_lb_constrain(0,2,lp__); - else - sigma = in__.vector_lb_constrain(0,2); - - Eigen::Matrix k_pr; - (void) k_pr; // dummy to suppress unused var warning - if (jacobian__) - k_pr = in__.vector_constrain(N,lp__); - else - k_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("k", "N", N); - Eigen::Matrix k(static_cast(N)); - (void) k; // dummy to suppress unused var warning - - stan::math::initialize(k, DUMMY_VAR__); - stan::math::fill(k,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(k, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(k_pr,i,"k_pr",1)))), - "assigning variable k"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(k(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: k" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* 
function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"k",k,0); - check_less_or_equal(function__,"k",k,1); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(k_pr, 0, 1)); - lp_accum__.add(normal_log(beta_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - { - local_scalar_t__ ev_later; - (void) ev_later; // dummy to suppress unused var warning - - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - local_scalar_t__ ev_sooner; - (void) ev_sooner; // dummy to suppress unused var warning - - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::math::assign(ev_later, (get_base1(get_base1(amount_later,i,"amount_later",1),t,"amount_later",2) / (1 + (get_base1(k,i,"k",1) * get_base1(get_base1(delay_later,i,"delay_later",1),t,"delay_later",2))))); - stan::math::assign(ev_sooner, (get_base1(get_base1(amount_sooner,i,"amount_sooner",1),t,"amount_sooner",2) / (1 + (get_base1(k,i,"k",1) * get_base1(get_base1(delay_sooner,i,"delay_sooner",1),t,"delay_sooner",2))))); - lp_accum__.add(bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), (get_base1(beta,i,"beta",1) * (ev_later - ev_sooner)))); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector 
vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("k_pr"); - names__.push_back("beta_pr"); - names__.push_back("k"); - names__.push_back("beta"); - names__.push_back("mu_k"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_dd_hyperbolic_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(2); - vector_d sigma = 
in__.vector_lb_constrain(0,2); - vector_d k_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(k_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("k", "N", N); - Eigen::Matrix k(static_cast(N)); - (void) k; // dummy to suppress unused var warning - - stan::math::initialize(k, DUMMY_VAR__); - stan::math::fill(k,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(k, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(k_pr,i,"k_pr",1)))), - "assigning variable k"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - check_greater_or_equal(function__,"k",k,0); - check_less_or_equal(function__,"k",k,1); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // write transformed parameters - if 
(include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(k[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_k; - (void) mu_k; // dummy to suppress unused var warning - - stan::math::initialize(mu_k, DUMMY_VAR__); - stan::math::fill(mu_k,DUMMY_VAR__); - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_k, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 5)); - - for (int i = 1; i <= N; ++i) { - { - local_scalar_t__ ev_later; - (void) ev_later; // dummy to suppress unused var warning - - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - local_scalar_t__ ev_sooner; - (void) ev_sooner; // dummy to suppress unused var warning - - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= 
get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::math::assign(ev_later, (get_base1(get_base1(amount_later,i,"amount_later",1),t,"amount_later",2) / (1 + (get_base1(k,i,"k",1) * get_base1(get_base1(delay_later,i,"delay_later",1),t,"delay_later",2))))); - stan::math::assign(ev_sooner, (get_base1(get_base1(amount_sooner,i,"amount_sooner",1),t,"amount_sooner",2) / (1 + (get_base1(k,i,"k",1) * get_base1(get_base1(delay_sooner,i,"delay_sooner",1),t,"delay_sooner",2))))); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),(get_base1(beta,i,"beta",1) * (ev_later - ev_sooner))))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(inv_logit((get_base1(beta,i,"beta",1) * (ev_later - ev_sooner))), base_rng__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_k",mu_k,0); - check_less_or_equal(function__,"mu_k",mu_k,1); - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,5); - - // write generated quantities - vars__.push_back(mu_k); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& 
params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_dd_hyperbolic"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "k_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "k" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_k"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "k_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "k" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_k"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_dd_hyperbolic_single_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_dd_hyperbolic_single"); - reader.add_event(56, 54, "end", "model_dd_hyperbolic_single"); - return reader; -} - -class model_dd_hyperbolic_single : public prob_grad { -private: - int Tsubj; - vector delay_later; - vector amount_later; - vector delay_sooner; - vector amount_sooner; - vector choice; -public: - model_dd_hyperbolic_single(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - 
model_dd_hyperbolic_single(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_dd_hyperbolic_single_namespace::model_dd_hyperbolic_single"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec()); - Tsubj = int(0); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - Tsubj = vals_i__[pos__++]; - validate_non_negative_index("delay_later", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "delay_later", "double", context__.to_vec(Tsubj)); - validate_non_negative_index("delay_later", "Tsubj", Tsubj); - delay_later = std::vector(Tsubj,double(0)); - vals_r__ = context__.vals_r("delay_later"); - pos__ = 0; - size_t delay_later_limit_0__ = Tsubj; - for (size_t i_0__ = 0; i_0__ < delay_later_limit_0__; ++i_0__) { - delay_later[i_0__] = vals_r__[pos__++]; - } - validate_non_negative_index("amount_later", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "amount_later", "double", context__.to_vec(Tsubj)); - validate_non_negative_index("amount_later", "Tsubj", Tsubj); - amount_later = std::vector(Tsubj,double(0)); - vals_r__ = context__.vals_r("amount_later"); - pos__ = 0; - size_t amount_later_limit_0__ 
= Tsubj; - for (size_t i_0__ = 0; i_0__ < amount_later_limit_0__; ++i_0__) { - amount_later[i_0__] = vals_r__[pos__++]; - } - validate_non_negative_index("delay_sooner", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "delay_sooner", "double", context__.to_vec(Tsubj)); - validate_non_negative_index("delay_sooner", "Tsubj", Tsubj); - delay_sooner = std::vector(Tsubj,double(0)); - vals_r__ = context__.vals_r("delay_sooner"); - pos__ = 0; - size_t delay_sooner_limit_0__ = Tsubj; - for (size_t i_0__ = 0; i_0__ < delay_sooner_limit_0__; ++i_0__) { - delay_sooner[i_0__] = vals_r__[pos__++]; - } - validate_non_negative_index("amount_sooner", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "amount_sooner", "double", context__.to_vec(Tsubj)); - validate_non_negative_index("amount_sooner", "Tsubj", Tsubj); - amount_sooner = std::vector(Tsubj,double(0)); - vals_r__ = context__.vals_r("amount_sooner"); - pos__ = 0; - size_t amount_sooner_limit_0__ = Tsubj; - for (size_t i_0__ = 0; i_0__ < amount_sooner_limit_0__; ++i_0__) { - amount_sooner[i_0__] = vals_r__[pos__++]; - } - validate_non_negative_index("choice", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(Tsubj)); - validate_non_negative_index("choice", "Tsubj", Tsubj); - choice = std::vector(Tsubj,int(0)); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_0__ = Tsubj; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__] = vals_i__[pos__++]; - } - - // validate, data variables - check_greater_or_equal(function__,"Tsubj",Tsubj,1); - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"delay_later[k0__]",delay_later[k0__],0); - } - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"amount_later[k0__]",amount_later[k0__],0); - } - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - 
check_greater_or_equal(function__,"delay_sooner[k0__]",delay_sooner[k0__],0); - } - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"amount_sooner[k0__]",amount_sooner[k0__],0); - } - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"choice[k0__]",choice[k0__],-(1)); - check_less_or_equal(function__,"choice[k0__]",choice[k0__],1); - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - ++num_params_r__; - ++num_params_r__; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_dd_hyperbolic_single() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("k"))) - throw std::runtime_error("variable k missing"); - vals_r__ = context__.vals_r("k"); - pos__ = 0U; - context__.validate_dims("initialization", "k", "double", context__.to_vec()); - double k(0); - k = vals_r__[pos__++]; - try { - writer__.scalar_lub_unconstrain(0,1,k); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable k: ") + e.what()); - } - - if (!(context__.contains_r("beta"))) - throw std::runtime_error("variable beta missing"); - vals_r__ = context__.vals_r("beta"); - pos__ = 0U; - context__.validate_dims("initialization", "beta", "double", context__.to_vec()); - double beta(0); - beta = vals_r__[pos__++]; - try { - writer__.scalar_lub_unconstrain(0,5,beta); - } catch (const 
std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - local_scalar_t__ k; - (void) k; // dummy to suppress unused var warning - if (jacobian__) - k = in__.scalar_lub_constrain(0,1,lp__); - else - k = in__.scalar_lub_constrain(0,1); - - local_scalar_t__ beta; - (void) beta; // dummy to suppress unused var warning - if (jacobian__) - beta = in__.scalar_lub_constrain(0,5,lp__); - else - beta = in__.scalar_lub_constrain(0,5); - - - // transformed parameters - validate_non_negative_index("ev_later", "Tsubj", Tsubj); - vector ev_later(Tsubj); - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - validate_non_negative_index("ev_sooner", "Tsubj", Tsubj); - vector ev_sooner(Tsubj); - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - for (int t = 1; t <= Tsubj; ++t) { - - stan::model::assign(ev_later, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - (get_base1(amount_later,t,"amount_later",1) / (1 + (k * get_base1(delay_later,t,"delay_later",1)))), - "assigning 
variable ev_later"); - stan::model::assign(ev_sooner, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - (get_base1(amount_sooner,t,"amount_sooner",1) / (1 + (k * get_base1(delay_sooner,t,"delay_sooner",1)))), - "assigning variable ev_sooner"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < Tsubj; ++i0__) { - if (stan::math::is_uninitialized(ev_later[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ev_later" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < Tsubj; ++i0__) { - if (stan::math::is_uninitialized(ev_sooner[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ev_sooner" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - - // model body - - lp_accum__.add(uniform_log(k, 0, 1)); - lp_accum__.add(uniform_log(beta, 0, 5)); - for (int t = 1; t <= Tsubj; ++t) { - - lp_accum__.add(bernoulli_logit_log(get_base1(choice,t,"choice",1), (beta * (get_base1(ev_later,t,"ev_later",1) - get_base1(ev_sooner,t,"ev_sooner",1))))); - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - 
names__.push_back("k"); - names__.push_back("beta"); - names__.push_back("ev_later"); - names__.push_back("ev_sooner"); - names__.push_back("logK"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(Tsubj); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(Tsubj); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(Tsubj); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_dd_hyperbolic_single_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - double k = in__.scalar_lub_constrain(0,1); - double beta = in__.scalar_lub_constrain(0,5); - vars__.push_back(k); - vars__.push_back(beta); - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("ev_later", "Tsubj", Tsubj); - vector ev_later(Tsubj); - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - validate_non_negative_index("ev_sooner", "Tsubj", Tsubj); - vector ev_sooner(Tsubj); - stan::math::initialize(ev_sooner, 
DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - for (int t = 1; t <= Tsubj; ++t) { - - stan::model::assign(ev_later, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - (get_base1(amount_later,t,"amount_later",1) / (1 + (k * get_base1(delay_later,t,"delay_later",1)))), - "assigning variable ev_later"); - stan::model::assign(ev_sooner, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - (get_base1(amount_sooner,t,"amount_sooner",1) / (1 + (k * get_base1(delay_sooner,t,"delay_sooner",1)))), - "assigning variable ev_sooner"); - } - - // validate transformed parameters - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < Tsubj; ++k_0__) { - vars__.push_back(ev_later[k_0__]); - } - for (int k_0__ = 0; k_0__ < Tsubj; ++k_0__) { - vars__.push_back(ev_sooner[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ logK; - (void) logK; // dummy to suppress unused var warning - - stan::math::initialize(logK, DUMMY_VAR__); - stan::math::fill(logK,DUMMY_VAR__); - local_scalar_t__ log_lik; - (void) log_lik; // dummy to suppress unused var warning - - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "Tsubj", Tsubj); - vector y_pred(Tsubj); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - stan::math::assign(logK, stan::math::log(k)); - - stan::math::assign(log_lik, 0); - for (int t = 1; t <= Tsubj; ++t) { - - stan::math::assign(log_lik, stan::model::deep_copy((log_lik + bernoulli_logit_log(get_base1(choice,t,"choice",1),(beta * (get_base1(ev_later,t,"ev_later",1) - get_base1(ev_sooner,t,"ev_sooner",1))))))); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - bernoulli_rng(inv_logit((beta * 
(get_base1(ev_later,t,"ev_later",1) - get_base1(ev_sooner,t,"ev_sooner",1)))), base_rng__), - "assigning variable y_pred"); - } - - // validate generated quantities - - // write generated quantities - vars__.push_back(logK); - vars__.push_back(log_lik); - for (int k_0__ = 0; k_0__ < Tsubj; ++k_0__) { - vars__.push_back(y_pred[k_0__]); - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_dd_hyperbolic_single"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "k"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "beta"; - param_names__.push_back(param_name_stream__.str()); - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ev_later" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ev_sooner" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "logK"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "k"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "beta"; - param_names__.push_back(param_name_stream__.str()); - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ev_later" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ev_sooner" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "logK"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_gng_m1_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_gng_m1"); - reader.add_event(133, 131, "end", "model_gng_m1"); - return reader; -} - -class model_gng_m1 : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > pressed; - vector > cue; - vector_d initV; -public: - model_gng_m1(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_gng_m1(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = 
"model_gng_m1_namespace::model_gng_m1"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - context__.validate_dims("data initialization", "pressed", "int", context__.to_vec(N,T)); - 
validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - pressed = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("pressed"); - pos__ = 0; - size_t pressed_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < pressed_limit_1__; ++i_1__) { - size_t pressed_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < pressed_limit_0__; ++i_0__) { - pressed[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - context__.validate_dims("data initialization", "cue", "int", context__.to_vec(N,T)); - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - cue = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("cue"); - pos__ = 0; - size_t cue_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cue_limit_1__; ++i_1__) { - size_t cue_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cue_limit_0__; ++i_0__) { - cue[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],-(1)); - check_less_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],1); - check_less_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],4); - } - } - // initialize data variables - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - 
stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("xi_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("ep_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("rho_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_gng_m1() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d 
sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("xi_pr"))) - throw std::runtime_error("variable xi_pr missing"); - vals_r__ = context__.vals_r("xi_pr"); - pos__ = 0U; - validate_non_negative_index("xi_pr", "N", N); - context__.validate_dims("initialization", "xi_pr", "vector_d", context__.to_vec(N)); - vector_d xi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - xi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(xi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable xi_pr: ") + e.what()); - } - - if (!(context__.contains_r("ep_pr"))) - throw std::runtime_error("variable ep_pr missing"); - vals_r__ = context__.vals_r("ep_pr"); - pos__ = 0U; - validate_non_negative_index("ep_pr", "N", N); - context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); - vector_d ep_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ep_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ep_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); - } - - if (!(context__.contains_r("rho_pr"))) - throw std::runtime_error("variable rho_pr missing"); - vals_r__ = context__.vals_r("rho_pr"); - pos__ = 0U; - validate_non_negative_index("rho_pr", "N", N); - context__.validate_dims("initialization", "rho_pr", "vector_d", context__.to_vec(N)); - vector_d rho_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_pr: ") + 
e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix xi_pr; - (void) xi_pr; // dummy to suppress unused var warning - if (jacobian__) - xi_pr = in__.vector_constrain(N,lp__); - else - xi_pr = in__.vector_constrain(N); - - Eigen::Matrix ep_pr; - (void) ep_pr; // dummy to suppress unused var warning - if (jacobian__) - ep_pr = in__.vector_constrain(N,lp__); - else - ep_pr = in__.vector_constrain(N); - - Eigen::Matrix rho_pr; - (void) rho_pr; // dummy to suppress unused var warning - if (jacobian__) - rho_pr = in__.vector_constrain(N,lp__); - else - rho_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - 
stan::math::fill(xi,DUMMY_VAR__); - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),rho_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(xi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: xi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ep(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ep" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning 
- check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - check_greater_or_equal(function__,"rho",rho,0); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1.0)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(xi_pr, 0, 1.0)); - lp_accum__.add(normal_log(ep_pr, 0, 1.0)); - lp_accum__.add(normal_log(rho_pr, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - stan::math::assign(wv_g, initV); - stan::math::assign(wv_ng, initV); - stan::math::assign(qv_g, initV); - stan::math::assign(qv_ng, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), 
stan::model::nil_index_list()), - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1), - "assigning variable wv_g"); - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - lp_accum__.add(bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2), get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1))); - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - 
stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("xi_pr"); - names__.push_back("ep_pr"); - names__.push_back("rho_pr"); - names__.push_back("xi"); - names__.push_back("ep"); - names__.push_back("rho"); - names__.push_back("mu_xi"); - names__.push_back("mu_ep"); - names__.push_back("mu_rho"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - 
dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_gng_m1_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d xi_pr = in__.vector_constrain(N); - vector_d ep_pr = in__.vector_constrain(N); - vector_d rho_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // 
dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),rho_pr)))); - - // validate transformed parameters - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - check_greater_or_equal(function__,"rho",rho,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_xi; - (void) mu_xi; // dummy to suppress unused var warning - - stan::math::initialize(mu_xi, DUMMY_VAR__); - 
stan::math::fill(mu_xi,DUMMY_VAR__); - local_scalar_t__ mu_ep; - (void) mu_ep; // dummy to suppress unused var warning - - stan::math::initialize(mu_ep, DUMMY_VAR__); - stan::math::fill(mu_ep,DUMMY_VAR__); - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - stan::math::fill(mu_rho,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_xi, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_ep, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_rho, stan::math::exp(get_base1(mu_p,3,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - 
validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - stan::math::assign(wv_g, initV); - stan::math::assign(wv_ng, initV); - stan::math::assign(qv_g, initV); - stan::math::assign(qv_ng, initV); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1), - "assigning variable wv_g"); - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - 
"assigning variable pGo"); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2),get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1)))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1), base_rng__), - "assigning variable y_pred"); - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_xi",mu_xi,0); - check_less_or_equal(function__,"mu_xi",mu_xi,1); - check_greater_or_equal(function__,"mu_ep",mu_ep,0); - check_less_or_equal(function__,"mu_ep",mu_ep,1); - 
check_greater_or_equal(function__,"mu_rho",mu_rho,0); - - // write generated quantities - vars__.push_back(mu_xi); - vars__.push_back(mu_ep); - vars__.push_back(mu_rho); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_gng_m1"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_gng_m1_reg_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_gng_m1_reg"); - reader.add_event(143, 141, "end", "model_gng_m1_reg"); - return reader; -} - -class model_gng_m1_reg : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > pressed; - vector > cue; - vector_d initV; -public: - model_gng_m1_reg(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_gng_m1_reg(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - 
ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_gng_m1_reg_namespace::model_gng_m1_reg"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ 
= 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - context__.validate_dims("data initialization", "pressed", "int", context__.to_vec(N,T)); - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - pressed = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("pressed"); - pos__ = 0; - size_t pressed_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < pressed_limit_1__; ++i_1__) { - size_t pressed_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < pressed_limit_0__; ++i_0__) { - pressed[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - context__.validate_dims("data initialization", "cue", "int", context__.to_vec(N,T)); - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - cue = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("cue"); - pos__ = 0; - size_t cue_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cue_limit_1__; ++i_1__) { - size_t cue_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cue_limit_0__; ++i_0__) { - cue[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],-(1)); - check_less_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],1); - } - } - for (int k0__ = 0; 
k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],1); - check_less_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],4); - } - } - // initialize data variables - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("xi_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("ep_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("rho_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_gng_m1_reg() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw 
std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("xi_pr"))) - throw std::runtime_error("variable xi_pr missing"); - vals_r__ = context__.vals_r("xi_pr"); - pos__ = 0U; - validate_non_negative_index("xi_pr", "N", N); - context__.validate_dims("initialization", "xi_pr", "vector_d", context__.to_vec(N)); - vector_d xi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - xi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(xi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable xi_pr: ") + e.what()); - } - - if (!(context__.contains_r("ep_pr"))) - throw std::runtime_error("variable ep_pr missing"); - vals_r__ = context__.vals_r("ep_pr"); - pos__ = 0U; - validate_non_negative_index("ep_pr", "N", N); - context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); - vector_d ep_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ep_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ep_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); - } - - if (!(context__.contains_r("rho_pr"))) - throw std::runtime_error("variable rho_pr missing"); - vals_r__ = context__.vals_r("rho_pr"); - pos__ = 0U; - 
validate_non_negative_index("rho_pr", "N", N); - context__.validate_dims("initialization", "rho_pr", "vector_d", context__.to_vec(N)); - vector_d rho_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix xi_pr; - (void) xi_pr; // dummy to suppress unused var warning - if (jacobian__) - xi_pr = in__.vector_constrain(N,lp__); - else - xi_pr = in__.vector_constrain(N); - - Eigen::Matrix ep_pr; - (void) ep_pr; // dummy to suppress unused var warning - if (jacobian__) - ep_pr = in__.vector_constrain(N,lp__); - else - ep_pr = 
in__.vector_constrain(N); - - Eigen::Matrix rho_pr; - (void) rho_pr; // dummy to suppress unused var warning - if (jacobian__) - rho_pr = in__.vector_constrain(N,lp__); - else - rho_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),rho_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(xi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: xi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ep(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ep" << 
'[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - check_greater_or_equal(function__,"rho",rho,0); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1.0)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(xi_pr, 0, 1.0)); - lp_accum__.add(normal_log(ep_pr, 0, 1.0)); - lp_accum__.add(normal_log(rho_pr, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused 
var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - stan::math::assign(wv_g, initV); - stan::math::assign(wv_ng, initV); - stan::math::assign(qv_g, initV); - stan::math::assign(qv_ng, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1), - "assigning variable wv_g"); - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - lp_accum__.add(bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2), get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1))); - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - 
stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("xi_pr"); - names__.push_back("ep_pr"); - names__.push_back("rho_pr"); - names__.push_back("xi"); - names__.push_back("ep"); - names__.push_back("rho"); - names__.push_back("mu_xi"); - names__.push_back("mu_ep"); - names__.push_back("mu_rho"); - names__.push_back("log_lik"); - names__.push_back("Qgo"); - names__.push_back("Qnogo"); - 
names__.push_back("Wgo"); - names__.push_back("Wnogo"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_gng_m1_reg_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = 
in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d xi_pr = in__.vector_constrain(N); - vector_d ep_pr = in__.vector_constrain(N); - vector_d rho_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 
Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),rho_pr)))); - - // validate transformed parameters - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - check_greater_or_equal(function__,"rho",rho,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_xi; - (void) mu_xi; // dummy to suppress unused var warning - - stan::math::initialize(mu_xi, DUMMY_VAR__); - stan::math::fill(mu_xi,DUMMY_VAR__); - local_scalar_t__ mu_ep; - (void) mu_ep; // dummy to suppress unused var warning - - stan::math::initialize(mu_ep, DUMMY_VAR__); - stan::math::fill(mu_ep,DUMMY_VAR__); - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - stan::math::fill(mu_rho,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("Qgo", "N", N); - validate_non_negative_index("Qgo", "T", T); - vector > Qgo(N, (vector(T))); - stan::math::initialize(Qgo, DUMMY_VAR__); - stan::math::fill(Qgo,DUMMY_VAR__); - validate_non_negative_index("Qnogo", "N", N); - validate_non_negative_index("Qnogo", "T", T); - vector > Qnogo(N, (vector(T))); - stan::math::initialize(Qnogo, DUMMY_VAR__); - 
stan::math::fill(Qnogo,DUMMY_VAR__); - validate_non_negative_index("Wgo", "N", N); - validate_non_negative_index("Wgo", "T", T); - vector > Wgo(N, (vector(T))); - stan::math::initialize(Wgo, DUMMY_VAR__); - stan::math::fill(Wgo,DUMMY_VAR__); - validate_non_negative_index("Wnogo", "N", N); - validate_non_negative_index("Wnogo", "T", T); - vector > Wnogo(N, (vector(T))); - stan::math::initialize(Wnogo, DUMMY_VAR__); - stan::math::fill(Wnogo,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_xi, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_ep, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_rho, stan::math::exp(get_base1(mu_p,3,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; 
// dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - stan::math::assign(wv_g, initV); - stan::math::assign(wv_ng, initV); - stan::math::assign(qv_g, initV); - stan::math::assign(qv_ng, initV); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1), - "assigning variable wv_g"); - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - stan::model::assign(log_lik, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2),get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1)))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1), base_rng__), - "assigning variable y_pred"); - stan::model::assign(Qgo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1), - "assigning variable Qgo"); - stan::model::assign(Qnogo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable Qnogo"); - stan::model::assign(Wgo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1), - "assigning variable Wgo"); - stan::model::assign(Wnogo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1), - "assigning variable Wnogo"); - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - 
stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_xi",mu_xi,0); - check_less_or_equal(function__,"mu_xi",mu_xi,1); - check_greater_or_equal(function__,"mu_ep",mu_ep,0); - check_less_or_equal(function__,"mu_ep",mu_ep,1); - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - - // write generated quantities - vars__.push_back(mu_xi); - vars__.push_back(mu_ep); - vars__.push_back(mu_rho); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Qgo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Qnogo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Wgo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Wnogo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - 
vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_gng_m1_reg"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qnogo" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qnogo" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_gng_m2_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_gng_m2"); - reader.add_event(144, 142, "end", "model_gng_m2"); - return reader; -} - -class model_gng_m2 : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > pressed; - vector > cue; - vector_d initV; -public: - model_gng_m2(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_gng_m2(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, 
- std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_gng_m2_namespace::model_gng_m2"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - 
outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - context__.validate_dims("data initialization", "pressed", "int", context__.to_vec(N,T)); - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - pressed = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("pressed"); - pos__ = 0; - size_t pressed_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < pressed_limit_1__; ++i_1__) { - size_t pressed_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < pressed_limit_0__; ++i_0__) { - pressed[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - context__.validate_dims("data initialization", "cue", "int", context__.to_vec(N,T)); - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - cue = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("cue"); - pos__ = 0; - size_t cue_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cue_limit_1__; ++i_1__) { - size_t cue_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cue_limit_0__; ++i_0__) { - cue[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],-(1)); - check_less_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],1); - 
check_less_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],4); - } - } - // initialize data variables - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "4", 4); - num_params_r__ += 4; - validate_non_negative_index("sigma", "4", 4); - num_params_r__ += 4; - validate_non_negative_index("xi_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("ep_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("b_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("rho_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_gng_m2() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "4", 4); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(4)); - vector_d mu_p(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - 
if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "4", 4); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(4)); - vector_d sigma(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("xi_pr"))) - throw std::runtime_error("variable xi_pr missing"); - vals_r__ = context__.vals_r("xi_pr"); - pos__ = 0U; - validate_non_negative_index("xi_pr", "N", N); - context__.validate_dims("initialization", "xi_pr", "vector_d", context__.to_vec(N)); - vector_d xi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - xi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(xi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable xi_pr: ") + e.what()); - } - - if (!(context__.contains_r("ep_pr"))) - throw std::runtime_error("variable ep_pr missing"); - vals_r__ = context__.vals_r("ep_pr"); - pos__ = 0U; - validate_non_negative_index("ep_pr", "N", N); - context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); - vector_d ep_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ep_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ep_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); - } - - if (!(context__.contains_r("b_pr"))) - throw std::runtime_error("variable b_pr missing"); - vals_r__ = context__.vals_r("b_pr"); - pos__ = 0U; - validate_non_negative_index("b_pr", "N", N); - context__.validate_dims("initialization", "b_pr", "vector_d", context__.to_vec(N)); 
- vector_d b_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - b_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(b_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable b_pr: ") + e.what()); - } - - if (!(context__.contains_r("rho_pr"))) - throw std::runtime_error("variable rho_pr missing"); - vals_r__ = context__.vals_r("rho_pr"); - pos__ = 0U; - validate_non_negative_index("rho_pr", "N", N); - context__.validate_dims("initialization", "rho_pr", "vector_d", context__.to_vec(N)); - vector_d rho_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(4,lp__); - else - mu_p = in__.vector_constrain(4); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = 
in__.vector_lb_constrain(0,4,lp__); - else - sigma = in__.vector_lb_constrain(0,4); - - Eigen::Matrix xi_pr; - (void) xi_pr; // dummy to suppress unused var warning - if (jacobian__) - xi_pr = in__.vector_constrain(N,lp__); - else - xi_pr = in__.vector_constrain(N); - - Eigen::Matrix ep_pr; - (void) ep_pr; // dummy to suppress unused var warning - if (jacobian__) - ep_pr = in__.vector_constrain(N,lp__); - else - ep_pr = in__.vector_constrain(N); - - Eigen::Matrix b_pr; - (void) b_pr; // dummy to suppress unused var warning - if (jacobian__) - b_pr = in__.vector_constrain(N,lp__); - else - b_pr = in__.vector_constrain(N); - - Eigen::Matrix rho_pr; - (void) rho_pr; // dummy to suppress unused var warning - if (jacobian__) - rho_pr = in__.vector_constrain(N,lp__); - else - rho_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - 
stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),rho_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(xi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: xi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ep(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ep" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(b(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: b" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - check_greater_or_equal(function__,"rho",rho,0); - - // model body - - lp_accum__.add(normal_log(get_base1(mu_p,1,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(mu_p,2,"mu_p",1), 0, 1.0)); - 
lp_accum__.add(normal_log(get_base1(mu_p,3,"mu_p",1), 0, 10.0)); - lp_accum__.add(normal_log(get_base1(mu_p,4,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 2), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - lp_accum__.add(cauchy_log(get_base1(sigma,3,"sigma",1), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(sigma,4,"sigma",1), 0, 0.20000000000000001)); - lp_accum__.add(normal_log(xi_pr, 0, 1.0)); - lp_accum__.add(normal_log(ep_pr, 0, 1.0)); - lp_accum__.add(normal_log(b_pr, 0, 1.0)); - lp_accum__.add(normal_log(rho_pr, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - stan::math::assign(wv_g, initV); - stan::math::assign(wv_ng, initV); - stan::math::assign(qv_g, initV); - stan::math::assign(qv_ng, initV); - for (int t = 1; t <= 
get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - (get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)), - "assigning variable wv_g"); - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - lp_accum__.add(bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2), get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1))); - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - 
} else { - - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("xi_pr"); - names__.push_back("ep_pr"); - names__.push_back("b_pr"); - names__.push_back("rho_pr"); - names__.push_back("xi"); - names__.push_back("ep"); - names__.push_back("b"); - names__.push_back("rho"); - names__.push_back("mu_xi"); - names__.push_back("mu_ep"); - names__.push_back("mu_b"); - names__.push_back("mu_rho"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - 
dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_gng_m2_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(4); - vector_d sigma = in__.vector_lb_constrain(0,4); - vector_d xi_pr = in__.vector_constrain(N); - vector_d ep_pr = in__.vector_constrain(N); - vector_d b_pr = in__.vector_constrain(N); - vector_d rho_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - 
vars__.push_back(ep_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - 
stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),rho_pr)))); - - // validate transformed parameters - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - check_greater_or_equal(function__,"rho",rho,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_xi; - (void) mu_xi; // dummy to suppress unused var warning - - stan::math::initialize(mu_xi, DUMMY_VAR__); - stan::math::fill(mu_xi,DUMMY_VAR__); - local_scalar_t__ mu_ep; - (void) mu_ep; // dummy to suppress unused var warning - - stan::math::initialize(mu_ep, DUMMY_VAR__); - stan::math::fill(mu_ep,DUMMY_VAR__); - local_scalar_t__ mu_b; - (void) mu_b; // dummy to suppress unused var warning - - stan::math::initialize(mu_b, DUMMY_VAR__); - stan::math::fill(mu_b,DUMMY_VAR__); - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - stan::math::fill(mu_rho,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - 
stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_xi, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_ep, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_b, get_base1(mu_p,3,"mu_p",1)); - stan::math::assign(mu_rho, stan::math::exp(get_base1(mu_p,4,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - stan::math::assign(wv_g, initV); - stan::math::assign(wv_ng, initV); - stan::math::assign(qv_g, initV); - stan::math::assign(qv_ng, initV); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= 
get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - (get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)), - "assigning variable wv_g"); - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2),get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1)))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1), base_rng__), - "assigning variable y_pred"); - if 
(as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_xi",mu_xi,0); - check_less_or_equal(function__,"mu_xi",mu_xi,1); - check_greater_or_equal(function__,"mu_ep",mu_ep,0); - check_less_or_equal(function__,"mu_ep",mu_ep,1); - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - - // write generated quantities - vars__.push_back(mu_xi); - vars__.push_back(mu_ep); - vars__.push_back(mu_b); - vars__.push_back(mu_rho); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - 
template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_gng_m2"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_gng_m2_reg_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_gng_m2_reg"); - reader.add_event(154, 152, "end", "model_gng_m2_reg"); - return reader; -} - -class model_gng_m2_reg : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > pressed; - vector > cue; - vector_d initV; -public: - model_gng_m2_reg(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_gng_m2_reg(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_gng_m2_reg_namespace::model_gng_m2_reg"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = 
int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - context__.validate_dims("data initialization", "pressed", "int", context__.to_vec(N,T)); - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - pressed = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("pressed"); - pos__ = 0; - size_t pressed_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < pressed_limit_1__; ++i_1__) { - size_t pressed_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < pressed_limit_0__; ++i_0__) { - pressed[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("cue", "N", N); - 
validate_non_negative_index("cue", "T", T); - context__.validate_dims("data initialization", "cue", "int", context__.to_vec(N,T)); - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - cue = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("cue"); - pos__ = 0; - size_t cue_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cue_limit_1__; ++i_1__) { - size_t cue_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cue_limit_0__; ++i_0__) { - cue[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],-(1)); - check_less_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],1); - check_less_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],4); - } - } - // initialize data variables - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "4", 4); - num_params_r__ += 4; - validate_non_negative_index("sigma", "4", 4); - num_params_r__ += 4; - validate_non_negative_index("xi_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("ep_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("b_pr", "N", N); - 
num_params_r__ += N; - validate_non_negative_index("rho_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_gng_m2_reg() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "4", 4); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(4)); - vector_d mu_p(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "4", 4); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(4)); - vector_d sigma(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("xi_pr"))) - throw std::runtime_error("variable xi_pr missing"); - vals_r__ = context__.vals_r("xi_pr"); - pos__ = 0U; - 
validate_non_negative_index("xi_pr", "N", N); - context__.validate_dims("initialization", "xi_pr", "vector_d", context__.to_vec(N)); - vector_d xi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - xi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(xi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable xi_pr: ") + e.what()); - } - - if (!(context__.contains_r("ep_pr"))) - throw std::runtime_error("variable ep_pr missing"); - vals_r__ = context__.vals_r("ep_pr"); - pos__ = 0U; - validate_non_negative_index("ep_pr", "N", N); - context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); - vector_d ep_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ep_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ep_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); - } - - if (!(context__.contains_r("b_pr"))) - throw std::runtime_error("variable b_pr missing"); - vals_r__ = context__.vals_r("b_pr"); - pos__ = 0U; - validate_non_negative_index("b_pr", "N", N); - context__.validate_dims("initialization", "b_pr", "vector_d", context__.to_vec(N)); - vector_d b_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - b_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(b_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable b_pr: ") + e.what()); - } - - if (!(context__.contains_r("rho_pr"))) - throw std::runtime_error("variable rho_pr missing"); - vals_r__ = context__.vals_r("rho_pr"); - pos__ = 0U; - validate_non_negative_index("rho_pr", "N", N); - context__.validate_dims("initialization", "rho_pr", "vector_d", context__.to_vec(N)); - vector_d rho_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_pr(j1__) = vals_r__[pos__++]; - try { - 
writer__.vector_unconstrain(rho_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(4,lp__); - else - mu_p = in__.vector_constrain(4); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,4,lp__); - else - sigma = in__.vector_lb_constrain(0,4); - - Eigen::Matrix xi_pr; - (void) xi_pr; // dummy to suppress unused var warning - if (jacobian__) - xi_pr = in__.vector_constrain(N,lp__); - else - xi_pr = in__.vector_constrain(N); - - Eigen::Matrix ep_pr; - (void) ep_pr; // dummy to suppress unused var warning - if (jacobian__) - ep_pr = in__.vector_constrain(N,lp__); - else - ep_pr = in__.vector_constrain(N); - - Eigen::Matrix b_pr; - (void) b_pr; // dummy to suppress unused var warning - if (jacobian__) - b_pr = in__.vector_constrain(N,lp__); - else - b_pr = in__.vector_constrain(N); - - Eigen::Matrix rho_pr; - (void) rho_pr; // dummy to suppress 
unused var warning - if (jacobian__) - rho_pr = in__.vector_constrain(N,lp__); - else - rho_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),rho_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(xi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: xi" << '[' << i0__ << 
']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ep(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ep" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(b(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: b" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - check_greater_or_equal(function__,"rho",rho,0); - - // model body - - lp_accum__.add(normal_log(get_base1(mu_p,1,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(mu_p,2,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(mu_p,3,"mu_p",1), 0, 10.0)); - lp_accum__.add(normal_log(get_base1(mu_p,4,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 2), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - lp_accum__.add(cauchy_log(get_base1(sigma,3,"sigma",1), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(sigma,4,"sigma",1), 0, 0.20000000000000001)); - lp_accum__.add(normal_log(xi_pr, 0, 1.0)); - lp_accum__.add(normal_log(ep_pr, 0, 1.0)); - lp_accum__.add(normal_log(b_pr, 0, 1.0)); - lp_accum__.add(normal_log(rho_pr, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("wv_g", "4", 
4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - stan::math::assign(wv_g, initV); - stan::math::assign(wv_ng, initV); - stan::math::assign(qv_g, initV); - stan::math::assign(qv_ng, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - (get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)), - "assigning variable wv_g"); - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), 
- inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - lp_accum__.add(bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2), get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1))); - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE 
REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("xi_pr"); - names__.push_back("ep_pr"); - names__.push_back("b_pr"); - names__.push_back("rho_pr"); - names__.push_back("xi"); - names__.push_back("ep"); - names__.push_back("b"); - names__.push_back("rho"); - names__.push_back("mu_xi"); - names__.push_back("mu_ep"); - names__.push_back("mu_b"); - names__.push_back("mu_rho"); - names__.push_back("log_lik"); - names__.push_back("Qgo"); - names__.push_back("Qnogo"); - names__.push_back("Wgo"); - names__.push_back("Wnogo"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - 
dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_gng_m2_reg_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(4); - vector_d sigma = in__.vector_lb_constrain(0,4); - vector_d xi_pr = in__.vector_constrain(N); - vector_d ep_pr = in__.vector_constrain(N); - vector_d b_pr = in__.vector_constrain(N); - vector_d rho_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_pr[k_0__]); - } - - // declare and define transformed parameters - 
double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),rho_pr)))); - - // validate transformed parameters - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - 
check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - check_greater_or_equal(function__,"rho",rho,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_xi; - (void) mu_xi; // dummy to suppress unused var warning - - stan::math::initialize(mu_xi, DUMMY_VAR__); - stan::math::fill(mu_xi,DUMMY_VAR__); - local_scalar_t__ mu_ep; - (void) mu_ep; // dummy to suppress unused var warning - - stan::math::initialize(mu_ep, DUMMY_VAR__); - stan::math::fill(mu_ep,DUMMY_VAR__); - local_scalar_t__ mu_b; - (void) mu_b; // dummy to suppress unused var warning - - stan::math::initialize(mu_b, DUMMY_VAR__); - stan::math::fill(mu_b,DUMMY_VAR__); - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - stan::math::fill(mu_rho,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("Qgo", "N", N); - validate_non_negative_index("Qgo", "T", T); - vector > Qgo(N, (vector(T))); - stan::math::initialize(Qgo, DUMMY_VAR__); - stan::math::fill(Qgo,DUMMY_VAR__); - validate_non_negative_index("Qnogo", "N", N); - validate_non_negative_index("Qnogo", "T", T); - vector > Qnogo(N, (vector(T))); - stan::math::initialize(Qnogo, DUMMY_VAR__); - stan::math::fill(Qnogo,DUMMY_VAR__); - validate_non_negative_index("Wgo", "N", N); - validate_non_negative_index("Wgo", "T", T); - vector > Wgo(N, (vector(T))); - 
stan::math::initialize(Wgo, DUMMY_VAR__); - stan::math::fill(Wgo,DUMMY_VAR__); - validate_non_negative_index("Wnogo", "N", N); - validate_non_negative_index("Wnogo", "T", T); - vector > Wnogo(N, (vector(T))); - stan::math::initialize(Wnogo, DUMMY_VAR__); - stan::math::fill(Wnogo,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_xi, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_ep, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_b, get_base1(mu_p,3,"mu_p",1)); - stan::math::assign(mu_rho, stan::math::exp(get_base1(mu_p,4,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - 
stan::math::fill(qv_ng,DUMMY_VAR__); - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - stan::math::assign(wv_g, initV); - stan::math::assign(wv_ng, initV); - stan::math::assign(qv_g, initV); - stan::math::assign(qv_ng, initV); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - (get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)), - "assigning variable wv_g"); - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 
stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2),get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1)))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1), base_rng__), - "assigning variable y_pred"); - stan::model::assign(Qgo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1), - "assigning variable Qgo"); - stan::model::assign(Qnogo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable Qnogo"); - stan::model::assign(Wgo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1), - "assigning variable Wgo"); - stan::model::assign(Wnogo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1), - "assigning variable Wnogo"); - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * 
get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_xi",mu_xi,0); - check_less_or_equal(function__,"mu_xi",mu_xi,1); - check_greater_or_equal(function__,"mu_ep",mu_ep,0); - check_less_or_equal(function__,"mu_ep",mu_ep,1); - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - - // write generated quantities - vars__.push_back(mu_xi); - vars__.push_back(mu_ep); - vars__.push_back(mu_b); - vars__.push_back(mu_rho); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Qgo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Qnogo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Wgo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Wnogo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, 
prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_gng_m2_reg"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wnogo" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_gng_m3_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_gng_m3"); - reader.add_event(161, 159, "end", "model_gng_m3"); - return reader; -} - -class model_gng_m3 : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > pressed; - vector > cue; - vector_d initV; -public: - model_gng_m3(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_gng_m3(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_gng_m3_namespace::model_gng_m3"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ 
DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - context__.validate_dims("data initialization", "pressed", "int", context__.to_vec(N,T)); - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - pressed = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("pressed"); - pos__ = 0; - size_t pressed_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < 
pressed_limit_1__; ++i_1__) { - size_t pressed_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < pressed_limit_0__; ++i_0__) { - pressed[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - context__.validate_dims("data initialization", "cue", "int", context__.to_vec(N,T)); - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - cue = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("cue"); - pos__ = 0; - size_t cue_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cue_limit_1__; ++i_1__) { - size_t cue_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cue_limit_0__; ++i_0__) { - cue[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],-(1)); - check_less_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],1); - check_less_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],4); - } - } - // initialize data variables - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "5", 5); - num_params_r__ += 5; - validate_non_negative_index("sigma", "5", 
5); - num_params_r__ += 5; - validate_non_negative_index("xi_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("ep_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("b_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("pi_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("rho_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_gng_m3() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "5", 5); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(5)); - vector_d mu_p(static_cast(5)); - for (int j1__ = 0U; j1__ < 5; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "5", 5); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(5)); - vector_d sigma(static_cast(5)); - for (int j1__ = 0U; j1__ < 5; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } 
catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("xi_pr"))) - throw std::runtime_error("variable xi_pr missing"); - vals_r__ = context__.vals_r("xi_pr"); - pos__ = 0U; - validate_non_negative_index("xi_pr", "N", N); - context__.validate_dims("initialization", "xi_pr", "vector_d", context__.to_vec(N)); - vector_d xi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - xi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(xi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable xi_pr: ") + e.what()); - } - - if (!(context__.contains_r("ep_pr"))) - throw std::runtime_error("variable ep_pr missing"); - vals_r__ = context__.vals_r("ep_pr"); - pos__ = 0U; - validate_non_negative_index("ep_pr", "N", N); - context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); - vector_d ep_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ep_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ep_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); - } - - if (!(context__.contains_r("b_pr"))) - throw std::runtime_error("variable b_pr missing"); - vals_r__ = context__.vals_r("b_pr"); - pos__ = 0U; - validate_non_negative_index("b_pr", "N", N); - context__.validate_dims("initialization", "b_pr", "vector_d", context__.to_vec(N)); - vector_d b_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - b_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(b_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable b_pr: ") + e.what()); - } - - if (!(context__.contains_r("pi_pr"))) - throw std::runtime_error("variable pi_pr missing"); - vals_r__ = context__.vals_r("pi_pr"); - pos__ = 0U; - 
validate_non_negative_index("pi_pr", "N", N); - context__.validate_dims("initialization", "pi_pr", "vector_d", context__.to_vec(N)); - vector_d pi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - pi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(pi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable pi_pr: ") + e.what()); - } - - if (!(context__.contains_r("rho_pr"))) - throw std::runtime_error("variable rho_pr missing"); - vals_r__ = context__.vals_r("rho_pr"); - pos__ = 0U; - validate_non_negative_index("rho_pr", "N", N); - context__.validate_dims("initialization", "rho_pr", "vector_d", context__.to_vec(N)); - vector_d rho_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(5,lp__); - else - mu_p 
= in__.vector_constrain(5); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,5,lp__); - else - sigma = in__.vector_lb_constrain(0,5); - - Eigen::Matrix xi_pr; - (void) xi_pr; // dummy to suppress unused var warning - if (jacobian__) - xi_pr = in__.vector_constrain(N,lp__); - else - xi_pr = in__.vector_constrain(N); - - Eigen::Matrix ep_pr; - (void) ep_pr; // dummy to suppress unused var warning - if (jacobian__) - ep_pr = in__.vector_constrain(N,lp__); - else - ep_pr = in__.vector_constrain(N); - - Eigen::Matrix b_pr; - (void) b_pr; // dummy to suppress unused var warning - if (jacobian__) - b_pr = in__.vector_constrain(N,lp__); - else - b_pr = in__.vector_constrain(N); - - Eigen::Matrix pi_pr; - (void) pi_pr; // dummy to suppress unused var warning - if (jacobian__) - pi_pr = in__.vector_constrain(N,lp__); - else - pi_pr = in__.vector_constrain(N); - - Eigen::Matrix rho_pr; - (void) rho_pr; // dummy to suppress unused var warning - if (jacobian__) - rho_pr = in__.vector_constrain(N,lp__); - else - rho_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - 
stan::math::fill(pi,DUMMY_VAR__); - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - stan::math::assign(pi, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),pi_pr))); - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),rho_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(xi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: xi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ep(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ep" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(b(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: b" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(pi(i0__))) { - std::stringstream msg__; - msg__ << 
"Undefined transformed parameter: pi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - check_greater_or_equal(function__,"rho",rho,0); - - // model body - - lp_accum__.add(normal_log(get_base1(mu_p,1,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(mu_p,2,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(mu_p,3,"mu_p",1), 0, 10.0)); - lp_accum__.add(normal_log(get_base1(mu_p,4,"mu_p",1), 0, 10.0)); - lp_accum__.add(normal_log(get_base1(mu_p,5,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 2), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - lp_accum__.add(cauchy_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(3, 4), stan::model::nil_index_list()), "sigma"), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(sigma,5,"sigma",1), 0, 0.20000000000000001)); - lp_accum__.add(normal_log(xi_pr, 0, 1.0)); - lp_accum__.add(normal_log(ep_pr, 0, 1.0)); - lp_accum__.add(normal_log(b_pr, 0, 1.0)); - lp_accum__.add(normal_log(pi_pr, 0, 1.0)); - lp_accum__.add(normal_log(rho_pr, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - 
validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - validate_non_negative_index("sv", "4", 4); - Eigen::Matrix sv(static_cast(4)); - (void) sv; // dummy to suppress unused var warning - - stan::math::initialize(sv, DUMMY_VAR__); - stan::math::fill(sv,DUMMY_VAR__); - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - stan::math::assign(wv_g, initV); - stan::math::assign(wv_ng, initV); - stan::math::assign(qv_g, initV); - stan::math::assign(qv_ng, initV); - stan::math::assign(sv, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - ((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)) + (get_base1(pi,i,"pi",1) * get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))), - "assigning variable wv_g"); - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable 
wv_ng"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - lp_accum__.add(bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2), get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1))); - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - stan::model::assign(qv_ng, - 
stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("xi_pr"); - names__.push_back("ep_pr"); - names__.push_back("b_pr"); - names__.push_back("pi_pr"); - names__.push_back("rho_pr"); - names__.push_back("xi"); - names__.push_back("ep"); - names__.push_back("b"); - names__.push_back("pi"); - names__.push_back("rho"); - names__.push_back("mu_xi"); - names__.push_back("mu_ep"); - names__.push_back("mu_b"); - names__.push_back("mu_pi"); - names__.push_back("mu_rho"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(5); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(5); - 
dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_gng_m3_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(5); - vector_d sigma = in__.vector_lb_constrain(0,5); - vector_d xi_pr = in__.vector_constrain(N); - vector_d ep_pr = in__.vector_constrain(N); - vector_d b_pr = in__.vector_constrain(N); - vector_d pi_pr = in__.vector_constrain(N); - vector_d rho_pr = in__.vector_constrain(N); - for (int 
k_0__ = 0; k_0__ < 5; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 5; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(xi, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - stan::math::assign(pi, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),pi_pr))); - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),rho_pr)))); - - // validate transformed parameters - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - check_greater_or_equal(function__,"rho",rho,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_xi; - (void) mu_xi; // dummy to suppress unused var warning - - stan::math::initialize(mu_xi, DUMMY_VAR__); - stan::math::fill(mu_xi,DUMMY_VAR__); - local_scalar_t__ mu_ep; - (void) mu_ep; // dummy to suppress unused var warning - - stan::math::initialize(mu_ep, DUMMY_VAR__); - stan::math::fill(mu_ep,DUMMY_VAR__); - local_scalar_t__ mu_b; - (void) mu_b; // dummy to 
suppress unused var warning - - stan::math::initialize(mu_b, DUMMY_VAR__); - stan::math::fill(mu_b,DUMMY_VAR__); - local_scalar_t__ mu_pi; - (void) mu_pi; // dummy to suppress unused var warning - - stan::math::initialize(mu_pi, DUMMY_VAR__); - stan::math::fill(mu_pi,DUMMY_VAR__); - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - stan::math::fill(mu_rho,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_xi, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_ep, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_b, get_base1(mu_p,3,"mu_p",1)); - stan::math::assign(mu_pi, get_base1(mu_p,4,"mu_p",1)); - stan::math::assign(mu_rho, stan::math::exp(get_base1(mu_p,5,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - validate_non_negative_index("qv_g", "4", 4); - 
Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - validate_non_negative_index("sv", "4", 4); - Eigen::Matrix sv(static_cast(4)); - (void) sv; // dummy to suppress unused var warning - - stan::math::initialize(sv, DUMMY_VAR__); - stan::math::fill(sv,DUMMY_VAR__); - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - stan::math::assign(wv_g, initV); - stan::math::assign(wv_ng, initV); - stan::math::assign(qv_g, initV); - stan::math::assign(qv_ng, initV); - stan::math::assign(sv, initV); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - ((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)) + (get_base1(pi,i,"pi",1) * get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))), - "assigning variable wv_g"); - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - stan::model::assign(pGo, - 
stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2),get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1)))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1), base_rng__), - "assigning variable y_pred"); - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), 
stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_xi",mu_xi,0); - check_less_or_equal(function__,"mu_xi",mu_xi,1); - check_greater_or_equal(function__,"mu_ep",mu_ep,0); - check_less_or_equal(function__,"mu_ep",mu_ep,1); - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - - // write generated quantities - vars__.push_back(mu_xi); - vars__.push_back(mu_ep); - vars__.push_back(mu_b); - vars__.push_back(mu_pi); - vars__.push_back(mu_rho); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 
0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_gng_m3"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_gng_m3_reg_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_gng_m3_reg"); - reader.add_event(173, 171, "end", "model_gng_m3_reg"); - return reader; -} - -class model_gng_m3_reg : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > pressed; - vector > cue; - vector_d initV; -public: - model_gng_m3_reg(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_gng_m3_reg(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_gng_m3_reg_namespace::model_gng_m3_reg"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector 
vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - context__.validate_dims("data initialization", "pressed", "int", context__.to_vec(N,T)); - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - pressed = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("pressed"); - pos__ = 0; - size_t pressed_limit_1__ = T; - for 
(size_t i_1__ = 0; i_1__ < pressed_limit_1__; ++i_1__) { - size_t pressed_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < pressed_limit_0__; ++i_0__) { - pressed[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - context__.validate_dims("data initialization", "cue", "int", context__.to_vec(N,T)); - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - cue = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("cue"); - pos__ = 0; - size_t cue_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cue_limit_1__; ++i_1__) { - size_t cue_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cue_limit_0__; ++i_0__) { - cue[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],-(1)); - check_less_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],1); - check_less_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],4); - } - } - // initialize data variables - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "5", 5); - num_params_r__ += 5; - 
validate_non_negative_index("sigma", "5", 5); - num_params_r__ += 5; - validate_non_negative_index("xi_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("ep_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("b_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("pi_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("rho_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_gng_m3_reg() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "5", 5); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(5)); - vector_d mu_p(static_cast(5)); - for (int j1__ = 0U; j1__ < 5; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "5", 5); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(5)); - vector_d sigma(static_cast(5)); - for (int j1__ = 0U; j1__ < 5; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - 
writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("xi_pr"))) - throw std::runtime_error("variable xi_pr missing"); - vals_r__ = context__.vals_r("xi_pr"); - pos__ = 0U; - validate_non_negative_index("xi_pr", "N", N); - context__.validate_dims("initialization", "xi_pr", "vector_d", context__.to_vec(N)); - vector_d xi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - xi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(xi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable xi_pr: ") + e.what()); - } - - if (!(context__.contains_r("ep_pr"))) - throw std::runtime_error("variable ep_pr missing"); - vals_r__ = context__.vals_r("ep_pr"); - pos__ = 0U; - validate_non_negative_index("ep_pr", "N", N); - context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); - vector_d ep_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ep_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ep_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); - } - - if (!(context__.contains_r("b_pr"))) - throw std::runtime_error("variable b_pr missing"); - vals_r__ = context__.vals_r("b_pr"); - pos__ = 0U; - validate_non_negative_index("b_pr", "N", N); - context__.validate_dims("initialization", "b_pr", "vector_d", context__.to_vec(N)); - vector_d b_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - b_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(b_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable b_pr: ") + e.what()); - } - - if (!(context__.contains_r("pi_pr"))) - throw std::runtime_error("variable pi_pr missing"); - vals_r__ = 
context__.vals_r("pi_pr"); - pos__ = 0U; - validate_non_negative_index("pi_pr", "N", N); - context__.validate_dims("initialization", "pi_pr", "vector_d", context__.to_vec(N)); - vector_d pi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - pi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(pi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable pi_pr: ") + e.what()); - } - - if (!(context__.contains_r("rho_pr"))) - throw std::runtime_error("variable rho_pr missing"); - vals_r__ = context__.vals_r("rho_pr"); - pos__ = 0U; - validate_non_negative_index("rho_pr", "N", N); - context__.validate_dims("initialization", "rho_pr", "vector_d", context__.to_vec(N)); - vector_d rho_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = 
in__.vector_constrain(5,lp__); - else - mu_p = in__.vector_constrain(5); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,5,lp__); - else - sigma = in__.vector_lb_constrain(0,5); - - Eigen::Matrix xi_pr; - (void) xi_pr; // dummy to suppress unused var warning - if (jacobian__) - xi_pr = in__.vector_constrain(N,lp__); - else - xi_pr = in__.vector_constrain(N); - - Eigen::Matrix ep_pr; - (void) ep_pr; // dummy to suppress unused var warning - if (jacobian__) - ep_pr = in__.vector_constrain(N,lp__); - else - ep_pr = in__.vector_constrain(N); - - Eigen::Matrix b_pr; - (void) b_pr; // dummy to suppress unused var warning - if (jacobian__) - b_pr = in__.vector_constrain(N,lp__); - else - b_pr = in__.vector_constrain(N); - - Eigen::Matrix pi_pr; - (void) pi_pr; // dummy to suppress unused var warning - if (jacobian__) - pi_pr = in__.vector_constrain(N,lp__); - else - pi_pr = in__.vector_constrain(N); - - Eigen::Matrix rho_pr; - (void) rho_pr; // dummy to suppress unused var warning - if (jacobian__) - rho_pr = in__.vector_constrain(N,lp__); - else - rho_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - 
stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - stan::math::assign(pi, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),pi_pr))); - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),rho_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(xi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: xi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ep(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ep" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(b(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: b" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(pi(i0__))) { - 
std::stringstream msg__; - msg__ << "Undefined transformed parameter: pi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - check_greater_or_equal(function__,"rho",rho,0); - - // model body - - lp_accum__.add(normal_log(get_base1(mu_p,1,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(mu_p,2,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(mu_p,3,"mu_p",1), 0, 10.0)); - lp_accum__.add(normal_log(get_base1(mu_p,4,"mu_p",1), 0, 10.0)); - lp_accum__.add(normal_log(get_base1(mu_p,5,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 2), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - lp_accum__.add(cauchy_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(3, 4), stan::model::nil_index_list()), "sigma"), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(sigma,5,"sigma",1), 0, 0.20000000000000001)); - lp_accum__.add(normal_log(xi_pr, 0, 1.0)); - lp_accum__.add(normal_log(ep_pr, 0, 1.0)); - lp_accum__.add(normal_log(b_pr, 0, 1.0)); - lp_accum__.add(normal_log(pi_pr, 0, 1.0)); - lp_accum__.add(normal_log(rho_pr, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - 
stan::math::fill(wv_g,DUMMY_VAR__); - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - validate_non_negative_index("sv", "4", 4); - Eigen::Matrix sv(static_cast(4)); - (void) sv; // dummy to suppress unused var warning - - stan::math::initialize(sv, DUMMY_VAR__); - stan::math::fill(sv,DUMMY_VAR__); - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - stan::math::assign(wv_g, initV); - stan::math::assign(wv_ng, initV); - stan::math::assign(qv_g, initV); - stan::math::assign(qv_ng, initV); - stan::math::assign(sv, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - ((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)) + (get_base1(pi,i,"pi",1) * get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))), - "assigning variable wv_g"); - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - 
get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - lp_accum__.add(bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2), get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1))); - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - 
stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("xi_pr"); - names__.push_back("ep_pr"); - names__.push_back("b_pr"); - names__.push_back("pi_pr"); - names__.push_back("rho_pr"); - names__.push_back("xi"); - names__.push_back("ep"); - names__.push_back("b"); - names__.push_back("pi"); - names__.push_back("rho"); - names__.push_back("mu_xi"); - names__.push_back("mu_ep"); - names__.push_back("mu_b"); - names__.push_back("mu_pi"); - names__.push_back("mu_rho"); - names__.push_back("log_lik"); - names__.push_back("Qgo"); - names__.push_back("Qnogo"); - names__.push_back("Wgo"); - names__.push_back("Wnogo"); - names__.push_back("SV"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - 
dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(5); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(5); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* 
pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_gng_m3_reg_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(5); - vector_d sigma = in__.vector_lb_constrain(0,5); - vector_d xi_pr = in__.vector_constrain(N); - vector_d ep_pr = in__.vector_constrain(N); - vector_d b_pr = in__.vector_constrain(N); - vector_d pi_pr = in__.vector_constrain(N); - vector_d rho_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 5; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 5; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); 
- (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - stan::math::assign(pi, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),pi_pr))); - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),rho_pr)))); - - // validate transformed parameters - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - check_greater_or_equal(function__,"rho",rho,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b[k_0__]); 
- } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_xi; - (void) mu_xi; // dummy to suppress unused var warning - - stan::math::initialize(mu_xi, DUMMY_VAR__); - stan::math::fill(mu_xi,DUMMY_VAR__); - local_scalar_t__ mu_ep; - (void) mu_ep; // dummy to suppress unused var warning - - stan::math::initialize(mu_ep, DUMMY_VAR__); - stan::math::fill(mu_ep,DUMMY_VAR__); - local_scalar_t__ mu_b; - (void) mu_b; // dummy to suppress unused var warning - - stan::math::initialize(mu_b, DUMMY_VAR__); - stan::math::fill(mu_b,DUMMY_VAR__); - local_scalar_t__ mu_pi; - (void) mu_pi; // dummy to suppress unused var warning - - stan::math::initialize(mu_pi, DUMMY_VAR__); - stan::math::fill(mu_pi,DUMMY_VAR__); - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - stan::math::fill(mu_rho,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("Qgo", "N", N); - validate_non_negative_index("Qgo", "T", T); - vector > Qgo(N, (vector(T))); - stan::math::initialize(Qgo, DUMMY_VAR__); - stan::math::fill(Qgo,DUMMY_VAR__); - validate_non_negative_index("Qnogo", "N", N); - validate_non_negative_index("Qnogo", "T", T); - vector > Qnogo(N, (vector(T))); - stan::math::initialize(Qnogo, DUMMY_VAR__); - stan::math::fill(Qnogo,DUMMY_VAR__); - validate_non_negative_index("Wgo", "N", N); - validate_non_negative_index("Wgo", "T", T); - vector > Wgo(N, (vector(T))); - stan::math::initialize(Wgo, DUMMY_VAR__); - stan::math::fill(Wgo,DUMMY_VAR__); - validate_non_negative_index("Wnogo", "N", N); - validate_non_negative_index("Wnogo", "T", T); - vector > Wnogo(N, 
(vector(T))); - stan::math::initialize(Wnogo, DUMMY_VAR__); - stan::math::fill(Wnogo,DUMMY_VAR__); - validate_non_negative_index("SV", "N", N); - validate_non_negative_index("SV", "T", T); - vector > SV(N, (vector(T))); - stan::math::initialize(SV, DUMMY_VAR__); - stan::math::fill(SV,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_xi, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_ep, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_b, get_base1(mu_p,3,"mu_p",1)); - stan::math::assign(mu_pi, get_base1(mu_p,4,"mu_p",1)); - stan::math::assign(mu_rho, stan::math::exp(get_base1(mu_p,5,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - 
stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - validate_non_negative_index("sv", "4", 4); - Eigen::Matrix sv(static_cast(4)); - (void) sv; // dummy to suppress unused var warning - - stan::math::initialize(sv, DUMMY_VAR__); - stan::math::fill(sv,DUMMY_VAR__); - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - stan::math::assign(wv_g, initV); - stan::math::assign(wv_ng, initV); - stan::math::assign(qv_g, initV); - stan::math::assign(qv_ng, initV); - stan::math::assign(sv, initV); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - ((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)) + (get_base1(pi,i,"pi",1) * get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))), - "assigning variable wv_g"); - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - stan::model::assign(pGo, - 
stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2),get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1)))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1), base_rng__), - "assigning variable y_pred"); - stan::model::assign(Qgo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1), - "assigning variable Qgo"); - stan::model::assign(Qnogo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable Qnogo"); - stan::model::assign(Wgo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1), - "assigning variable Wgo"); - stan::model::assign(Wnogo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 
get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1), - "assigning variable Wnogo"); - stan::model::assign(SV, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1), - "assigning variable SV"); - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_xi",mu_xi,0); - 
check_less_or_equal(function__,"mu_xi",mu_xi,1); - check_greater_or_equal(function__,"mu_ep",mu_ep,0); - check_less_or_equal(function__,"mu_ep",mu_ep,1); - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - - // write generated quantities - vars__.push_back(mu_xi); - vars__.push_back(mu_ep); - vars__.push_back(mu_b); - vars__.push_back(mu_pi); - vars__.push_back(mu_rho); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Qgo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Qnogo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Wgo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Wnogo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(SV[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); 
- vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_gng_m3_reg"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qgo" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "SV" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wgo" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "SV" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_gng_m4_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_gng_m4"); - reader.add_event(192, 190, "end", "model_gng_m4"); - return reader; -} - -class model_gng_m4 : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > pressed; - vector > cue; - vector_d initV; -public: - model_gng_m4(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_gng_m4(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - 
std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_gng_m4_namespace::model_gng_m4"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - 
outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - context__.validate_dims("data initialization", "pressed", "int", context__.to_vec(N,T)); - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - pressed = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("pressed"); - pos__ = 0; - size_t pressed_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < pressed_limit_1__; ++i_1__) { - size_t pressed_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < pressed_limit_0__; ++i_0__) { - pressed[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - context__.validate_dims("data initialization", "cue", "int", context__.to_vec(N,T)); - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - cue = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("cue"); - pos__ = 0; - size_t cue_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cue_limit_1__; ++i_1__) { - size_t cue_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cue_limit_0__; ++i_0__) { - cue[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],-(1)); - check_less_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],1); - 
check_less_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],4); - } - } - // initialize data variables - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "6", 6); - num_params_r__ += 6; - validate_non_negative_index("sigma", "6", 6); - num_params_r__ += 6; - validate_non_negative_index("xi_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("ep_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("b_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("pi_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("rhoRew_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("rhoPun_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_gng_m4() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "6", 6); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(6)); - vector_d mu_p(static_cast(6)); - for (int j1__ = 0U; j1__ < 6; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - 
writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "6", 6); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(6)); - vector_d sigma(static_cast(6)); - for (int j1__ = 0U; j1__ < 6; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("xi_pr"))) - throw std::runtime_error("variable xi_pr missing"); - vals_r__ = context__.vals_r("xi_pr"); - pos__ = 0U; - validate_non_negative_index("xi_pr", "N", N); - context__.validate_dims("initialization", "xi_pr", "vector_d", context__.to_vec(N)); - vector_d xi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - xi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(xi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable xi_pr: ") + e.what()); - } - - if (!(context__.contains_r("ep_pr"))) - throw std::runtime_error("variable ep_pr missing"); - vals_r__ = context__.vals_r("ep_pr"); - pos__ = 0U; - validate_non_negative_index("ep_pr", "N", N); - context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); - vector_d ep_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ep_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ep_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); - } - - if (!(context__.contains_r("b_pr"))) - throw std::runtime_error("variable b_pr missing"); - vals_r__ = 
context__.vals_r("b_pr"); - pos__ = 0U; - validate_non_negative_index("b_pr", "N", N); - context__.validate_dims("initialization", "b_pr", "vector_d", context__.to_vec(N)); - vector_d b_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - b_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(b_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable b_pr: ") + e.what()); - } - - if (!(context__.contains_r("pi_pr"))) - throw std::runtime_error("variable pi_pr missing"); - vals_r__ = context__.vals_r("pi_pr"); - pos__ = 0U; - validate_non_negative_index("pi_pr", "N", N); - context__.validate_dims("initialization", "pi_pr", "vector_d", context__.to_vec(N)); - vector_d pi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - pi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(pi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable pi_pr: ") + e.what()); - } - - if (!(context__.contains_r("rhoRew_pr"))) - throw std::runtime_error("variable rhoRew_pr missing"); - vals_r__ = context__.vals_r("rhoRew_pr"); - pos__ = 0U; - validate_non_negative_index("rhoRew_pr", "N", N); - context__.validate_dims("initialization", "rhoRew_pr", "vector_d", context__.to_vec(N)); - vector_d rhoRew_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rhoRew_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rhoRew_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rhoRew_pr: ") + e.what()); - } - - if (!(context__.contains_r("rhoPun_pr"))) - throw std::runtime_error("variable rhoPun_pr missing"); - vals_r__ = context__.vals_r("rhoPun_pr"); - pos__ = 0U; - validate_non_negative_index("rhoPun_pr", "N", N); - context__.validate_dims("initialization", "rhoPun_pr", "vector_d", context__.to_vec(N)); - vector_d rhoPun_pr(static_cast(N)); - for (int j1__ = 
0U; j1__ < N; ++j1__) - rhoPun_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rhoPun_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rhoPun_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(6,lp__); - else - mu_p = in__.vector_constrain(6); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,6,lp__); - else - sigma = in__.vector_lb_constrain(0,6); - - Eigen::Matrix xi_pr; - (void) xi_pr; // dummy to suppress unused var warning - if (jacobian__) - xi_pr = in__.vector_constrain(N,lp__); - else - xi_pr = in__.vector_constrain(N); - - Eigen::Matrix ep_pr; - (void) ep_pr; // dummy to suppress unused var warning - if (jacobian__) - ep_pr = in__.vector_constrain(N,lp__); - else - ep_pr = in__.vector_constrain(N); - - Eigen::Matrix b_pr; - (void) b_pr; // dummy to suppress unused var warning - if (jacobian__) - b_pr = in__.vector_constrain(N,lp__); - else - b_pr = 
in__.vector_constrain(N); - - Eigen::Matrix pi_pr; - (void) pi_pr; // dummy to suppress unused var warning - if (jacobian__) - pi_pr = in__.vector_constrain(N,lp__); - else - pi_pr = in__.vector_constrain(N); - - Eigen::Matrix rhoRew_pr; - (void) rhoRew_pr; // dummy to suppress unused var warning - if (jacobian__) - rhoRew_pr = in__.vector_constrain(N,lp__); - else - rhoRew_pr = in__.vector_constrain(N); - - Eigen::Matrix rhoPun_pr; - (void) rhoPun_pr; // dummy to suppress unused var warning - if (jacobian__) - rhoPun_pr = in__.vector_constrain(N,lp__); - else - rhoPun_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - validate_non_negative_index("rhoRew", "N", N); - Eigen::Matrix rhoRew(static_cast(N)); - (void) rhoRew; // dummy to suppress unused var warning - - stan::math::initialize(rhoRew, DUMMY_VAR__); - stan::math::fill(rhoRew,DUMMY_VAR__); - validate_non_negative_index("rhoPun", "N", N); - Eigen::Matrix rhoPun(static_cast(N)); - (void) rhoPun; // dummy to suppress unused var warning - - stan::math::initialize(rhoPun, DUMMY_VAR__); - stan::math::fill(rhoPun,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) 
{ - - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - stan::math::assign(pi, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),pi_pr))); - stan::math::assign(rhoRew, stan::math::exp(add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),rhoRew_pr)))); - stan::math::assign(rhoPun, stan::math::exp(add(get_base1(mu_p,6,"mu_p",1),multiply(get_base1(sigma,6,"sigma",1),rhoPun_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(xi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: xi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ep(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ep" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(b(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: b" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(pi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: pi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if 
(stan::math::is_uninitialized(rhoRew(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rhoRew" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rhoPun(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rhoPun" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - check_greater_or_equal(function__,"rhoRew",rhoRew,0); - check_greater_or_equal(function__,"rhoPun",rhoPun,0); - - // model body - - lp_accum__.add(normal_log(get_base1(mu_p,1,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(mu_p,2,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(mu_p,3,"mu_p",1), 0, 10.0)); - lp_accum__.add(normal_log(get_base1(mu_p,4,"mu_p",1), 0, 10.0)); - lp_accum__.add(normal_log(get_base1(mu_p,5,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(mu_p,6,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 2), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - lp_accum__.add(cauchy_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(3, 4), stan::model::nil_index_list()), "sigma"), 0, 1.0)); - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(5, 6), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - lp_accum__.add(normal_log(xi_pr, 0, 1.0)); - lp_accum__.add(normal_log(ep_pr, 0, 1.0)); - lp_accum__.add(normal_log(b_pr, 0, 1.0)); - lp_accum__.add(normal_log(pi_pr, 0, 1.0)); - 
lp_accum__.add(normal_log(rhoRew_pr, 0, 1.0)); - lp_accum__.add(normal_log(rhoPun_pr, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - validate_non_negative_index("sv", "4", 4); - Eigen::Matrix sv(static_cast(4)); - (void) sv; // dummy to suppress unused var warning - - stan::math::initialize(sv, DUMMY_VAR__); - stan::math::fill(sv,DUMMY_VAR__); - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - stan::math::assign(wv_g, initV); - stan::math::assign(wv_ng, initV); - stan::math::assign(qv_g, initV); - stan::math::assign(qv_ng, initV); - stan::math::assign(sv, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - ((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)) + (get_base1(pi,i,"pi",1) 
* get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))), - "assigning variable wv_g"); - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - lp_accum__.add(bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2), get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1))); - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - } else { - - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - 
stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - } - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } - } else { - - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - 
get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } else { - - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("xi_pr"); - names__.push_back("ep_pr"); - names__.push_back("b_pr"); - names__.push_back("pi_pr"); - names__.push_back("rhoRew_pr"); - names__.push_back("rhoPun_pr"); - names__.push_back("xi"); - names__.push_back("ep"); - names__.push_back("b"); - names__.push_back("pi"); - names__.push_back("rhoRew"); - names__.push_back("rhoPun"); - names__.push_back("mu_xi"); - names__.push_back("mu_ep"); - names__.push_back("mu_b"); - names__.push_back("mu_pi"); - names__.push_back("mu_rhoRew"); - names__.push_back("mu_rhoPun"); - names__.push_back("log_lik"); 
- names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(6); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(6); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = 
"model_gng_m4_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(6); - vector_d sigma = in__.vector_lb_constrain(0,6); - vector_d xi_pr = in__.vector_constrain(N); - vector_d ep_pr = in__.vector_constrain(N); - vector_d b_pr = in__.vector_constrain(N); - vector_d pi_pr = in__.vector_constrain(N); - vector_d rhoRew_pr = in__.vector_constrain(N); - vector_d rhoPun_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 6; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 6; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rhoRew_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rhoPun_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to 
suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - validate_non_negative_index("rhoRew", "N", N); - Eigen::Matrix rhoRew(static_cast(N)); - (void) rhoRew; // dummy to suppress unused var warning - - stan::math::initialize(rhoRew, DUMMY_VAR__); - stan::math::fill(rhoRew,DUMMY_VAR__); - validate_non_negative_index("rhoPun", "N", N); - Eigen::Matrix rhoPun(static_cast(N)); - (void) rhoPun; // dummy to suppress unused var warning - - stan::math::initialize(rhoPun, DUMMY_VAR__); - stan::math::fill(rhoPun,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - stan::math::assign(pi, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),pi_pr))); - stan::math::assign(rhoRew, stan::math::exp(add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),rhoRew_pr)))); - stan::math::assign(rhoPun, stan::math::exp(add(get_base1(mu_p,6,"mu_p",1),multiply(get_base1(sigma,6,"sigma",1),rhoPun_pr)))); - - // validate transformed parameters - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - check_greater_or_equal(function__,"ep",ep,0); - 
check_less_or_equal(function__,"ep",ep,1); - check_greater_or_equal(function__,"rhoRew",rhoRew,0); - check_greater_or_equal(function__,"rhoPun",rhoPun,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rhoRew[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rhoPun[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_xi; - (void) mu_xi; // dummy to suppress unused var warning - - stan::math::initialize(mu_xi, DUMMY_VAR__); - stan::math::fill(mu_xi,DUMMY_VAR__); - local_scalar_t__ mu_ep; - (void) mu_ep; // dummy to suppress unused var warning - - stan::math::initialize(mu_ep, DUMMY_VAR__); - stan::math::fill(mu_ep,DUMMY_VAR__); - local_scalar_t__ mu_b; - (void) mu_b; // dummy to suppress unused var warning - - stan::math::initialize(mu_b, DUMMY_VAR__); - stan::math::fill(mu_b,DUMMY_VAR__); - local_scalar_t__ mu_pi; - (void) mu_pi; // dummy to suppress unused var warning - - stan::math::initialize(mu_pi, DUMMY_VAR__); - stan::math::fill(mu_pi,DUMMY_VAR__); - local_scalar_t__ mu_rhoRew; - (void) mu_rhoRew; // dummy to suppress unused var warning - - stan::math::initialize(mu_rhoRew, DUMMY_VAR__); - stan::math::fill(mu_rhoRew,DUMMY_VAR__); - local_scalar_t__ mu_rhoPun; - (void) mu_rhoPun; // dummy to suppress unused var warning - - stan::math::initialize(mu_rhoPun, DUMMY_VAR__); - stan::math::fill(mu_rhoPun,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - 
validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_xi, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_ep, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_b, get_base1(mu_p,3,"mu_p",1)); - stan::math::assign(mu_pi, get_base1(mu_p,4,"mu_p",1)); - stan::math::assign(mu_rhoRew, stan::math::exp(get_base1(mu_p,5,"mu_p",1))); - stan::math::assign(mu_rhoPun, stan::math::exp(get_base1(mu_p,6,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - validate_non_negative_index("sv", "4", 4); - Eigen::Matrix sv(static_cast(4)); - (void) sv; // dummy to suppress unused var warning - - 
stan::math::initialize(sv, DUMMY_VAR__); - stan::math::fill(sv,DUMMY_VAR__); - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - stan::math::assign(wv_g, initV); - stan::math::assign(wv_ng, initV); - stan::math::assign(qv_g, initV); - stan::math::assign(qv_ng, initV); - stan::math::assign(sv, initV); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - ((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)) + (get_base1(pi,i,"pi",1) * get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))), - "assigning variable wv_g"); - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 
2))), - "assigning variable pGo"); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2),get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1)))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1), base_rng__), - "assigning variable y_pred"); - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - } else { - - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - } - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::model::assign(qv_g, - 
stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } - } else { - - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } else { - - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - 
get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_xi",mu_xi,0); - check_less_or_equal(function__,"mu_xi",mu_xi,1); - check_greater_or_equal(function__,"mu_ep",mu_ep,0); - check_less_or_equal(function__,"mu_ep",mu_ep,1); - check_greater_or_equal(function__,"mu_rhoRew",mu_rhoRew,0); - check_greater_or_equal(function__,"mu_rhoPun",mu_rhoPun,0); - - // write generated quantities - vars__.push_back(mu_xi); - vars__.push_back(mu_ep); - vars__.push_back(mu_b); - vars__.push_back(mu_pi); - vars__.push_back(mu_rhoRew); - vars__.push_back(mu_rhoPun); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_gng_m4"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int 
k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoRew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoPun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoRew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoPun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rhoRew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rhoPun"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoRew_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoPun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoRew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoPun" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rhoRew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rhoPun"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_gng_m4_reg_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_gng_m4_reg"); - reader.add_event(204, 202, "end", "model_gng_m4_reg"); - return reader; -} - -class model_gng_m4_reg : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > pressed; - vector > cue; - vector_d initV; -public: - model_gng_m4_reg(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_gng_m4_reg(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_gng_m4_reg_namespace::model_gng_m4_reg"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = 
int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - context__.validate_dims("data initialization", "pressed", "int", context__.to_vec(N,T)); - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - pressed = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("pressed"); - pos__ = 0; - size_t pressed_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < pressed_limit_1__; ++i_1__) { - size_t pressed_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < pressed_limit_0__; ++i_0__) { - pressed[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("cue", "N", N); - 
validate_non_negative_index("cue", "T", T); - context__.validate_dims("data initialization", "cue", "int", context__.to_vec(N,T)); - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - cue = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("cue"); - pos__ = 0; - size_t cue_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cue_limit_1__; ++i_1__) { - size_t cue_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cue_limit_0__; ++i_0__) { - cue[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],-(1)); - check_less_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],1); - check_less_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],4); - } - } - // initialize data variables - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "6", 6); - num_params_r__ += 6; - validate_non_negative_index("sigma", "6", 6); - num_params_r__ += 6; - validate_non_negative_index("xi_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("ep_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("b_pr", "N", N); - 
num_params_r__ += N; - validate_non_negative_index("pi_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("rhoRew_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("rhoPun_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_gng_m4_reg() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "6", 6); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(6)); - vector_d mu_p(static_cast(6)); - for (int j1__ = 0U; j1__ < 6; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "6", 6); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(6)); - vector_d sigma(static_cast(6)); - for (int j1__ = 0U; j1__ < 6; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if 
(!(context__.contains_r("xi_pr"))) - throw std::runtime_error("variable xi_pr missing"); - vals_r__ = context__.vals_r("xi_pr"); - pos__ = 0U; - validate_non_negative_index("xi_pr", "N", N); - context__.validate_dims("initialization", "xi_pr", "vector_d", context__.to_vec(N)); - vector_d xi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - xi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(xi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable xi_pr: ") + e.what()); - } - - if (!(context__.contains_r("ep_pr"))) - throw std::runtime_error("variable ep_pr missing"); - vals_r__ = context__.vals_r("ep_pr"); - pos__ = 0U; - validate_non_negative_index("ep_pr", "N", N); - context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); - vector_d ep_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ep_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ep_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); - } - - if (!(context__.contains_r("b_pr"))) - throw std::runtime_error("variable b_pr missing"); - vals_r__ = context__.vals_r("b_pr"); - pos__ = 0U; - validate_non_negative_index("b_pr", "N", N); - context__.validate_dims("initialization", "b_pr", "vector_d", context__.to_vec(N)); - vector_d b_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - b_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(b_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable b_pr: ") + e.what()); - } - - if (!(context__.contains_r("pi_pr"))) - throw std::runtime_error("variable pi_pr missing"); - vals_r__ = context__.vals_r("pi_pr"); - pos__ = 0U; - validate_non_negative_index("pi_pr", "N", N); - context__.validate_dims("initialization", "pi_pr", "vector_d", context__.to_vec(N)); - vector_d 
pi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - pi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(pi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable pi_pr: ") + e.what()); - } - - if (!(context__.contains_r("rhoRew_pr"))) - throw std::runtime_error("variable rhoRew_pr missing"); - vals_r__ = context__.vals_r("rhoRew_pr"); - pos__ = 0U; - validate_non_negative_index("rhoRew_pr", "N", N); - context__.validate_dims("initialization", "rhoRew_pr", "vector_d", context__.to_vec(N)); - vector_d rhoRew_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rhoRew_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rhoRew_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rhoRew_pr: ") + e.what()); - } - - if (!(context__.contains_r("rhoPun_pr"))) - throw std::runtime_error("variable rhoPun_pr missing"); - vals_r__ = context__.vals_r("rhoPun_pr"); - pos__ = 0U; - validate_non_negative_index("rhoPun_pr", "N", N); - context__.validate_dims("initialization", "rhoPun_pr", "vector_d", context__.to_vec(N)); - vector_d rhoPun_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rhoPun_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rhoPun_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rhoPun_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& 
params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(6,lp__); - else - mu_p = in__.vector_constrain(6); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,6,lp__); - else - sigma = in__.vector_lb_constrain(0,6); - - Eigen::Matrix xi_pr; - (void) xi_pr; // dummy to suppress unused var warning - if (jacobian__) - xi_pr = in__.vector_constrain(N,lp__); - else - xi_pr = in__.vector_constrain(N); - - Eigen::Matrix ep_pr; - (void) ep_pr; // dummy to suppress unused var warning - if (jacobian__) - ep_pr = in__.vector_constrain(N,lp__); - else - ep_pr = in__.vector_constrain(N); - - Eigen::Matrix b_pr; - (void) b_pr; // dummy to suppress unused var warning - if (jacobian__) - b_pr = in__.vector_constrain(N,lp__); - else - b_pr = in__.vector_constrain(N); - - Eigen::Matrix pi_pr; - (void) pi_pr; // dummy to suppress unused var warning - if (jacobian__) - pi_pr = in__.vector_constrain(N,lp__); - else - pi_pr = in__.vector_constrain(N); - - Eigen::Matrix rhoRew_pr; - (void) rhoRew_pr; // dummy to suppress unused var warning - if (jacobian__) - rhoRew_pr = in__.vector_constrain(N,lp__); - else - rhoRew_pr = in__.vector_constrain(N); - - Eigen::Matrix rhoPun_pr; - (void) rhoPun_pr; // dummy to suppress unused var warning - if (jacobian__) - rhoPun_pr = in__.vector_constrain(N,lp__); - else - rhoPun_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var 
warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - validate_non_negative_index("rhoRew", "N", N); - Eigen::Matrix rhoRew(static_cast(N)); - (void) rhoRew; // dummy to suppress unused var warning - - stan::math::initialize(rhoRew, DUMMY_VAR__); - stan::math::fill(rhoRew,DUMMY_VAR__); - validate_non_negative_index("rhoPun", "N", N); - Eigen::Matrix rhoPun(static_cast(N)); - (void) rhoPun; // dummy to suppress unused var warning - - stan::math::initialize(rhoPun, DUMMY_VAR__); - stan::math::fill(rhoPun,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - stan::math::assign(pi, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),pi_pr))); - stan::math::assign(rhoRew, 
stan::math::exp(add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),rhoRew_pr)))); - stan::math::assign(rhoPun, stan::math::exp(add(get_base1(mu_p,6,"mu_p",1),multiply(get_base1(sigma,6,"sigma",1),rhoPun_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(xi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: xi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ep(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ep" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(b(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: b" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(pi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: pi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rhoRew(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rhoRew" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rhoPun(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rhoPun" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - check_greater_or_equal(function__,"ep",ep,0); - 
check_less_or_equal(function__,"ep",ep,1); - check_greater_or_equal(function__,"rhoRew",rhoRew,0); - check_greater_or_equal(function__,"rhoPun",rhoPun,0); - - // model body - - lp_accum__.add(normal_log(get_base1(mu_p,1,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(mu_p,2,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(mu_p,3,"mu_p",1), 0, 10.0)); - lp_accum__.add(normal_log(get_base1(mu_p,4,"mu_p",1), 0, 10.0)); - lp_accum__.add(normal_log(get_base1(mu_p,5,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(mu_p,6,"mu_p",1), 0, 1.0)); - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 2), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - lp_accum__.add(cauchy_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(3, 4), stan::model::nil_index_list()), "sigma"), 0, 1.0)); - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(5, 6), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - lp_accum__.add(normal_log(xi_pr, 0, 1.0)); - lp_accum__.add(normal_log(ep_pr, 0, 1.0)); - lp_accum__.add(normal_log(b_pr, 0, 1.0)); - lp_accum__.add(normal_log(pi_pr, 0, 1.0)); - lp_accum__.add(normal_log(rhoRew_pr, 0, 1.0)); - lp_accum__.add(normal_log(rhoPun_pr, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var 
warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - validate_non_negative_index("sv", "4", 4); - Eigen::Matrix sv(static_cast(4)); - (void) sv; // dummy to suppress unused var warning - - stan::math::initialize(sv, DUMMY_VAR__); - stan::math::fill(sv,DUMMY_VAR__); - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - stan::math::assign(wv_g, initV); - stan::math::assign(wv_ng, initV); - stan::math::assign(qv_g, initV); - stan::math::assign(qv_ng, initV); - stan::math::assign(sv, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - ((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)) + (get_base1(pi,i,"pi",1) * get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))), - "assigning variable wv_g"); - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - 
stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - lp_accum__.add(bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2), get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1))); - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - } else { - - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - } - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + 
(get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } - } else { - - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } else { - - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler 
griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("xi_pr"); - names__.push_back("ep_pr"); - names__.push_back("b_pr"); - names__.push_back("pi_pr"); - names__.push_back("rhoRew_pr"); - names__.push_back("rhoPun_pr"); - names__.push_back("xi"); - names__.push_back("ep"); - names__.push_back("b"); - names__.push_back("pi"); - names__.push_back("rhoRew"); - names__.push_back("rhoPun"); - names__.push_back("mu_xi"); - names__.push_back("mu_ep"); - names__.push_back("mu_b"); - names__.push_back("mu_pi"); - names__.push_back("mu_rhoRew"); - names__.push_back("mu_rhoPun"); - names__.push_back("log_lik"); - names__.push_back("Qgo"); - names__.push_back("Qnogo"); - names__.push_back("Wgo"); - names__.push_back("Wnogo"); - names__.push_back("SV"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(6); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(6); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - 
dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_gng_m4_reg_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(6); - 
vector_d sigma = in__.vector_lb_constrain(0,6); - vector_d xi_pr = in__.vector_constrain(N); - vector_d ep_pr = in__.vector_constrain(N); - vector_d b_pr = in__.vector_constrain(N); - vector_d pi_pr = in__.vector_constrain(N); - vector_d rhoRew_pr = in__.vector_constrain(N); - vector_d rhoPun_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 6; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 6; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rhoRew_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rhoPun_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - validate_non_negative_index("pi", "N", N); - Eigen::Matrix 
pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - validate_non_negative_index("rhoRew", "N", N); - Eigen::Matrix rhoRew(static_cast(N)); - (void) rhoRew; // dummy to suppress unused var warning - - stan::math::initialize(rhoRew, DUMMY_VAR__); - stan::math::fill(rhoRew,DUMMY_VAR__); - validate_non_negative_index("rhoPun", "N", N); - Eigen::Matrix rhoPun(static_cast(N)); - (void) rhoPun; // dummy to suppress unused var warning - - stan::math::initialize(rhoPun, DUMMY_VAR__); - stan::math::fill(rhoPun,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - stan::math::assign(pi, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),pi_pr))); - stan::math::assign(rhoRew, stan::math::exp(add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),rhoRew_pr)))); - stan::math::assign(rhoPun, stan::math::exp(add(get_base1(mu_p,6,"mu_p",1),multiply(get_base1(sigma,6,"sigma",1),rhoPun_pr)))); - - // validate transformed parameters - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - check_greater_or_equal(function__,"rhoRew",rhoRew,0); - check_greater_or_equal(function__,"rhoPun",rhoPun,0); - - // write transformed parameters - 
if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rhoRew[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rhoPun[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_xi; - (void) mu_xi; // dummy to suppress unused var warning - - stan::math::initialize(mu_xi, DUMMY_VAR__); - stan::math::fill(mu_xi,DUMMY_VAR__); - local_scalar_t__ mu_ep; - (void) mu_ep; // dummy to suppress unused var warning - - stan::math::initialize(mu_ep, DUMMY_VAR__); - stan::math::fill(mu_ep,DUMMY_VAR__); - local_scalar_t__ mu_b; - (void) mu_b; // dummy to suppress unused var warning - - stan::math::initialize(mu_b, DUMMY_VAR__); - stan::math::fill(mu_b,DUMMY_VAR__); - local_scalar_t__ mu_pi; - (void) mu_pi; // dummy to suppress unused var warning - - stan::math::initialize(mu_pi, DUMMY_VAR__); - stan::math::fill(mu_pi,DUMMY_VAR__); - local_scalar_t__ mu_rhoRew; - (void) mu_rhoRew; // dummy to suppress unused var warning - - stan::math::initialize(mu_rhoRew, DUMMY_VAR__); - stan::math::fill(mu_rhoRew,DUMMY_VAR__); - local_scalar_t__ mu_rhoPun; - (void) mu_rhoPun; // dummy to suppress unused var warning - - stan::math::initialize(mu_rhoPun, DUMMY_VAR__); - stan::math::fill(mu_rhoPun,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("Qgo", "N", N); - validate_non_negative_index("Qgo", "T", T); - vector > Qgo(N, (vector(T))); - stan::math::initialize(Qgo, DUMMY_VAR__); - stan::math::fill(Qgo,DUMMY_VAR__); - 
validate_non_negative_index("Qnogo", "N", N); - validate_non_negative_index("Qnogo", "T", T); - vector > Qnogo(N, (vector(T))); - stan::math::initialize(Qnogo, DUMMY_VAR__); - stan::math::fill(Qnogo,DUMMY_VAR__); - validate_non_negative_index("Wgo", "N", N); - validate_non_negative_index("Wgo", "T", T); - vector > Wgo(N, (vector(T))); - stan::math::initialize(Wgo, DUMMY_VAR__); - stan::math::fill(Wgo,DUMMY_VAR__); - validate_non_negative_index("Wnogo", "N", N); - validate_non_negative_index("Wnogo", "T", T); - vector > Wnogo(N, (vector(T))); - stan::math::initialize(Wnogo, DUMMY_VAR__); - stan::math::fill(Wnogo,DUMMY_VAR__); - validate_non_negative_index("SV", "N", N); - validate_non_negative_index("SV", "T", T); - vector > SV(N, (vector(T))); - stan::math::initialize(SV, DUMMY_VAR__); - stan::math::fill(SV,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_xi, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_ep, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_b, get_base1(mu_p,3,"mu_p",1)); - stan::math::assign(mu_pi, get_base1(mu_p,4,"mu_p",1)); - stan::math::assign(mu_rhoRew, stan::math::exp(get_base1(mu_p,5,"mu_p",1))); - stan::math::assign(mu_rhoPun, stan::math::exp(get_base1(mu_p,6,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - 
stan::math::fill(wv_g,DUMMY_VAR__); - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - validate_non_negative_index("sv", "4", 4); - Eigen::Matrix sv(static_cast(4)); - (void) sv; // dummy to suppress unused var warning - - stan::math::initialize(sv, DUMMY_VAR__); - stan::math::fill(sv,DUMMY_VAR__); - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - stan::math::assign(wv_g, initV); - stan::math::assign(wv_ng, initV); - stan::math::assign(qv_g, initV); - stan::math::assign(qv_ng, initV); - stan::math::assign(sv, initV); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - ((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)) + (get_base1(pi,i,"pi",1) * get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))), - "assigning variable wv_g"); - stan::model::assign(wv_ng, - 
stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2),get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1)))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1), base_rng__), - "assigning variable y_pred"); - stan::model::assign(Qgo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1), - "assigning variable Qgo"); - stan::model::assign(Qnogo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), 
stan::model::nil_index_list())), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable Qnogo"); - stan::model::assign(Wgo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1), - "assigning variable Wgo"); - stan::model::assign(Wnogo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1), - "assigning variable Wnogo"); - stan::model::assign(SV, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1), - "assigning variable SV"); - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - } else { - - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - } - if 
(as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } - } else { - - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } else { - - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - 
stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_xi",mu_xi,0); - check_less_or_equal(function__,"mu_xi",mu_xi,1); - check_greater_or_equal(function__,"mu_ep",mu_ep,0); - check_less_or_equal(function__,"mu_ep",mu_ep,1); - check_greater_or_equal(function__,"mu_rhoRew",mu_rhoRew,0); - check_greater_or_equal(function__,"mu_rhoPun",mu_rhoPun,0); - - // write generated quantities - vars__.push_back(mu_xi); - vars__.push_back(mu_ep); - vars__.push_back(mu_b); - vars__.push_back(mu_pi); - vars__.push_back(mu_rhoRew); - vars__.push_back(mu_rhoPun); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Qgo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Qnogo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Wgo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Wnogo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(SV[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return 
- throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_gng_m4_reg"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoRew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoPun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoRew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoPun" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rhoRew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rhoPun"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wgo" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "SV" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoRew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoPun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoRew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoPun" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rhoRew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rhoPun"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wgo" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "SV" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_igt_orl_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_igt_orl"); - reader.add_event(206, 204, "end", "model_igt_orl"); - return reader; -} - -class model_igt_orl : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > sign_out; - vector > choice; - vector_d initV; -public: - model_igt_orl(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_igt_orl(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int 
random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_igt_orl_namespace::model_igt_orl"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; 
++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("sign_out", "N", N); - validate_non_negative_index("sign_out", "T", T); - context__.validate_dims("data initialization", "sign_out", "double", context__.to_vec(N,T)); - validate_non_negative_index("sign_out", "N", N); - validate_non_negative_index("sign_out", "T", T); - sign_out = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("sign_out"); - pos__ = 0; - size_t sign_out_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < sign_out_limit_1__; ++i_1__) { - size_t sign_out_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < sign_out_limit_0__; ++i_0__) { - sign_out[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - // initialize data variables - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - 
validate_non_negative_index("mu_p", "5", 5); - num_params_r__ += 5; - validate_non_negative_index("sigma", "5", 5); - num_params_r__ += 5; - validate_non_negative_index("Arew_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("Apun_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("K_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("betaF_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("betaP_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_igt_orl() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "5", 5); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(5)); - vector_d mu_p(static_cast(5)); - for (int j1__ = 0U; j1__ < 5; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "5", 5); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(5)); - vector_d sigma(static_cast(5)); - for (int 
j1__ = 0U; j1__ < 5; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("Arew_pr"))) - throw std::runtime_error("variable Arew_pr missing"); - vals_r__ = context__.vals_r("Arew_pr"); - pos__ = 0U; - validate_non_negative_index("Arew_pr", "N", N); - context__.validate_dims("initialization", "Arew_pr", "vector_d", context__.to_vec(N)); - vector_d Arew_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Arew_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Arew_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Arew_pr: ") + e.what()); - } - - if (!(context__.contains_r("Apun_pr"))) - throw std::runtime_error("variable Apun_pr missing"); - vals_r__ = context__.vals_r("Apun_pr"); - pos__ = 0U; - validate_non_negative_index("Apun_pr", "N", N); - context__.validate_dims("initialization", "Apun_pr", "vector_d", context__.to_vec(N)); - vector_d Apun_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Apun_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Apun_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Apun_pr: ") + e.what()); - } - - if (!(context__.contains_r("K_pr"))) - throw std::runtime_error("variable K_pr missing"); - vals_r__ = context__.vals_r("K_pr"); - pos__ = 0U; - validate_non_negative_index("K_pr", "N", N); - context__.validate_dims("initialization", "K_pr", "vector_d", context__.to_vec(N)); - vector_d K_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - K_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(K_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable K_pr: ") + e.what()); - } - - 
if (!(context__.contains_r("betaF_pr"))) - throw std::runtime_error("variable betaF_pr missing"); - vals_r__ = context__.vals_r("betaF_pr"); - pos__ = 0U; - validate_non_negative_index("betaF_pr", "N", N); - context__.validate_dims("initialization", "betaF_pr", "vector_d", context__.to_vec(N)); - vector_d betaF_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - betaF_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(betaF_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable betaF_pr: ") + e.what()); - } - - if (!(context__.contains_r("betaP_pr"))) - throw std::runtime_error("variable betaP_pr missing"); - vals_r__ = context__.vals_r("betaP_pr"); - pos__ = 0U; - validate_non_negative_index("betaP_pr", "N", N); - context__.validate_dims("initialization", "betaP_pr", "vector_d", context__.to_vec(N)); - vector_d betaP_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - betaP_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(betaP_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable betaP_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - 
stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(5,lp__); - else - mu_p = in__.vector_constrain(5); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,5,lp__); - else - sigma = in__.vector_lb_constrain(0,5); - - Eigen::Matrix Arew_pr; - (void) Arew_pr; // dummy to suppress unused var warning - if (jacobian__) - Arew_pr = in__.vector_constrain(N,lp__); - else - Arew_pr = in__.vector_constrain(N); - - Eigen::Matrix Apun_pr; - (void) Apun_pr; // dummy to suppress unused var warning - if (jacobian__) - Apun_pr = in__.vector_constrain(N,lp__); - else - Apun_pr = in__.vector_constrain(N); - - Eigen::Matrix K_pr; - (void) K_pr; // dummy to suppress unused var warning - if (jacobian__) - K_pr = in__.vector_constrain(N,lp__); - else - K_pr = in__.vector_constrain(N); - - Eigen::Matrix betaF_pr; - (void) betaF_pr; // dummy to suppress unused var warning - if (jacobian__) - betaF_pr = in__.vector_constrain(N,lp__); - else - betaF_pr = in__.vector_constrain(N); - - Eigen::Matrix betaP_pr; - (void) betaP_pr; // dummy to suppress unused var warning - if (jacobian__) - betaP_pr = in__.vector_constrain(N,lp__); - else - betaP_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - validate_non_negative_index("K", "N", N); - Eigen::Matrix K(static_cast(N)); - (void) K; // dummy to suppress unused var warning - - 
stan::math::initialize(K, DUMMY_VAR__); - stan::math::fill(K,DUMMY_VAR__); - validate_non_negative_index("betaF", "N", N); - Eigen::Matrix betaF(static_cast(N)); - (void) betaF; // dummy to suppress unused var warning - - stan::math::initialize(betaF, DUMMY_VAR__); - stan::math::fill(betaF,DUMMY_VAR__); - validate_non_negative_index("betaP", "N", N); - Eigen::Matrix betaP(static_cast(N)); - (void) betaP; // dummy to suppress unused var warning - - stan::math::initialize(betaP, DUMMY_VAR__); - stan::math::fill(betaP,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - stan::model::assign(K, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx(((get_base1(mu_p,3,"mu_p",1) + get_base1(sigma,3,"sigma",1)) + get_base1(K_pr,i,"K_pr",1))) * 5), - "assigning variable K"); - } - stan::math::assign(betaF, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),betaF_pr))); - stan::math::assign(betaP, add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),betaP_pr))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Arew(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Arew" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Apun(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: 
Apun" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(K(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: K" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(betaF(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: betaF" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(betaP(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: betaP" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - check_greater_or_equal(function__,"K",K,0); - check_less_or_equal(function__,"K",K,5); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 3), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - lp_accum__.add(cauchy_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(4, 5), stan::model::nil_index_list()), "sigma"), 0, 1.0)); - lp_accum__.add(normal_log(Arew_pr, 0, 1.0)); - lp_accum__.add(normal_log(Apun_pr, 0, 1.0)); - lp_accum__.add(normal_log(K_pr, 0, 1.0)); - lp_accum__.add(normal_log(betaF_pr, 0, 1.0)); - lp_accum__.add(normal_log(betaP_pr, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ef", "4", 4); - Eigen::Matrix ef(static_cast(4)); - (void) ef; // dummy to 
suppress unused var warning - - stan::math::initialize(ef, DUMMY_VAR__); - stan::math::fill(ef,DUMMY_VAR__); - validate_non_negative_index("ev", "4", 4); - Eigen::Matrix ev(static_cast(4)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - validate_non_negative_index("PEfreq_fic", "4", 4); - Eigen::Matrix PEfreq_fic(static_cast(4)); - (void) PEfreq_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEfreq_fic, DUMMY_VAR__); - stan::math::fill(PEfreq_fic,DUMMY_VAR__); - validate_non_negative_index("PEval_fic", "4", 4); - Eigen::Matrix PEval_fic(static_cast(4)); - (void) PEval_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEval_fic, DUMMY_VAR__); - stan::math::fill(PEval_fic,DUMMY_VAR__); - validate_non_negative_index("pers", "4", 4); - Eigen::Matrix pers(static_cast(4)); - (void) pers; // dummy to suppress unused var warning - - stan::math::initialize(pers, DUMMY_VAR__); - stan::math::fill(pers,DUMMY_VAR__); - validate_non_negative_index("util", "4", 4); - Eigen::Matrix util(static_cast(4)); - (void) util; // dummy to suppress unused var warning - - stan::math::initialize(util, DUMMY_VAR__); - stan::math::fill(util,DUMMY_VAR__); - local_scalar_t__ PEval; - (void) PEval; // dummy to suppress unused var warning - - stan::math::initialize(PEval, DUMMY_VAR__); - stan::math::fill(PEval,DUMMY_VAR__); - local_scalar_t__ PEfreq; - (void) PEfreq; // dummy to suppress unused var warning - - stan::math::initialize(PEfreq, DUMMY_VAR__); - stan::math::fill(PEfreq,DUMMY_VAR__); - local_scalar_t__ efChosen; - (void) efChosen; // dummy to suppress unused var warning - - stan::math::initialize(efChosen, DUMMY_VAR__); - stan::math::fill(efChosen,DUMMY_VAR__); - local_scalar_t__ evChosen; - (void) evChosen; // dummy to suppress unused var warning - - stan::math::initialize(evChosen, DUMMY_VAR__); - stan::math::fill(evChosen,DUMMY_VAR__); - 
local_scalar_t__ K_tr; - (void) K_tr; // dummy to suppress unused var warning - - stan::math::initialize(K_tr, DUMMY_VAR__); - stan::math::fill(K_tr,DUMMY_VAR__); - - - stan::math::assign(ef, initV); - stan::math::assign(ev, initV); - stan::math::assign(pers, initV); - stan::math::assign(util, initV); - stan::math::assign(K_tr, (pow(3,get_base1(K,i,"K",1)) - 1)); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - lp_accum__.add(categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), util)); - stan::math::assign(PEval, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - stan::math::assign(PEfreq, (get_base1(get_base1(sign_out,i,"sign_out",1),t,"sign_out",2) - get_base1(ef,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ef",1))); - stan::math::assign(PEfreq_fic, subtract((-(get_base1(get_base1(sign_out,i,"sign_out",1),t,"sign_out",2)) / 3),ef)); - stan::math::assign(efChosen, get_base1(ef,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ef",1)); - stan::math::assign(evChosen, get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1)); - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::math::assign(ef, stan::model::deep_copy(add(ef,multiply(get_base1(Apun,i,"Apun",1),PEfreq_fic)))); - stan::model::assign(ef, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (efChosen + (get_base1(Arew,i,"Arew",1) * PEfreq)), - "assigning variable ef"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (evChosen + (get_base1(Arew,i,"Arew",1) * PEval)), - "assigning variable ev"); - } else { - - stan::math::assign(ef, 
stan::model::deep_copy(add(ef,multiply(get_base1(Arew,i,"Arew",1),PEfreq_fic)))); - stan::model::assign(ef, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (efChosen + (get_base1(Apun,i,"Apun",1) * PEfreq)), - "assigning variable ef"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (evChosen + (get_base1(Apun,i,"Apun",1) * PEval)), - "assigning variable ev"); - } - stan::model::assign(pers, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - 1, - "assigning variable pers"); - stan::math::assign(pers, stan::model::deep_copy(divide(pers,(1 + K_tr)))); - stan::math::assign(util, add(add(ev,multiply(ef,get_base1(betaF,i,"betaF",1))),multiply(pers,get_base1(betaP,i,"betaP",1)))); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("Arew_pr"); - names__.push_back("Apun_pr"); - names__.push_back("K_pr"); - names__.push_back("betaF_pr"); - names__.push_back("betaP_pr"); - names__.push_back("Arew"); - 
names__.push_back("Apun"); - names__.push_back("K"); - names__.push_back("betaF"); - names__.push_back("betaP"); - names__.push_back("mu_Arew"); - names__.push_back("mu_Apun"); - names__.push_back("mu_K"); - names__.push_back("mu_betaF"); - names__.push_back("mu_betaP"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(5); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(5); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - 
stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_igt_orl_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(5); - vector_d sigma = in__.vector_lb_constrain(0,5); - vector_d Arew_pr = in__.vector_constrain(N); - vector_d Apun_pr = in__.vector_constrain(N); - vector_d K_pr = in__.vector_constrain(N); - vector_d betaF_pr = in__.vector_constrain(N); - vector_d betaP_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 5; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 5; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Arew_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(K_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(betaF_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(betaP_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - validate_non_negative_index("K", "N", N); - Eigen::Matrix K(static_cast(N)); - (void) K; // dummy to suppress unused var 
warning - - stan::math::initialize(K, DUMMY_VAR__); - stan::math::fill(K,DUMMY_VAR__); - validate_non_negative_index("betaF", "N", N); - Eigen::Matrix betaF(static_cast(N)); - (void) betaF; // dummy to suppress unused var warning - - stan::math::initialize(betaF, DUMMY_VAR__); - stan::math::fill(betaF,DUMMY_VAR__); - validate_non_negative_index("betaP", "N", N); - Eigen::Matrix betaP(static_cast(N)); - (void) betaP; // dummy to suppress unused var warning - - stan::math::initialize(betaP, DUMMY_VAR__); - stan::math::fill(betaP,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - stan::model::assign(K, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx(((get_base1(mu_p,3,"mu_p",1) + get_base1(sigma,3,"sigma",1)) + get_base1(K_pr,i,"K_pr",1))) * 5), - "assigning variable K"); - } - stan::math::assign(betaF, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),betaF_pr))); - stan::math::assign(betaP, add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),betaP_pr))); - - // validate transformed parameters - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - check_greater_or_equal(function__,"K",K,0); - check_less_or_equal(function__,"K",K,5); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; 
++k_0__) { - vars__.push_back(Arew[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(K[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(betaF[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(betaP[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_Arew; - (void) mu_Arew; // dummy to suppress unused var warning - - stan::math::initialize(mu_Arew, DUMMY_VAR__); - stan::math::fill(mu_Arew,DUMMY_VAR__); - local_scalar_t__ mu_Apun; - (void) mu_Apun; // dummy to suppress unused var warning - - stan::math::initialize(mu_Apun, DUMMY_VAR__); - stan::math::fill(mu_Apun,DUMMY_VAR__); - local_scalar_t__ mu_K; - (void) mu_K; // dummy to suppress unused var warning - - stan::math::initialize(mu_K, DUMMY_VAR__); - stan::math::fill(mu_K,DUMMY_VAR__); - local_scalar_t__ mu_betaF; - (void) mu_betaF; // dummy to suppress unused var warning - - stan::math::initialize(mu_betaF, DUMMY_VAR__); - stan::math::fill(mu_betaF,DUMMY_VAR__); - local_scalar_t__ mu_betaP; - (void) mu_betaP; // dummy to suppress unused var warning - - stan::math::initialize(mu_betaP, DUMMY_VAR__); - stan::math::fill(mu_betaP,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - 
stan::math::assign(mu_Arew, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_Apun, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_K, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 5)); - stan::math::assign(mu_betaF, get_base1(mu_p,4,"mu_p",1)); - stan::math::assign(mu_betaP, get_base1(mu_p,5,"mu_p",1)); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ef", "4", 4); - Eigen::Matrix ef(static_cast(4)); - (void) ef; // dummy to suppress unused var warning - - stan::math::initialize(ef, DUMMY_VAR__); - stan::math::fill(ef,DUMMY_VAR__); - validate_non_negative_index("ev", "4", 4); - Eigen::Matrix ev(static_cast(4)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - validate_non_negative_index("PEfreq_fic", "4", 4); - Eigen::Matrix PEfreq_fic(static_cast(4)); - (void) PEfreq_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEfreq_fic, DUMMY_VAR__); - stan::math::fill(PEfreq_fic,DUMMY_VAR__); - validate_non_negative_index("PEval_fic", "4", 4); - Eigen::Matrix PEval_fic(static_cast(4)); - (void) PEval_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEval_fic, DUMMY_VAR__); - stan::math::fill(PEval_fic,DUMMY_VAR__); - validate_non_negative_index("pers", "4", 4); - Eigen::Matrix pers(static_cast(4)); - (void) pers; // dummy to suppress unused var warning - - stan::math::initialize(pers, DUMMY_VAR__); - stan::math::fill(pers,DUMMY_VAR__); - validate_non_negative_index("util", "4", 4); - Eigen::Matrix util(static_cast(4)); - (void) util; // dummy to suppress unused var warning - - stan::math::initialize(util, DUMMY_VAR__); - stan::math::fill(util,DUMMY_VAR__); - local_scalar_t__ PEval; - (void) PEval; // dummy to suppress unused var warning - - stan::math::initialize(PEval, DUMMY_VAR__); - stan::math::fill(PEval,DUMMY_VAR__); - local_scalar_t__ PEfreq; - (void) PEfreq; // dummy to suppress unused 
var warning - - stan::math::initialize(PEfreq, DUMMY_VAR__); - stan::math::fill(PEfreq,DUMMY_VAR__); - local_scalar_t__ efChosen; - (void) efChosen; // dummy to suppress unused var warning - - stan::math::initialize(efChosen, DUMMY_VAR__); - stan::math::fill(efChosen,DUMMY_VAR__); - local_scalar_t__ evChosen; - (void) evChosen; // dummy to suppress unused var warning - - stan::math::initialize(evChosen, DUMMY_VAR__); - stan::math::fill(evChosen,DUMMY_VAR__); - local_scalar_t__ K_tr; - (void) K_tr; // dummy to suppress unused var warning - - stan::math::initialize(K_tr, DUMMY_VAR__); - stan::math::fill(K_tr,DUMMY_VAR__); - - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - stan::math::assign(ef, initV); - stan::math::assign(ev, initV); - stan::math::assign(pers, initV); - stan::math::assign(util, initV); - stan::math::assign(K_tr, (pow(3,get_base1(K,i,"K",1)) - 1)); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),util))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(softmax(util), base_rng__), - "assigning variable y_pred"); - stan::math::assign(PEval, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - stan::math::assign(PEfreq, (get_base1(get_base1(sign_out,i,"sign_out",1),t,"sign_out",2) - get_base1(ef,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ef",1))); - stan::math::assign(PEfreq_fic, 
subtract((-(get_base1(get_base1(sign_out,i,"sign_out",1),t,"sign_out",2)) / 3),ef)); - stan::math::assign(efChosen, get_base1(ef,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ef",1)); - stan::math::assign(evChosen, get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1)); - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::math::assign(ef, stan::model::deep_copy(add(ef,multiply(get_base1(Apun,i,"Apun",1),PEfreq_fic)))); - stan::model::assign(ef, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (efChosen + (get_base1(Arew,i,"Arew",1) * PEfreq)), - "assigning variable ef"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (evChosen + (get_base1(Arew,i,"Arew",1) * PEval)), - "assigning variable ev"); - } else { - - stan::math::assign(ef, stan::model::deep_copy(add(ef,multiply(get_base1(Arew,i,"Arew",1),PEfreq_fic)))); - stan::model::assign(ef, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (efChosen + (get_base1(Apun,i,"Apun",1) * PEfreq)), - "assigning variable ef"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (evChosen + (get_base1(Apun,i,"Apun",1) * PEval)), - "assigning variable ev"); - } - stan::model::assign(pers, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - 1, - "assigning variable pers"); - stan::math::assign(pers, stan::model::deep_copy(divide(pers,(1 + K_tr)))); - stan::math::assign(util, 
add(add(ev,multiply(ef,get_base1(betaF,i,"betaF",1))),multiply(pers,get_base1(betaP,i,"betaP",1)))); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_Arew",mu_Arew,0); - check_less_or_equal(function__,"mu_Arew",mu_Arew,1); - check_greater_or_equal(function__,"mu_Apun",mu_Apun,0); - check_less_or_equal(function__,"mu_Apun",mu_Apun,1); - check_greater_or_equal(function__,"mu_K",mu_K,0); - check_less_or_equal(function__,"mu_K",mu_K,5); - - // write generated quantities - vars__.push_back(mu_Arew); - vars__.push_back(mu_Apun); - vars__.push_back(mu_K); - vars__.push_back(mu_betaF); - vars__.push_back(mu_betaP); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_igt_orl"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - 
param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "K_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "betaF_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "betaP_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "K" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "betaF" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "betaP" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_K"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_betaF"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_betaP"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "K_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "betaF_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "betaP_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "K" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "betaF" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "betaP" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_K"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_betaF"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_betaP"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_igt_pvl_decay_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_igt_pvl_decay"); - reader.add_event(133, 131, "end", "model_igt_pvl_decay"); - return reader; -} - -class model_igt_pvl_decay : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > choice; - vector_d initV; -public: - model_igt_pvl_decay(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_igt_pvl_decay(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_igt_pvl_decay_namespace::model_igt_pvl_decay"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", 
context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - 
check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - // initialize data variables - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "4", 4); - num_params_r__ += 4; - validate_non_negative_index("sigma", "4", 4); - num_params_r__ += 4; - validate_non_negative_index("A_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("cons_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("lambda_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_igt_pvl_decay() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "4", 4); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(4)); - vector_d mu_p(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - mu_p(j1__) = 
vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "4", 4); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(4)); - vector_d sigma(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("A_pr"))) - throw std::runtime_error("variable A_pr missing"); - vals_r__ = context__.vals_r("A_pr"); - pos__ = 0U; - validate_non_negative_index("A_pr", "N", N); - context__.validate_dims("initialization", "A_pr", "vector_d", context__.to_vec(N)); - vector_d A_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - A_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(A_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable A_pr: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("cons_pr"))) - throw 
std::runtime_error("variable cons_pr missing"); - vals_r__ = context__.vals_r("cons_pr"); - pos__ = 0U; - validate_non_negative_index("cons_pr", "N", N); - context__.validate_dims("initialization", "cons_pr", "vector_d", context__.to_vec(N)); - vector_d cons_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - cons_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(cons_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable cons_pr: ") + e.what()); - } - - if (!(context__.contains_r("lambda_pr"))) - throw std::runtime_error("variable lambda_pr missing"); - vals_r__ = context__.vals_r("lambda_pr"); - pos__ = 0U; - validate_non_negative_index("lambda_pr", "N", N); - context__.validate_dims("initialization", "lambda_pr", "vector_d", context__.to_vec(N)); - vector_d lambda_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - lambda_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(lambda_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable lambda_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - 
Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(4,lp__); - else - mu_p = in__.vector_constrain(4); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,4,lp__); - else - sigma = in__.vector_lb_constrain(0,4); - - Eigen::Matrix A_pr; - (void) A_pr; // dummy to suppress unused var warning - if (jacobian__) - A_pr = in__.vector_constrain(N,lp__); - else - A_pr = in__.vector_constrain(N); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = in__.vector_constrain(N); - - Eigen::Matrix cons_pr; - (void) cons_pr; // dummy to suppress unused var warning - if (jacobian__) - cons_pr = in__.vector_constrain(N,lp__); - else - cons_pr = in__.vector_constrain(N); - - Eigen::Matrix lambda_pr; - (void) lambda_pr; // dummy to suppress unused var warning - if (jacobian__) - lambda_pr = in__.vector_constrain(N,lp__); - else - lambda_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("A", "N", N); - Eigen::Matrix A(static_cast(N)); - (void) A; // dummy to suppress unused var warning - - stan::math::initialize(A, DUMMY_VAR__); - stan::math::fill(A,DUMMY_VAR__); - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("cons", "N", N); - Eigen::Matrix cons(static_cast(N)); - (void) cons; // dummy to suppress unused var warning - - stan::math::initialize(cons, DUMMY_VAR__); - stan::math::fill(cons,DUMMY_VAR__); - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - 
stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(A, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(A_pr,i,"A_pr",1)))), - "assigning variable A"); - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 2), - "assigning variable alpha"); - stan::model::assign(cons, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(cons_pr,i,"cons_pr",1)))) * 5), - "assigning variable cons"); - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(lambda_pr,i,"lambda_pr",1)))) * 10), - "assigning variable lambda"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(A(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: A" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(cons(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: cons" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if 
(stan::math::is_uninitialized(lambda(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: lambda" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"A",A,0); - check_less_or_equal(function__,"A",A,1); - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - check_greater_or_equal(function__,"cons",cons,0); - check_less_or_equal(function__,"cons",cons,5); - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,10); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(A_pr, 0, 1)); - lp_accum__.add(normal_log(alpha_pr, 0, 1)); - lp_accum__.add(normal_log(cons_pr, 0, 1)); - lp_accum__.add(normal_log(lambda_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "4", 4); - Eigen::Matrix ev(static_cast(4)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - local_scalar_t__ curUtil; - (void) curUtil; // dummy to suppress unused var warning - - stan::math::initialize(curUtil, DUMMY_VAR__); - stan::math::fill(curUtil,DUMMY_VAR__); - local_scalar_t__ theta; - (void) theta; // dummy to suppress unused var warning - - stan::math::initialize(theta, DUMMY_VAR__); - stan::math::fill(theta,DUMMY_VAR__); - - - stan::math::assign(theta, (pow(3,get_base1(cons,i,"cons",1)) - 1)); - stan::math::assign(ev, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - lp_accum__.add(categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), multiply(theta,ev))); - if 
(as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::math::assign(curUtil, pow(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),get_base1(alpha,i,"alpha",1))); - } else { - - stan::math::assign(curUtil, ((-(1) * get_base1(lambda,i,"lambda",1)) * pow((-(1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)),get_base1(alpha,i,"alpha",1)))); - } - stan::math::assign(ev, stan::model::deep_copy(multiply(ev,get_base1(A,i,"A",1)))); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + curUtil)), - "assigning variable ev"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("A_pr"); - names__.push_back("alpha_pr"); - names__.push_back("cons_pr"); - names__.push_back("lambda_pr"); - names__.push_back("A"); - names__.push_back("alpha"); - names__.push_back("cons"); - names__.push_back("lambda"); - names__.push_back("mu_A"); - names__.push_back("mu_alpha"); - names__.push_back("mu_cons"); - names__.push_back("mu_lambda"); - 
names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_igt_pvl_decay_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(4); - vector_d sigma = in__.vector_lb_constrain(0,4); - vector_d A_pr = in__.vector_constrain(N); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d cons_pr = in__.vector_constrain(N); - 
vector_d lambda_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(A_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(cons_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("A", "N", N); - Eigen::Matrix A(static_cast(N)); - (void) A; // dummy to suppress unused var warning - - stan::math::initialize(A, DUMMY_VAR__); - stan::math::fill(A,DUMMY_VAR__); - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("cons", "N", N); - Eigen::Matrix cons(static_cast(N)); - (void) cons; // dummy to suppress unused var warning - - stan::math::initialize(cons, DUMMY_VAR__); - stan::math::fill(cons,DUMMY_VAR__); - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(A, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(A_pr,i,"A_pr",1)))), - 
"assigning variable A"); - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 2), - "assigning variable alpha"); - stan::model::assign(cons, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(cons_pr,i,"cons_pr",1)))) * 5), - "assigning variable cons"); - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(lambda_pr,i,"lambda_pr",1)))) * 10), - "assigning variable lambda"); - } - - // validate transformed parameters - check_greater_or_equal(function__,"A",A,0); - check_less_or_equal(function__,"A",A,1); - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - check_greater_or_equal(function__,"cons",cons,0); - check_less_or_equal(function__,"cons",cons,5); - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,10); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(A[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(cons[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_A; - (void) mu_A; // dummy to suppress unused var warning - - stan::math::initialize(mu_A, DUMMY_VAR__); - stan::math::fill(mu_A,DUMMY_VAR__); - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - 
stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - local_scalar_t__ mu_cons; - (void) mu_cons; // dummy to suppress unused var warning - - stan::math::initialize(mu_cons, DUMMY_VAR__); - stan::math::fill(mu_cons,DUMMY_VAR__); - local_scalar_t__ mu_lambda; - (void) mu_lambda; // dummy to suppress unused var warning - - stan::math::initialize(mu_lambda, DUMMY_VAR__); - stan::math::fill(mu_lambda,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_A, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_alpha, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 2)); - stan::math::assign(mu_cons, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 5)); - stan::math::assign(mu_lambda, (Phi_approx(get_base1(mu_p,4,"mu_p",1)) * 10)); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "4", 4); - Eigen::Matrix ev(static_cast(4)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - local_scalar_t__ curUtil; - (void) curUtil; // dummy to suppress unused var warning - - stan::math::initialize(curUtil, DUMMY_VAR__); - stan::math::fill(curUtil,DUMMY_VAR__); - local_scalar_t__ theta; - (void) theta; // dummy to suppress unused var warning - - stan::math::initialize(theta, DUMMY_VAR__); - 
stan::math::fill(theta,DUMMY_VAR__); - - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - stan::math::assign(theta, (pow(3,get_base1(cons,i,"cons",1)) - 1)); - stan::math::assign(ev, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),multiply(theta,ev)))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(softmax(multiply(theta,ev)), base_rng__), - "assigning variable y_pred"); - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::math::assign(curUtil, pow(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),get_base1(alpha,i,"alpha",1))); - } else { - - stan::math::assign(curUtil, ((-(1) * get_base1(lambda,i,"lambda",1)) * pow((-(1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)),get_base1(alpha,i,"alpha",1)))); - } - stan::math::assign(ev, stan::model::deep_copy(multiply(ev,get_base1(A,i,"A",1)))); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + curUtil)), - "assigning variable ev"); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_A",mu_A,0); - check_less_or_equal(function__,"mu_A",mu_A,1); - check_greater_or_equal(function__,"mu_alpha",mu_alpha,0); - check_less_or_equal(function__,"mu_alpha",mu_alpha,2); - 
check_greater_or_equal(function__,"mu_cons",mu_cons,0); - check_less_or_equal(function__,"mu_cons",mu_cons,5); - check_greater_or_equal(function__,"mu_lambda",mu_lambda,0); - check_less_or_equal(function__,"mu_lambda",mu_lambda,10); - - // write generated quantities - vars__.push_back(mu_A); - vars__.push_back(mu_alpha); - vars__.push_back(mu_cons); - vars__.push_back(mu_lambda); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_igt_pvl_decay"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_cons"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_cons"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_igt_pvl_delta_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_igt_pvl_delta"); - reader.add_event(131, 129, "end", "model_igt_pvl_delta"); - return reader; -} - -class model_igt_pvl_delta : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > choice; - vector_d initV; -public: - model_igt_pvl_delta(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_igt_pvl_delta(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_igt_pvl_delta_namespace::model_igt_pvl_delta"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", 
context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - 
check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - // initialize data variables - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "4", 4); - num_params_r__ += 4; - validate_non_negative_index("sigma", "4", 4); - num_params_r__ += 4; - validate_non_negative_index("A_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("cons_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("lambda_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_igt_pvl_delta() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "4", 4); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(4)); - vector_d mu_p(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - mu_p(j1__) = 
vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "4", 4); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(4)); - vector_d sigma(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("A_pr"))) - throw std::runtime_error("variable A_pr missing"); - vals_r__ = context__.vals_r("A_pr"); - pos__ = 0U; - validate_non_negative_index("A_pr", "N", N); - context__.validate_dims("initialization", "A_pr", "vector_d", context__.to_vec(N)); - vector_d A_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - A_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(A_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable A_pr: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("cons_pr"))) - throw 
std::runtime_error("variable cons_pr missing"); - vals_r__ = context__.vals_r("cons_pr"); - pos__ = 0U; - validate_non_negative_index("cons_pr", "N", N); - context__.validate_dims("initialization", "cons_pr", "vector_d", context__.to_vec(N)); - vector_d cons_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - cons_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(cons_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable cons_pr: ") + e.what()); - } - - if (!(context__.contains_r("lambda_pr"))) - throw std::runtime_error("variable lambda_pr missing"); - vals_r__ = context__.vals_r("lambda_pr"); - pos__ = 0U; - validate_non_negative_index("lambda_pr", "N", N); - context__.validate_dims("initialization", "lambda_pr", "vector_d", context__.to_vec(N)); - vector_d lambda_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - lambda_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(lambda_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable lambda_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - 
Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(4,lp__); - else - mu_p = in__.vector_constrain(4); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,4,lp__); - else - sigma = in__.vector_lb_constrain(0,4); - - Eigen::Matrix A_pr; - (void) A_pr; // dummy to suppress unused var warning - if (jacobian__) - A_pr = in__.vector_constrain(N,lp__); - else - A_pr = in__.vector_constrain(N); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = in__.vector_constrain(N); - - Eigen::Matrix cons_pr; - (void) cons_pr; // dummy to suppress unused var warning - if (jacobian__) - cons_pr = in__.vector_constrain(N,lp__); - else - cons_pr = in__.vector_constrain(N); - - Eigen::Matrix lambda_pr; - (void) lambda_pr; // dummy to suppress unused var warning - if (jacobian__) - lambda_pr = in__.vector_constrain(N,lp__); - else - lambda_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("A", "N", N); - Eigen::Matrix A(static_cast(N)); - (void) A; // dummy to suppress unused var warning - - stan::math::initialize(A, DUMMY_VAR__); - stan::math::fill(A,DUMMY_VAR__); - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("cons", "N", N); - Eigen::Matrix cons(static_cast(N)); - (void) cons; // dummy to suppress unused var warning - - stan::math::initialize(cons, DUMMY_VAR__); - stan::math::fill(cons,DUMMY_VAR__); - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - 
stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(A, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(A_pr,i,"A_pr",1)))), - "assigning variable A"); - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 2), - "assigning variable alpha"); - stan::model::assign(cons, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(cons_pr,i,"cons_pr",1)))) * 5), - "assigning variable cons"); - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(lambda_pr,i,"lambda_pr",1)))) * 10), - "assigning variable lambda"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(A(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: A" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(cons(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: cons" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if 
(stan::math::is_uninitialized(lambda(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: lambda" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"A",A,0); - check_less_or_equal(function__,"A",A,1); - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - check_greater_or_equal(function__,"cons",cons,0); - check_less_or_equal(function__,"cons",cons,5); - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,10); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(A_pr, 0, 1)); - lp_accum__.add(normal_log(alpha_pr, 0, 1)); - lp_accum__.add(normal_log(cons_pr, 0, 1)); - lp_accum__.add(normal_log(lambda_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "4", 4); - Eigen::Matrix ev(static_cast(4)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - local_scalar_t__ curUtil; - (void) curUtil; // dummy to suppress unused var warning - - stan::math::initialize(curUtil, DUMMY_VAR__); - stan::math::fill(curUtil,DUMMY_VAR__); - local_scalar_t__ theta; - (void) theta; // dummy to suppress unused var warning - - stan::math::initialize(theta, DUMMY_VAR__); - stan::math::fill(theta,DUMMY_VAR__); - - - stan::math::assign(theta, (pow(3,get_base1(cons,i,"cons",1)) - 1)); - stan::math::assign(ev, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - lp_accum__.add(categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), multiply(theta,ev))); - if 
(as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::math::assign(curUtil, pow(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),get_base1(alpha,i,"alpha",1))); - } else { - - stan::math::assign(curUtil, ((-(1) * get_base1(lambda,i,"lambda",1)) * pow((-(1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)),get_base1(alpha,i,"alpha",1)))); - } - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + (get_base1(A,i,"A",1) * (curUtil - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))))), - "assigning variable ev"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("A_pr"); - names__.push_back("alpha_pr"); - names__.push_back("cons_pr"); - names__.push_back("lambda_pr"); - names__.push_back("A"); - names__.push_back("alpha"); - names__.push_back("cons"); - names__.push_back("lambda"); - names__.push_back("mu_A"); - names__.push_back("mu_alpha"); - names__.push_back("mu_cons"); - names__.push_back("mu_lambda"); - 
names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_igt_pvl_delta_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(4); - vector_d sigma = in__.vector_lb_constrain(0,4); - vector_d A_pr = in__.vector_constrain(N); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d cons_pr = in__.vector_constrain(N); - 
vector_d lambda_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(A_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(cons_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("A", "N", N); - Eigen::Matrix A(static_cast(N)); - (void) A; // dummy to suppress unused var warning - - stan::math::initialize(A, DUMMY_VAR__); - stan::math::fill(A,DUMMY_VAR__); - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("cons", "N", N); - Eigen::Matrix cons(static_cast(N)); - (void) cons; // dummy to suppress unused var warning - - stan::math::initialize(cons, DUMMY_VAR__); - stan::math::fill(cons,DUMMY_VAR__); - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(A, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(A_pr,i,"A_pr",1)))), - 
"assigning variable A"); - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 2), - "assigning variable alpha"); - stan::model::assign(cons, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(cons_pr,i,"cons_pr",1)))) * 5), - "assigning variable cons"); - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(lambda_pr,i,"lambda_pr",1)))) * 10), - "assigning variable lambda"); - } - - // validate transformed parameters - check_greater_or_equal(function__,"A",A,0); - check_less_or_equal(function__,"A",A,1); - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - check_greater_or_equal(function__,"cons",cons,0); - check_less_or_equal(function__,"cons",cons,5); - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,10); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(A[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(cons[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_A; - (void) mu_A; // dummy to suppress unused var warning - - stan::math::initialize(mu_A, DUMMY_VAR__); - stan::math::fill(mu_A,DUMMY_VAR__); - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - 
stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - local_scalar_t__ mu_cons; - (void) mu_cons; // dummy to suppress unused var warning - - stan::math::initialize(mu_cons, DUMMY_VAR__); - stan::math::fill(mu_cons,DUMMY_VAR__); - local_scalar_t__ mu_lambda; - (void) mu_lambda; // dummy to suppress unused var warning - - stan::math::initialize(mu_lambda, DUMMY_VAR__); - stan::math::fill(mu_lambda,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_A, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_alpha, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 2)); - stan::math::assign(mu_cons, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 5)); - stan::math::assign(mu_lambda, (Phi_approx(get_base1(mu_p,4,"mu_p",1)) * 10)); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "4", 4); - Eigen::Matrix ev(static_cast(4)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - local_scalar_t__ curUtil; - (void) curUtil; // dummy to suppress unused var warning - - stan::math::initialize(curUtil, DUMMY_VAR__); - stan::math::fill(curUtil,DUMMY_VAR__); - local_scalar_t__ theta; - (void) theta; // dummy to suppress unused var warning - - stan::math::initialize(theta, DUMMY_VAR__); - 
stan::math::fill(theta,DUMMY_VAR__); - - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - stan::math::assign(theta, (pow(3,get_base1(cons,i,"cons",1)) - 1)); - stan::math::assign(ev, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),multiply(theta,ev)))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(softmax(multiply(theta,ev)), base_rng__), - "assigning variable y_pred"); - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::math::assign(curUtil, pow(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),get_base1(alpha,i,"alpha",1))); - } else { - - stan::math::assign(curUtil, ((-(1) * get_base1(lambda,i,"lambda",1)) * pow((-(1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)),get_base1(alpha,i,"alpha",1)))); - } - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + (get_base1(A,i,"A",1) * (curUtil - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))))), - "assigning variable ev"); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_A",mu_A,0); - check_less_or_equal(function__,"mu_A",mu_A,1); - check_greater_or_equal(function__,"mu_alpha",mu_alpha,0); - 
check_less_or_equal(function__,"mu_alpha",mu_alpha,2); - check_greater_or_equal(function__,"mu_cons",mu_cons,0); - check_less_or_equal(function__,"mu_cons",mu_cons,5); - check_greater_or_equal(function__,"mu_lambda",mu_lambda,0); - check_less_or_equal(function__,"mu_lambda",mu_lambda,10); - - // write generated quantities - vars__.push_back(mu_A); - vars__.push_back(mu_alpha); - vars__.push_back(mu_cons); - vars__.push_back(mu_lambda); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_igt_pvl_delta"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_cons"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_cons"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_igt_vpp_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_igt_vpp"); - reader.add_event(187, 185, "end", "model_igt_vpp"); - return reader; -} - -class model_igt_vpp : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > choice; - vector_d initV; -public: - model_igt_vpp(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_igt_vpp(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_igt_vpp_namespace::model_igt_vpp"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = 
context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - 
check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - // initialize data variables - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "8", 8); - num_params_r__ += 8; - validate_non_negative_index("sigma", "8", 8); - num_params_r__ += 8; - validate_non_negative_index("A_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("cons_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("lambda_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("epP_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("epN_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("K_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("w_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_igt_vpp() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - 
pos__ = 0U; - validate_non_negative_index("mu_p", "8", 8); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(8)); - vector_d mu_p(static_cast(8)); - for (int j1__ = 0U; j1__ < 8; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "8", 8); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(8)); - vector_d sigma(static_cast(8)); - for (int j1__ = 0U; j1__ < 8; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("A_pr"))) - throw std::runtime_error("variable A_pr missing"); - vals_r__ = context__.vals_r("A_pr"); - pos__ = 0U; - validate_non_negative_index("A_pr", "N", N); - context__.validate_dims("initialization", "A_pr", "vector_d", context__.to_vec(N)); - vector_d A_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - A_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(A_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable A_pr: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - 
writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("cons_pr"))) - throw std::runtime_error("variable cons_pr missing"); - vals_r__ = context__.vals_r("cons_pr"); - pos__ = 0U; - validate_non_negative_index("cons_pr", "N", N); - context__.validate_dims("initialization", "cons_pr", "vector_d", context__.to_vec(N)); - vector_d cons_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - cons_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(cons_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable cons_pr: ") + e.what()); - } - - if (!(context__.contains_r("lambda_pr"))) - throw std::runtime_error("variable lambda_pr missing"); - vals_r__ = context__.vals_r("lambda_pr"); - pos__ = 0U; - validate_non_negative_index("lambda_pr", "N", N); - context__.validate_dims("initialization", "lambda_pr", "vector_d", context__.to_vec(N)); - vector_d lambda_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - lambda_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(lambda_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable lambda_pr: ") + e.what()); - } - - if (!(context__.contains_r("epP_pr"))) - throw std::runtime_error("variable epP_pr missing"); - vals_r__ = context__.vals_r("epP_pr"); - pos__ = 0U; - validate_non_negative_index("epP_pr", "N", N); - context__.validate_dims("initialization", "epP_pr", "vector_d", context__.to_vec(N)); - vector_d epP_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - epP_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(epP_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable epP_pr: ") + e.what()); - } - - if (!(context__.contains_r("epN_pr"))) 
- throw std::runtime_error("variable epN_pr missing"); - vals_r__ = context__.vals_r("epN_pr"); - pos__ = 0U; - validate_non_negative_index("epN_pr", "N", N); - context__.validate_dims("initialization", "epN_pr", "vector_d", context__.to_vec(N)); - vector_d epN_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - epN_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(epN_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable epN_pr: ") + e.what()); - } - - if (!(context__.contains_r("K_pr"))) - throw std::runtime_error("variable K_pr missing"); - vals_r__ = context__.vals_r("K_pr"); - pos__ = 0U; - validate_non_negative_index("K_pr", "N", N); - context__.validate_dims("initialization", "K_pr", "vector_d", context__.to_vec(N)); - vector_d K_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - K_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(K_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable K_pr: ") + e.what()); - } - - if (!(context__.contains_r("w_pr"))) - throw std::runtime_error("variable w_pr missing"); - vals_r__ = context__.vals_r("w_pr"); - pos__ = 0U; - validate_non_negative_index("w_pr", "N", N); - context__.validate_dims("initialization", "w_pr", "vector_d", context__.to_vec(N)); - vector_d w_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - w_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(w_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable w_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - 
params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(8,lp__); - else - mu_p = in__.vector_constrain(8); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,8,lp__); - else - sigma = in__.vector_lb_constrain(0,8); - - Eigen::Matrix A_pr; - (void) A_pr; // dummy to suppress unused var warning - if (jacobian__) - A_pr = in__.vector_constrain(N,lp__); - else - A_pr = in__.vector_constrain(N); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = in__.vector_constrain(N); - - Eigen::Matrix cons_pr; - (void) cons_pr; // dummy to suppress unused var warning - if (jacobian__) - cons_pr = in__.vector_constrain(N,lp__); - else - cons_pr = in__.vector_constrain(N); - - Eigen::Matrix lambda_pr; - (void) lambda_pr; // dummy to suppress unused var warning - if (jacobian__) - lambda_pr = in__.vector_constrain(N,lp__); - else - lambda_pr = in__.vector_constrain(N); - - Eigen::Matrix epP_pr; - (void) epP_pr; // dummy to suppress unused var warning - if (jacobian__) - epP_pr = in__.vector_constrain(N,lp__); - else - epP_pr = in__.vector_constrain(N); - - Eigen::Matrix epN_pr; - (void) epN_pr; // dummy to suppress unused var warning - if (jacobian__) - epN_pr = in__.vector_constrain(N,lp__); - else - 
epN_pr = in__.vector_constrain(N); - - Eigen::Matrix K_pr; - (void) K_pr; // dummy to suppress unused var warning - if (jacobian__) - K_pr = in__.vector_constrain(N,lp__); - else - K_pr = in__.vector_constrain(N); - - Eigen::Matrix w_pr; - (void) w_pr; // dummy to suppress unused var warning - if (jacobian__) - w_pr = in__.vector_constrain(N,lp__); - else - w_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("A", "N", N); - Eigen::Matrix A(static_cast(N)); - (void) A; // dummy to suppress unused var warning - - stan::math::initialize(A, DUMMY_VAR__); - stan::math::fill(A,DUMMY_VAR__); - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("cons", "N", N); - Eigen::Matrix cons(static_cast(N)); - (void) cons; // dummy to suppress unused var warning - - stan::math::initialize(cons, DUMMY_VAR__); - stan::math::fill(cons,DUMMY_VAR__); - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - validate_non_negative_index("epP", "N", N); - Eigen::Matrix epP(static_cast(N)); - (void) epP; // dummy to suppress unused var warning - - stan::math::initialize(epP, DUMMY_VAR__); - stan::math::fill(epP,DUMMY_VAR__); - validate_non_negative_index("epN", "N", N); - Eigen::Matrix epN(static_cast(N)); - (void) epN; // dummy to suppress unused var warning - - stan::math::initialize(epN, DUMMY_VAR__); - stan::math::fill(epN,DUMMY_VAR__); - validate_non_negative_index("K", "N", N); - Eigen::Matrix K(static_cast(N)); - (void) K; // dummy to suppress unused var warning - - stan::math::initialize(K, DUMMY_VAR__); - stan::math::fill(K,DUMMY_VAR__); - 
validate_non_negative_index("w", "N", N); - Eigen::Matrix w(static_cast(N)); - (void) w; // dummy to suppress unused var warning - - stan::math::initialize(w, DUMMY_VAR__); - stan::math::fill(w,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(A, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(A_pr,i,"A_pr",1)))), - "assigning variable A"); - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 2), - "assigning variable alpha"); - stan::model::assign(cons, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(cons_pr,i,"cons_pr",1)))) * 5), - "assigning variable cons"); - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(lambda_pr,i,"lambda_pr",1)))) * 10), - "assigning variable lambda"); - stan::model::assign(K, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,7,"mu_p",1) + (get_base1(sigma,7,"sigma",1) * get_base1(K_pr,i,"K_pr",1)))), - "assigning variable K"); - stan::model::assign(w, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,8,"mu_p",1) + (get_base1(sigma,8,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), - "assigning variable w"); - } - stan::math::assign(epP, add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),epP_pr))); - stan::math::assign(epN, add(get_base1(mu_p,6,"mu_p",1),multiply(get_base1(sigma,6,"sigma",1),epN_pr))); - - // validate 
transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(A(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: A" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(cons(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: cons" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(lambda(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: lambda" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(epP(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: epP" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(epN(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: epN" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(K(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: K" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(w(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: w" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - 
(void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"A",A,0); - check_less_or_equal(function__,"A",A,1); - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - check_greater_or_equal(function__,"cons",cons,0); - check_less_or_equal(function__,"cons",cons,5); - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,10); - check_greater_or_equal(function__,"K",K,0); - check_less_or_equal(function__,"K",K,1); - check_greater_or_equal(function__,"w",w,0); - check_less_or_equal(function__,"w",w,1); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1.0)); - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 4), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - lp_accum__.add(cauchy_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(5, 6), stan::model::nil_index_list()), "sigma"), 0, 1.0)); - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(7, 8), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - lp_accum__.add(normal_log(A_pr, 0, 1.0)); - lp_accum__.add(normal_log(alpha_pr, 0, 1.0)); - lp_accum__.add(normal_log(cons_pr, 0, 1.0)); - lp_accum__.add(normal_log(lambda_pr, 0, 1.0)); - lp_accum__.add(normal_log(epP_pr, 0, 1.0)); - lp_accum__.add(normal_log(epN_pr, 0, 1.0)); - lp_accum__.add(normal_log(K_pr, 0, 1.0)); - lp_accum__.add(normal_log(w_pr, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "4", 4); - Eigen::Matrix ev(static_cast(4)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - validate_non_negative_index("p_next", "4", 4); - Eigen::Matrix p_next(static_cast(4)); - (void) p_next; // dummy to suppress unused var warning - - 
stan::math::initialize(p_next, DUMMY_VAR__); - stan::math::fill(p_next,DUMMY_VAR__); - validate_non_negative_index("str", "4", 4); - Eigen::Matrix str(static_cast(4)); - (void) str; // dummy to suppress unused var warning - - stan::math::initialize(str, DUMMY_VAR__); - stan::math::fill(str,DUMMY_VAR__); - validate_non_negative_index("pers", "4", 4); - Eigen::Matrix pers(static_cast(4)); - (void) pers; // dummy to suppress unused var warning - - stan::math::initialize(pers, DUMMY_VAR__); - stan::math::fill(pers,DUMMY_VAR__); - validate_non_negative_index("V", "4", 4); - Eigen::Matrix V(static_cast(4)); - (void) V; // dummy to suppress unused var warning - - stan::math::initialize(V, DUMMY_VAR__); - stan::math::fill(V,DUMMY_VAR__); - local_scalar_t__ curUtil; - (void) curUtil; // dummy to suppress unused var warning - - stan::math::initialize(curUtil, DUMMY_VAR__); - stan::math::fill(curUtil,DUMMY_VAR__); - local_scalar_t__ theta; - (void) theta; // dummy to suppress unused var warning - - stan::math::initialize(theta, DUMMY_VAR__); - stan::math::fill(theta,DUMMY_VAR__); - - - stan::math::assign(theta, (pow(3,get_base1(cons,i,"cons",1)) - 1)); - stan::math::assign(ev, initV); - stan::math::assign(pers, initV); - stan::math::assign(V, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - lp_accum__.add(categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), multiply(theta,V))); - stan::math::assign(pers, stan::model::deep_copy(multiply(pers,get_base1(K,i,"K",1)))); - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::math::assign(curUtil, pow(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),get_base1(alpha,i,"alpha",1))); - stan::model::assign(pers, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - 
stan::model::deep_copy((get_base1(pers,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"pers",1) + get_base1(epP,i,"epP",1))), - "assigning variable pers"); - } else { - - stan::math::assign(curUtil, ((-(1) * get_base1(lambda,i,"lambda",1)) * pow((-(1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)),get_base1(alpha,i,"alpha",1)))); - stan::model::assign(pers, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(pers,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"pers",1) + get_base1(epN,i,"epN",1))), - "assigning variable pers"); - } - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + (get_base1(A,i,"A",1) * (curUtil - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))))), - "assigning variable ev"); - stan::math::assign(V, add(multiply(get_base1(w,i,"w",1),ev),multiply((1 - get_base1(w,i,"w",1)),pers))); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - 
names__.push_back("sigma"); - names__.push_back("A_pr"); - names__.push_back("alpha_pr"); - names__.push_back("cons_pr"); - names__.push_back("lambda_pr"); - names__.push_back("epP_pr"); - names__.push_back("epN_pr"); - names__.push_back("K_pr"); - names__.push_back("w_pr"); - names__.push_back("A"); - names__.push_back("alpha"); - names__.push_back("cons"); - names__.push_back("lambda"); - names__.push_back("epP"); - names__.push_back("epN"); - names__.push_back("K"); - names__.push_back("w"); - names__.push_back("mu_A"); - names__.push_back("mu_alpha"); - names__.push_back("mu_cons"); - names__.push_back("mu_lambda"); - names__.push_back("mu_epP"); - names__.push_back("mu_epN"); - names__.push_back("mu_K"); - names__.push_back("mu_w"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(8); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(8); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - 
dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_igt_vpp_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(8); - vector_d sigma = in__.vector_lb_constrain(0,8); - vector_d A_pr = in__.vector_constrain(N); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d cons_pr = in__.vector_constrain(N); - vector_d lambda_pr = in__.vector_constrain(N); - vector_d epP_pr = in__.vector_constrain(N); - vector_d epN_pr = in__.vector_constrain(N); - vector_d K_pr = in__.vector_constrain(N); - vector_d w_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 8; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 8; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(A_pr[k_0__]); - } - for (int 
k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(cons_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(epP_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(epN_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(K_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("A", "N", N); - Eigen::Matrix A(static_cast(N)); - (void) A; // dummy to suppress unused var warning - - stan::math::initialize(A, DUMMY_VAR__); - stan::math::fill(A,DUMMY_VAR__); - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("cons", "N", N); - Eigen::Matrix cons(static_cast(N)); - (void) cons; // dummy to suppress unused var warning - - stan::math::initialize(cons, DUMMY_VAR__); - stan::math::fill(cons,DUMMY_VAR__); - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - validate_non_negative_index("epP", "N", N); - Eigen::Matrix epP(static_cast(N)); - (void) epP; // dummy to suppress unused var warning - - stan::math::initialize(epP, DUMMY_VAR__); - stan::math::fill(epP,DUMMY_VAR__); - 
validate_non_negative_index("epN", "N", N); - Eigen::Matrix epN(static_cast(N)); - (void) epN; // dummy to suppress unused var warning - - stan::math::initialize(epN, DUMMY_VAR__); - stan::math::fill(epN,DUMMY_VAR__); - validate_non_negative_index("K", "N", N); - Eigen::Matrix K(static_cast(N)); - (void) K; // dummy to suppress unused var warning - - stan::math::initialize(K, DUMMY_VAR__); - stan::math::fill(K,DUMMY_VAR__); - validate_non_negative_index("w", "N", N); - Eigen::Matrix w(static_cast(N)); - (void) w; // dummy to suppress unused var warning - - stan::math::initialize(w, DUMMY_VAR__); - stan::math::fill(w,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(A, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(A_pr,i,"A_pr",1)))), - "assigning variable A"); - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 2), - "assigning variable alpha"); - stan::model::assign(cons, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(cons_pr,i,"cons_pr",1)))) * 5), - "assigning variable cons"); - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(lambda_pr,i,"lambda_pr",1)))) * 10), - "assigning variable lambda"); - stan::model::assign(K, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,7,"mu_p",1) + (get_base1(sigma,7,"sigma",1) * get_base1(K_pr,i,"K_pr",1)))), - "assigning variable K"); - stan::model::assign(w, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,8,"mu_p",1) + (get_base1(sigma,8,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), - "assigning variable w"); - } - stan::math::assign(epP, add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),epP_pr))); - stan::math::assign(epN, add(get_base1(mu_p,6,"mu_p",1),multiply(get_base1(sigma,6,"sigma",1),epN_pr))); - - // validate transformed parameters - check_greater_or_equal(function__,"A",A,0); - check_less_or_equal(function__,"A",A,1); - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - check_greater_or_equal(function__,"cons",cons,0); - check_less_or_equal(function__,"cons",cons,5); - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,10); - check_greater_or_equal(function__,"K",K,0); - check_less_or_equal(function__,"K",K,1); - check_greater_or_equal(function__,"w",w,0); - check_less_or_equal(function__,"w",w,1); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(A[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(cons[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(epP[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(epN[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(K[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_A; - (void) mu_A; // dummy to suppress unused var warning - - stan::math::initialize(mu_A, DUMMY_VAR__); - stan::math::fill(mu_A,DUMMY_VAR__); 
- local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - local_scalar_t__ mu_cons; - (void) mu_cons; // dummy to suppress unused var warning - - stan::math::initialize(mu_cons, DUMMY_VAR__); - stan::math::fill(mu_cons,DUMMY_VAR__); - local_scalar_t__ mu_lambda; - (void) mu_lambda; // dummy to suppress unused var warning - - stan::math::initialize(mu_lambda, DUMMY_VAR__); - stan::math::fill(mu_lambda,DUMMY_VAR__); - local_scalar_t__ mu_epP; - (void) mu_epP; // dummy to suppress unused var warning - - stan::math::initialize(mu_epP, DUMMY_VAR__); - stan::math::fill(mu_epP,DUMMY_VAR__); - local_scalar_t__ mu_epN; - (void) mu_epN; // dummy to suppress unused var warning - - stan::math::initialize(mu_epN, DUMMY_VAR__); - stan::math::fill(mu_epN,DUMMY_VAR__); - local_scalar_t__ mu_K; - (void) mu_K; // dummy to suppress unused var warning - - stan::math::initialize(mu_K, DUMMY_VAR__); - stan::math::fill(mu_K,DUMMY_VAR__); - local_scalar_t__ mu_w; - (void) mu_w; // dummy to suppress unused var warning - - stan::math::initialize(mu_w, DUMMY_VAR__); - stan::math::fill(mu_w,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_A, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_alpha, 
(Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 2)); - stan::math::assign(mu_cons, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 5)); - stan::math::assign(mu_lambda, (Phi_approx(get_base1(mu_p,4,"mu_p",1)) * 10)); - stan::math::assign(mu_epP, get_base1(mu_p,5,"mu_p",1)); - stan::math::assign(mu_epN, get_base1(mu_p,6,"mu_p",1)); - stan::math::assign(mu_K, Phi_approx(get_base1(mu_p,7,"mu_p",1))); - stan::math::assign(mu_w, Phi_approx(get_base1(mu_p,8,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "4", 4); - Eigen::Matrix ev(static_cast(4)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - validate_non_negative_index("p_next", "4", 4); - Eigen::Matrix p_next(static_cast(4)); - (void) p_next; // dummy to suppress unused var warning - - stan::math::initialize(p_next, DUMMY_VAR__); - stan::math::fill(p_next,DUMMY_VAR__); - validate_non_negative_index("str", "4", 4); - Eigen::Matrix str(static_cast(4)); - (void) str; // dummy to suppress unused var warning - - stan::math::initialize(str, DUMMY_VAR__); - stan::math::fill(str,DUMMY_VAR__); - validate_non_negative_index("pers", "4", 4); - Eigen::Matrix pers(static_cast(4)); - (void) pers; // dummy to suppress unused var warning - - stan::math::initialize(pers, DUMMY_VAR__); - stan::math::fill(pers,DUMMY_VAR__); - validate_non_negative_index("V", "4", 4); - Eigen::Matrix V(static_cast(4)); - (void) V; // dummy to suppress unused var warning - - stan::math::initialize(V, DUMMY_VAR__); - stan::math::fill(V,DUMMY_VAR__); - local_scalar_t__ curUtil; - (void) curUtil; // dummy to suppress unused var warning - - stan::math::initialize(curUtil, DUMMY_VAR__); - stan::math::fill(curUtil,DUMMY_VAR__); - local_scalar_t__ theta; - (void) theta; // dummy to suppress unused var warning - - stan::math::initialize(theta, DUMMY_VAR__); - stan::math::fill(theta,DUMMY_VAR__); - - - stan::model::assign(log_lik, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - stan::math::assign(theta, (pow(3,get_base1(cons,i,"cons",1)) - 1)); - stan::math::assign(ev, initV); - stan::math::assign(pers, initV); - stan::math::assign(V, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),multiply(theta,V)))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(softmax(multiply(theta,V)), base_rng__), - "assigning variable y_pred"); - stan::math::assign(pers, stan::model::deep_copy(multiply(pers,get_base1(K,i,"K",1)))); - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - stan::math::assign(curUtil, pow(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),get_base1(alpha,i,"alpha",1))); - stan::model::assign(pers, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(pers,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"pers",1) + get_base1(epP,i,"epP",1))), - "assigning variable pers"); - } else { - - stan::math::assign(curUtil, ((-(1) * get_base1(lambda,i,"lambda",1)) * pow((-(1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)),get_base1(alpha,i,"alpha",1)))); - stan::model::assign(pers, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - 
stan::model::deep_copy((get_base1(pers,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"pers",1) + get_base1(epN,i,"epN",1))), - "assigning variable pers"); - } - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + (get_base1(A,i,"A",1) * (curUtil - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))))), - "assigning variable ev"); - stan::math::assign(V, add(multiply(get_base1(w,i,"w",1),ev),multiply((1 - get_base1(w,i,"w",1)),pers))); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_A",mu_A,0); - check_less_or_equal(function__,"mu_A",mu_A,1); - check_greater_or_equal(function__,"mu_alpha",mu_alpha,0); - check_less_or_equal(function__,"mu_alpha",mu_alpha,2); - check_greater_or_equal(function__,"mu_cons",mu_cons,0); - check_less_or_equal(function__,"mu_cons",mu_cons,5); - check_greater_or_equal(function__,"mu_lambda",mu_lambda,0); - check_less_or_equal(function__,"mu_lambda",mu_lambda,10); - check_greater_or_equal(function__,"mu_K",mu_K,0); - check_less_or_equal(function__,"mu_K",mu_K,1); - check_greater_or_equal(function__,"mu_w",mu_w,0); - check_less_or_equal(function__,"mu_w",mu_w,1); - - // write generated quantities - vars__.push_back(mu_A); - vars__.push_back(mu_alpha); - vars__.push_back(mu_cons); - vars__.push_back(mu_lambda); - vars__.push_back(mu_epP); - vars__.push_back(mu_epN); - vars__.push_back(mu_K); - vars__.push_back(mu_w); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next 
line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_igt_vpp"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 8; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 8; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "epP_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "epN_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "K_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "epP" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "epN" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "K" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_cons"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_epP"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_epN"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_K"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - 
param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 8; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 8; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "epP_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "epN_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "K_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "epP" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "epN" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "K" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_cons"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_epP"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_epN"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_K"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_peer_ocu_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_peer_ocu"); - reader.add_event(114, 112, "end", "model_peer_ocu"); - return reader; -} - -class model_peer_ocu : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > condition; - vector > safe_Hpayoff; - vector > safe_Lpayoff; - vector > risky_Hpayoff; - vector > risky_Lpayoff; - vector > p_gamble; -public: - model_peer_ocu(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_peer_ocu(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_peer_ocu_namespace::model_peer_ocu"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - 
context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("condition", "N", N); - validate_non_negative_index("condition", "T", T); - context__.validate_dims("data initialization", "condition", "int", context__.to_vec(N,T)); - validate_non_negative_index("condition", "N", N); - validate_non_negative_index("condition", "T", T); - condition = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("condition"); - pos__ = 0; - size_t condition_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < condition_limit_1__; ++i_1__) { - size_t condition_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < condition_limit_0__; ++i_0__) { - 
condition[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("safe_Hpayoff", "N", N); - validate_non_negative_index("safe_Hpayoff", "T", T); - context__.validate_dims("data initialization", "safe_Hpayoff", "double", context__.to_vec(N,T)); - validate_non_negative_index("safe_Hpayoff", "N", N); - validate_non_negative_index("safe_Hpayoff", "T", T); - safe_Hpayoff = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("safe_Hpayoff"); - pos__ = 0; - size_t safe_Hpayoff_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < safe_Hpayoff_limit_1__; ++i_1__) { - size_t safe_Hpayoff_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < safe_Hpayoff_limit_0__; ++i_0__) { - safe_Hpayoff[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("safe_Lpayoff", "N", N); - validate_non_negative_index("safe_Lpayoff", "T", T); - context__.validate_dims("data initialization", "safe_Lpayoff", "double", context__.to_vec(N,T)); - validate_non_negative_index("safe_Lpayoff", "N", N); - validate_non_negative_index("safe_Lpayoff", "T", T); - safe_Lpayoff = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("safe_Lpayoff"); - pos__ = 0; - size_t safe_Lpayoff_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < safe_Lpayoff_limit_1__; ++i_1__) { - size_t safe_Lpayoff_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < safe_Lpayoff_limit_0__; ++i_0__) { - safe_Lpayoff[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("risky_Hpayoff", "N", N); - validate_non_negative_index("risky_Hpayoff", "T", T); - context__.validate_dims("data initialization", "risky_Hpayoff", "double", context__.to_vec(N,T)); - validate_non_negative_index("risky_Hpayoff", "N", N); - validate_non_negative_index("risky_Hpayoff", "T", T); - risky_Hpayoff = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("risky_Hpayoff"); - pos__ = 0; - size_t risky_Hpayoff_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < 
risky_Hpayoff_limit_1__; ++i_1__) { - size_t risky_Hpayoff_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < risky_Hpayoff_limit_0__; ++i_0__) { - risky_Hpayoff[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("risky_Lpayoff", "N", N); - validate_non_negative_index("risky_Lpayoff", "T", T); - context__.validate_dims("data initialization", "risky_Lpayoff", "double", context__.to_vec(N,T)); - validate_non_negative_index("risky_Lpayoff", "N", N); - validate_non_negative_index("risky_Lpayoff", "T", T); - risky_Lpayoff = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("risky_Lpayoff"); - pos__ = 0; - size_t risky_Lpayoff_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < risky_Lpayoff_limit_1__; ++i_1__) { - size_t risky_Lpayoff_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < risky_Lpayoff_limit_0__; ++i_0__) { - risky_Lpayoff[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("p_gamble", "N", N); - validate_non_negative_index("p_gamble", "T", T); - context__.validate_dims("data initialization", "p_gamble", "double", context__.to_vec(N,T)); - validate_non_negative_index("p_gamble", "N", N); - validate_non_negative_index("p_gamble", "T", T); - p_gamble = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("p_gamble"); - pos__ = 0; - size_t p_gamble_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < p_gamble_limit_1__; ++i_1__) { - size_t p_gamble_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < p_gamble_limit_0__; ++i_0__) { - p_gamble[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - 
check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"condition[k0__][k1__]",condition[k0__][k1__],0); - check_less_or_equal(function__,"condition[k0__][k1__]",condition[k0__][k1__],3); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"p_gamble[k0__][k1__]",p_gamble[k0__][k1__],0); - check_less_or_equal(function__,"p_gamble[k0__][k1__]",p_gamble[k0__][k1__],1); - } - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("rho_p", "N", N); - num_params_r__ += N; - validate_non_negative_index("tau_p", "N", N); - num_params_r__ += N; - validate_non_negative_index("ocu_p", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_peer_ocu() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - 
context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("rho_p"))) - throw std::runtime_error("variable rho_p missing"); - vals_r__ = context__.vals_r("rho_p"); - pos__ = 0U; - validate_non_negative_index("rho_p", "N", N); - context__.validate_dims("initialization", "rho_p", "vector_d", context__.to_vec(N)); - vector_d rho_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_p: ") + e.what()); - } - - if (!(context__.contains_r("tau_p"))) - throw std::runtime_error("variable tau_p missing"); - vals_r__ = context__.vals_r("tau_p"); - pos__ = 0U; - validate_non_negative_index("tau_p", "N", N); - context__.validate_dims("initialization", "tau_p", "vector_d", context__.to_vec(N)); - vector_d tau_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - tau_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(tau_p); - } catch (const std::exception& e) { - 
throw std::runtime_error(std::string("Error transforming variable tau_p: ") + e.what()); - } - - if (!(context__.contains_r("ocu_p"))) - throw std::runtime_error("variable ocu_p missing"); - vals_r__ = context__.vals_r("ocu_p"); - pos__ = 0U; - validate_non_negative_index("ocu_p", "N", N); - context__.validate_dims("initialization", "ocu_p", "vector_d", context__.to_vec(N)); - vector_d ocu_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ocu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ocu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ocu_p: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix rho_p; - (void) rho_p; // dummy to suppress unused var warning - if (jacobian__) - rho_p = 
in__.vector_constrain(N,lp__); - else - rho_p = in__.vector_constrain(N); - - Eigen::Matrix tau_p; - (void) tau_p; // dummy to suppress unused var warning - if (jacobian__) - tau_p = in__.vector_constrain(N,lp__); - else - tau_p = in__.vector_constrain(N); - - Eigen::Matrix ocu_p; - (void) ocu_p; // dummy to suppress unused var warning - if (jacobian__) - ocu_p = in__.vector_constrain(N,lp__); - else - ocu_p = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - validate_non_negative_index("ocu", "N", N); - Eigen::Matrix ocu(static_cast(N)); - (void) ocu; // dummy to suppress unused var warning - - stan::math::initialize(ocu, DUMMY_VAR__); - stan::math::fill(ocu,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(rho, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(rho_p,i,"rho_p",1)))) * 2), - "assigning variable rho"); - } - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),tau_p)))); - stan::math::assign(ocu, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),ocu_p))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if 
(stan::math::is_uninitialized(tau(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ocu(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ocu" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"rho",rho,0); - check_less_or_equal(function__,"rho",rho,2); - check_greater_or_equal(function__,"tau",tau,0); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1.0)); - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 2), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - lp_accum__.add(cauchy_log(get_base1(sigma,3,"sigma",1), 0, 1.0)); - lp_accum__.add(normal_log(rho_p, 0, 1.0)); - lp_accum__.add(normal_log(tau_p, 0, 1.0)); - lp_accum__.add(normal_log(ocu_p, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - local_scalar_t__ U_safe; - (void) U_safe; // dummy to suppress unused var warning - - stan::math::initialize(U_safe, DUMMY_VAR__); - stan::math::fill(U_safe,DUMMY_VAR__); - local_scalar_t__ U_risky; - (void) U_risky; // dummy to suppress unused var warning - - stan::math::initialize(U_risky, DUMMY_VAR__); - stan::math::fill(U_risky,DUMMY_VAR__); - - - stan::math::assign(U_safe, ((get_base1(get_base1(p_gamble,i,"p_gamble",1),t,"p_gamble",2) * pow(get_base1(get_base1(safe_Hpayoff,i,"safe_Hpayoff",1),t,"safe_Hpayoff",2),get_base1(rho,i,"rho",1))) + ((1 - get_base1(get_base1(p_gamble,i,"p_gamble",1),t,"p_gamble",2)) * pow(get_base1(get_base1(safe_Lpayoff,i,"safe_Lpayoff",1),t,"safe_Lpayoff",2),get_base1(rho,i,"rho",1))))); - 
stan::math::assign(U_risky, ((get_base1(get_base1(p_gamble,i,"p_gamble",1),t,"p_gamble",2) * pow(get_base1(get_base1(risky_Hpayoff,i,"risky_Hpayoff",1),t,"risky_Hpayoff",2),get_base1(rho,i,"rho",1))) + ((1 - get_base1(get_base1(p_gamble,i,"p_gamble",1),t,"p_gamble",2)) * pow(get_base1(get_base1(risky_Lpayoff,i,"risky_Lpayoff",1),t,"risky_Lpayoff",2),get_base1(rho,i,"rho",1))))); - if (as_bool(logical_eq(get_base1(get_base1(condition,i,"condition",1),t,"condition",2),1))) { - - stan::math::assign(U_safe, stan::model::deep_copy((U_safe + get_base1(ocu,i,"ocu",1)))); - } - if (as_bool(logical_eq(get_base1(get_base1(condition,i,"condition",1),t,"condition",2),3))) { - - stan::math::assign(U_risky, stan::model::deep_copy((U_risky + get_base1(ocu,i,"ocu",1)))); - } - lp_accum__.add(bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), (get_base1(tau,i,"tau",1) * (U_risky - U_safe)))); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("rho_p"); - names__.push_back("tau_p"); - names__.push_back("ocu_p"); - names__.push_back("rho"); - names__.push_back("tau"); - names__.push_back("ocu"); - names__.push_back("mu_rho"); - names__.push_back("mu_tau"); - names__.push_back("mu_ocu"); - 
names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_peer_ocu_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d rho_p = in__.vector_constrain(N); - vector_d tau_p = in__.vector_constrain(N); - vector_d ocu_p = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - 
vars__.push_back(rho_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ocu_p[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - validate_non_negative_index("ocu", "N", N); - Eigen::Matrix ocu(static_cast(N)); - (void) ocu; // dummy to suppress unused var warning - - stan::math::initialize(ocu, DUMMY_VAR__); - stan::math::fill(ocu,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(rho, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(rho_p,i,"rho_p",1)))) * 2), - "assigning variable rho"); - } - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),tau_p)))); - stan::math::assign(ocu, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),ocu_p))); - - // validate transformed parameters - check_greater_or_equal(function__,"rho",rho,0); - check_less_or_equal(function__,"rho",rho,2); - check_greater_or_equal(function__,"tau",tau,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - 
vars__.push_back(rho[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ocu[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - stan::math::fill(mu_rho,DUMMY_VAR__); - local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to suppress unused var warning - - stan::math::initialize(mu_tau, DUMMY_VAR__); - stan::math::fill(mu_tau,DUMMY_VAR__); - local_scalar_t__ mu_ocu; - (void) mu_ocu; // dummy to suppress unused var warning - - stan::math::initialize(mu_ocu, DUMMY_VAR__); - stan::math::fill(mu_ocu,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_rho, (Phi_approx(get_base1(mu_p,1,"mu_p",1)) * 2)); - stan::math::assign(mu_tau, stan::math::exp(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_ocu, get_base1(mu_p,3,"mu_p",1)); - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0.0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - local_scalar_t__ U_safe; - (void) U_safe; // dummy to suppress unused var 
warning - - stan::math::initialize(U_safe, DUMMY_VAR__); - stan::math::fill(U_safe,DUMMY_VAR__); - local_scalar_t__ U_risky; - (void) U_risky; // dummy to suppress unused var warning - - stan::math::initialize(U_risky, DUMMY_VAR__); - stan::math::fill(U_risky,DUMMY_VAR__); - - - stan::math::assign(U_safe, ((get_base1(get_base1(p_gamble,i,"p_gamble",1),t,"p_gamble",2) * pow(get_base1(get_base1(safe_Hpayoff,i,"safe_Hpayoff",1),t,"safe_Hpayoff",2),get_base1(rho,i,"rho",1))) + ((1 - get_base1(get_base1(p_gamble,i,"p_gamble",1),t,"p_gamble",2)) * pow(get_base1(get_base1(safe_Lpayoff,i,"safe_Lpayoff",1),t,"safe_Lpayoff",2),get_base1(rho,i,"rho",1))))); - stan::math::assign(U_risky, ((get_base1(get_base1(p_gamble,i,"p_gamble",1),t,"p_gamble",2) * pow(get_base1(get_base1(risky_Hpayoff,i,"risky_Hpayoff",1),t,"risky_Hpayoff",2),get_base1(rho,i,"rho",1))) + ((1 - get_base1(get_base1(p_gamble,i,"p_gamble",1),t,"p_gamble",2)) * pow(get_base1(get_base1(risky_Lpayoff,i,"risky_Lpayoff",1),t,"risky_Lpayoff",2),get_base1(rho,i,"rho",1))))); - if (as_bool(logical_eq(get_base1(get_base1(condition,i,"condition",1),t,"condition",2),1))) { - - stan::math::assign(U_safe, stan::model::deep_copy((U_safe + get_base1(ocu,i,"ocu",1)))); - } - if (as_bool(logical_eq(get_base1(get_base1(condition,i,"condition",1),t,"condition",2),3))) { - - stan::math::assign(U_risky, stan::model::deep_copy((U_risky + get_base1(ocu,i,"ocu",1)))); - } - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),(get_base1(tau,i,"tau",1) * (U_risky - U_safe))))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(inv_logit((get_base1(tau,i,"tau",1) * (U_risky - 
U_safe))), base_rng__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - check_less_or_equal(function__,"mu_rho",mu_rho,2); - check_greater_or_equal(function__,"mu_tau",mu_tau,0); - - // write generated quantities - vars__.push_back(mu_rho); - vars__.push_back(mu_tau); - vars__.push_back(mu_ocu); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_peer_ocu"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ocu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ocu" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ocu"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ocu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ocu" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ocu"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_prl_ewa_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_prl_ewa"); - reader.add_event(164, 162, "end", "model_prl_ewa"); - return reader; -} - -class model_prl_ewa : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > outcome; - vector_d initV; -public: - model_prl_ewa(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_prl_ewa(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_prl_ewa_namespace::model_prl_ewa"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = 
context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - 
check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],2); - } - } - // initialize data variables - validate_non_negative_index("initV", "2", 2); - initV = vector_d(static_cast(2)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,2)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("phi_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("rho_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_prl_ewa() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", 
"mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("phi_pr"))) - throw std::runtime_error("variable phi_pr missing"); - vals_r__ = context__.vals_r("phi_pr"); - pos__ = 0U; - validate_non_negative_index("phi_pr", "N", N); - context__.validate_dims("initialization", "phi_pr", "vector_d", context__.to_vec(N)); - vector_d phi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - phi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(phi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable phi_pr: ") + e.what()); - } - - if (!(context__.contains_r("rho_pr"))) - throw std::runtime_error("variable rho_pr missing"); - vals_r__ = context__.vals_r("rho_pr"); - pos__ = 0U; - validate_non_negative_index("rho_pr", "N", N); - context__.validate_dims("initialization", "rho_pr", "vector_d", context__.to_vec(N)); - vector_d rho_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_pr); - } catch (const std::exception& e) { - throw 
std::runtime_error(std::string("Error transforming variable rho_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix phi_pr; - (void) phi_pr; // dummy to suppress unused var warning - if (jacobian__) - 
phi_pr = in__.vector_constrain(N,lp__); - else - phi_pr = in__.vector_constrain(N); - - Eigen::Matrix rho_pr; - (void) rho_pr; // dummy to suppress unused var warning - if (jacobian__) - rho_pr = in__.vector_constrain(N,lp__); - else - rho_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("phi", "N", N); - Eigen::Matrix phi(static_cast(N)); - (void) phi; // dummy to suppress unused var warning - - stan::math::initialize(phi, DUMMY_VAR__); - stan::math::fill(phi,DUMMY_VAR__); - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(phi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(phi_pr,i,"phi_pr",1)))), - "assigning variable phi"); - stan::model::assign(rho, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(rho_pr,i,"rho_pr",1)))), - "assigning variable rho"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 10), - "assigning variable beta"); - } - - // validate transformed 
parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(phi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: phi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"phi",phi,0); - check_less_or_equal(function__,"phi",phi,1); - check_greater_or_equal(function__,"rho",rho,0); - check_less_or_equal(function__,"rho",rho,1); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(phi_pr, 0, 1)); - lp_accum__.add(normal_log(rho_pr, 0, 1)); - lp_accum__.add(normal_log(beta_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - validate_non_negative_index("ew", "2", 2); - Eigen::Matrix ew(static_cast(2)); - (void) ew; // dummy to suppress unused var warning - - stan::math::initialize(ew, DUMMY_VAR__); - stan::math::fill(ew,DUMMY_VAR__); - local_scalar_t__ ewt1; - (void) ewt1; // dummy to suppress unused var warning - - stan::math::initialize(ewt1, DUMMY_VAR__); - 
stan::math::fill(ewt1,DUMMY_VAR__); - - - stan::math::assign(ev, initV); - stan::math::assign(ew, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - lp_accum__.add(categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), multiply(ev,get_base1(beta,i,"beta",1)))); - stan::math::assign(ewt1, get_base1(ew,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ew",1)); - stan::model::assign(ew, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(ew,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ew",1) * get_base1(rho,i,"rho",1)) + 1)), - "assigning variable ew"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) * get_base1(phi,i,"phi",1)) * ewt1) + get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) / get_base1(ew,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ew",1))), - "assigning variable ev"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - 
names__.push_back("sigma"); - names__.push_back("phi_pr"); - names__.push_back("rho_pr"); - names__.push_back("beta_pr"); - names__.push_back("phi"); - names__.push_back("rho"); - names__.push_back("beta"); - names__.push_back("mu_phi"); - names__.push_back("mu_rho"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("mr_ev_c"); - names__.push_back("mr_ev_nc"); - names__.push_back("mr_ew_c"); - names__.push_back("mr_ew_nc"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool 
include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_prl_ewa_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d phi_pr = in__.vector_constrain(N); - vector_d rho_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(phi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("phi", "N", N); - Eigen::Matrix phi(static_cast(N)); - (void) phi; // dummy to suppress unused var warning - - stan::math::initialize(phi, DUMMY_VAR__); - stan::math::fill(phi,DUMMY_VAR__); - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; 
i <= N; ++i) { - - stan::model::assign(phi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(phi_pr,i,"phi_pr",1)))), - "assigning variable phi"); - stan::model::assign(rho, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(rho_pr,i,"rho_pr",1)))), - "assigning variable rho"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 10), - "assigning variable beta"); - } - - // validate transformed parameters - check_greater_or_equal(function__,"phi",phi,0); - check_less_or_equal(function__,"phi",phi,1); - check_greater_or_equal(function__,"rho",rho,0); - check_less_or_equal(function__,"rho",rho,1); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(phi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_phi; - (void) mu_phi; // dummy to suppress unused var warning - - stan::math::initialize(mu_phi, DUMMY_VAR__); - stan::math::fill(mu_phi,DUMMY_VAR__); - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - stan::math::fill(mu_rho,DUMMY_VAR__); - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - 
stan::math::fill(mu_beta,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("mr_ev_c", "N", N); - validate_non_negative_index("mr_ev_c", "T", T); - vector > mr_ev_c(N, (vector(T))); - stan::math::initialize(mr_ev_c, DUMMY_VAR__); - stan::math::fill(mr_ev_c,DUMMY_VAR__); - validate_non_negative_index("mr_ev_nc", "N", N); - validate_non_negative_index("mr_ev_nc", "T", T); - vector > mr_ev_nc(N, (vector(T))); - stan::math::initialize(mr_ev_nc, DUMMY_VAR__); - stan::math::fill(mr_ev_nc,DUMMY_VAR__); - validate_non_negative_index("mr_ew_c", "N", N); - validate_non_negative_index("mr_ew_c", "T", T); - vector > mr_ew_c(N, (vector(T))); - stan::math::initialize(mr_ew_c, DUMMY_VAR__); - stan::math::fill(mr_ew_c,DUMMY_VAR__); - validate_non_negative_index("mr_ew_nc", "N", N); - validate_non_negative_index("mr_ew_nc", "T", T); - vector > mr_ew_nc(N, (vector(T))); - stan::math::initialize(mr_ew_nc, DUMMY_VAR__); - stan::math::fill(mr_ew_nc,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_c"); - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_nc"); - stan::model::assign(mr_ew_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning 
variable mr_ew_c"); - stan::model::assign(mr_ew_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ew_nc"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_phi, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_rho, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 10)); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - validate_non_negative_index("ew", "2", 2); - Eigen::Matrix ew(static_cast(2)); - (void) ew; // dummy to suppress unused var warning - - stan::math::initialize(ew, DUMMY_VAR__); - stan::math::fill(ew,DUMMY_VAR__); - local_scalar_t__ ewt1; - (void) ewt1; // dummy to suppress unused var warning - - stan::math::initialize(ewt1, DUMMY_VAR__); - stan::math::fill(ewt1,DUMMY_VAR__); - - - stan::math::assign(ev, initV); - stan::math::assign(ew, initV); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),multiply(ev,get_base1(beta,i,"beta",1))))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), 
stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(softmax(multiply(ev,get_base1(beta,i,"beta",1))), base_rng__), - "assigning variable y_pred"); - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1), - "assigning variable mr_ev_c"); - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1), - "assigning variable mr_ev_nc"); - stan::model::assign(mr_ew_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ew,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ew",1), - "assigning variable mr_ew_c"); - stan::model::assign(mr_ew_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ew,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ew",1), - "assigning variable mr_ew_nc"); - stan::math::assign(ewt1, get_base1(ew,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ew",1)); - stan::model::assign(ew, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(ew,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ew",1) * get_base1(rho,i,"rho",1)) + 1)), - "assigning variable ew"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - 
stan::model::deep_copy(((((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) * get_base1(phi,i,"phi",1)) * ewt1) + get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) / get_base1(ew,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ew",1))), - "assigning variable ev"); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_phi",mu_phi,0); - check_less_or_equal(function__,"mu_phi",mu_phi,1); - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - check_less_or_equal(function__,"mu_rho",mu_rho,1); - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,10); - - // write generated quantities - vars__.push_back(mu_phi); - vars__.push_back(mu_rho); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ew_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ew_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = 
true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_prl_ewa"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "phi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "phi" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_phi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ew_c" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ew_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "phi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "phi" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_phi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ew_c" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ew_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_prl_fictitious_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_prl_fictitious"); - reader.add_event(168, 166, "end", "model_prl_fictitious"); - return reader; -} - -class model_prl_fictitious : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > outcome; - vector_d initV; -public: - model_prl_fictitious(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_prl_fictitious(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - 
current_statement_begin__ = -1; - - static const char* function__ = "model_prl_fictitious_namespace::model_prl_fictitious"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", 
"double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],2); - } - } - // initialize data variables - validate_non_negative_index("initV", "2", 2); - initV = vector_d(static_cast(2)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,2)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("eta_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - 
~model_prl_fictitious() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("eta_pr"))) - throw std::runtime_error("variable eta_pr missing"); - vals_r__ = context__.vals_r("eta_pr"); - pos__ = 0U; - validate_non_negative_index("eta_pr", "N", N); - context__.validate_dims("initialization", "eta_pr", "vector_d", context__.to_vec(N)); - vector_d eta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - eta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(eta_pr); - } catch (const std::exception& e) { - throw 
std::runtime_error(std::string("Error transforming variable eta_pr: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ 
lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix eta_pr; - (void) eta_pr; // dummy to suppress unused var warning - if (jacobian__) - eta_pr = in__.vector_constrain(N,lp__); - else - eta_pr = in__.vector_constrain(N); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("eta", "N", N); - Eigen::Matrix eta(static_cast(N)); - (void) eta; // dummy to suppress unused var warning - - stan::math::initialize(eta, DUMMY_VAR__); - stan::math::fill(eta,DUMMY_VAR__); - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(eta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + 
(get_base1(sigma,1,"sigma",1) * get_base1(eta_pr,i,"eta_pr",1)))), - "assigning variable eta"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - stan::math::assign(alpha, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),alpha_pr))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(eta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: eta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"eta",eta,0); - check_less_or_equal(function__,"eta",eta,1); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(get_base1(sigma,1,"sigma",1), 0, 0.20000000000000001)); - lp_accum__.add(cauchy_log(get_base1(sigma,2,"sigma",1), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(sigma,3,"sigma",1), 0, 0.20000000000000001)); - lp_accum__.add(normal_log(eta_pr, 0, 1)); - lp_accum__.add(normal_log(alpha_pr, 0, 1)); - lp_accum__.add(normal_log(beta_pr, 0, 1)); - for (int i = 
1; i <= N; ++i) { - { - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - local_scalar_t__ PEnc; - (void) PEnc; // dummy to suppress unused var warning - - stan::math::initialize(PEnc, DUMMY_VAR__); - stan::math::fill(PEnc,DUMMY_VAR__); - - - stan::math::assign(ev, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(alpha,i,"alpha",1) - (get_base1(ev,1,"ev",1) - get_base1(ev,2,"ev",1))))))), - "assigning variable prob"); - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - lp_accum__.add(categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), prob)); - stan::math::assign(PE, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - stan::math::assign(PEnc, (-(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1))); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - 
stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + (get_base1(eta,i,"eta",1) * PE))), - "assigning variable ev"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1) + (get_base1(eta,i,"eta",1) * PEnc))), - "assigning variable ev"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("eta_pr"); - names__.push_back("alpha_pr"); - names__.push_back("beta_pr"); - names__.push_back("eta"); - names__.push_back("alpha"); - names__.push_back("beta"); - names__.push_back("mu_eta"); - names__.push_back("mu_alpha"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("mr_ev_c"); - names__.push_back("mr_ev_nc"); - names__.push_back("mr_pe_c"); - names__.push_back("mr_pe_nc"); - names__.push_back("mr_dv"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - 
dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_prl_fictitious_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d eta_pr = in__.vector_constrain(N); - vector_d alpha_pr = in__.vector_constrain(N); - 
vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("eta", "N", N); - Eigen::Matrix eta(static_cast(N)); - (void) eta; // dummy to suppress unused var warning - - stan::math::initialize(eta, DUMMY_VAR__); - stan::math::fill(eta,DUMMY_VAR__); - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(eta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pr,i,"eta_pr",1)))), - "assigning variable eta"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - 
stan::math::assign(alpha, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),alpha_pr))); - - // validate transformed parameters - check_greater_or_equal(function__,"eta",eta,0); - check_less_or_equal(function__,"eta",eta,1); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_eta; - (void) mu_eta; // dummy to suppress unused var warning - - stan::math::initialize(mu_eta, DUMMY_VAR__); - stan::math::fill(mu_eta,DUMMY_VAR__); - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("mr_ev_c", "N", N); - validate_non_negative_index("mr_ev_c", "T", T); - vector > mr_ev_c(N, (vector(T))); - stan::math::initialize(mr_ev_c, DUMMY_VAR__); - stan::math::fill(mr_ev_c,DUMMY_VAR__); - validate_non_negative_index("mr_ev_nc", "N", N); - validate_non_negative_index("mr_ev_nc", "T", T); - vector > mr_ev_nc(N, (vector(T))); - stan::math::initialize(mr_ev_nc, DUMMY_VAR__); - stan::math::fill(mr_ev_nc,DUMMY_VAR__); - validate_non_negative_index("mr_pe_c", "N", N); - validate_non_negative_index("mr_pe_c", "T", T); - vector > mr_pe_c(N, 
(vector(T))); - stan::math::initialize(mr_pe_c, DUMMY_VAR__); - stan::math::fill(mr_pe_c,DUMMY_VAR__); - validate_non_negative_index("mr_pe_nc", "N", N); - validate_non_negative_index("mr_pe_nc", "T", T); - vector > mr_pe_nc(N, (vector(T))); - stan::math::initialize(mr_pe_nc, DUMMY_VAR__); - stan::math::fill(mr_pe_nc,DUMMY_VAR__); - validate_non_negative_index("mr_dv", "N", N); - validate_non_negative_index("mr_dv", "T", T); - vector > mr_dv(N, (vector(T))); - stan::math::initialize(mr_dv, DUMMY_VAR__); - stan::math::fill(mr_dv,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_c"); - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_nc"); - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_pe_c"); - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_pe_nc"); - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_dv"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), 
stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_eta, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_alpha, get_base1(mu_p,2,"mu_p",1)); - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 5)); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - local_scalar_t__ PEnc; - (void) PEnc; // dummy to suppress unused var warning - - stan::math::initialize(PEnc, DUMMY_VAR__); - stan::math::fill(PEnc,DUMMY_VAR__); - - - stan::math::assign(ev, initV); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(alpha,i,"alpha",1) - (get_base1(ev,1,"ev",1) - get_base1(ev,2,"ev",1))))))), - "assigning variable prob"); - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 
stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),prob))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(prob, base_rng__), - "assigning variable y_pred"); - stan::math::assign(PE, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - stan::math::assign(PEnc, (-(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1))); - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1), - "assigning variable mr_ev_c"); - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1), - "assigning variable mr_ev_nc"); - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - PE, - "assigning variable mr_pe_c"); - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - PEnc, - "assigning variable mr_pe_nc"); - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - (PE - PEnc), - "assigning variable mr_dv"); - stan::model::assign(ev, - 
stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta,i,"eta",1) * PE)), - "assigning variable ev"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta,i,"eta",1) * PEnc)), - "assigning variable ev"); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_eta",mu_eta,0); - check_less_or_equal(function__,"mu_eta",mu_eta,1); - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,5); - - // write generated quantities - vars__.push_back(mu_eta); - vars__.push_back(mu_alpha); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_dv[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for 
(int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_prl_fictitious"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' 
<< k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_prl_fictitious_multipleB_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_prl_fictitious_multipleB"); - reader.add_event(178, 176, "end", "model_prl_fictitious_multipleB"); - return reader; -} - -class model_prl_fictitious_multipleB : public prob_grad { -private: - int N; - int T; - int maxB; - vector B; - vector > Tsubj; - vector > > choice; - vector > > outcome; - vector_d initV; -public: - model_prl_fictitious_multipleB(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_prl_fictitious_multipleB(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_prl_fictitious_multipleB_namespace::model_prl_fictitious_multipleB"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - 
// initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - context__.validate_dims("data initialization", "maxB", "int", context__.to_vec()); - maxB = int(0); - vals_i__ = context__.vals_i("maxB"); - pos__ = 0; - maxB = vals_i__[pos__++]; - validate_non_negative_index("B", "N", N); - context__.validate_dims("data initialization", "B", "int", context__.to_vec(N)); - validate_non_negative_index("B", "N", N); - B = std::vector(N,int(0)); - vals_i__ = context__.vals_i("B"); - pos__ = 0; - size_t B_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < B_limit_0__; ++i_0__) { - B[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("Tsubj", "N", N); - validate_non_negative_index("Tsubj", "maxB", maxB); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N,maxB)); - validate_non_negative_index("Tsubj", "N", N); - validate_non_negative_index("Tsubj", "maxB", maxB); - Tsubj = std::vector >(N,std::vector(maxB,int(0))); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_1__ = maxB; - for (size_t i_1__ = 0; i_1__ < Tsubj_limit_1__; ++i_1__) { - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "maxB", maxB); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,maxB,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "maxB", maxB); - validate_non_negative_index("choice", "T", T); - choice = std::vector > 
>(N,std::vector >(maxB,std::vector(T,int(0)))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_2__ = T; - for (size_t i_2__ = 0; i_2__ < choice_limit_2__; ++i_2__) { - size_t choice_limit_1__ = maxB; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__][i_2__] = vals_i__[pos__++]; - } - } - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "maxB", maxB); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,maxB,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "maxB", maxB); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector > >(N,std::vector >(maxB,std::vector(T,double(0)))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_2__ = T; - for (size_t i_2__ = 0; i_2__ < outcome_limit_2__; ++i_2__) { - size_t outcome_limit_1__ = maxB; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__][i_2__] = vals_r__[pos__++]; - } - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,0); - check_greater_or_equal(function__,"maxB",maxB,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"B[k0__]",B[k0__],1); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < maxB; ++k1__) { - check_greater_or_equal(function__,"Tsubj[k0__][k1__]",Tsubj[k0__][k1__],0); - check_less_or_equal(function__,"Tsubj[k0__][k1__]",Tsubj[k0__][k1__],T); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < maxB; ++k1__) { - for (int k2__ = 0; k2__ < T; ++k2__) { - 
check_greater_or_equal(function__,"choice[k0__][k1__][k2__]",choice[k0__][k1__][k2__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__][k2__]",choice[k0__][k1__][k2__],2); - } - } - } - // initialize data variables - validate_non_negative_index("initV", "2", 2); - initV = vector_d(static_cast(2)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,2)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("eta_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_prl_fictitious_multipleB() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw 
std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("eta_pr"))) - throw std::runtime_error("variable eta_pr missing"); - vals_r__ = context__.vals_r("eta_pr"); - pos__ = 0U; - validate_non_negative_index("eta_pr", "N", N); - context__.validate_dims("initialization", "eta_pr", "vector_d", context__.to_vec(N)); - vector_d eta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - eta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(eta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable eta_pr: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 
0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix eta_pr; - (void) eta_pr; // dummy to suppress unused var warning - if (jacobian__) - eta_pr = in__.vector_constrain(N,lp__); - else - eta_pr = in__.vector_constrain(N); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else 
- alpha_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("eta", "N", N); - Eigen::Matrix eta(static_cast(N)); - (void) eta; // dummy to suppress unused var warning - - stan::math::initialize(eta, DUMMY_VAR__); - stan::math::fill(eta,DUMMY_VAR__); - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(eta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pr,i,"eta_pr",1)))), - "assigning variable eta"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 10), - "assigning variable beta"); - } - stan::math::assign(alpha, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),alpha_pr))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(eta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: eta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha(i0__))) { - std::stringstream msg__; - 
msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"eta",eta,0); - check_less_or_equal(function__,"eta",eta,1); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(get_base1(sigma,1,"sigma",1), 0, 0.20000000000000001)); - lp_accum__.add(cauchy_log(get_base1(sigma,2,"sigma",1), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(sigma,3,"sigma",1), 0, 0.20000000000000001)); - lp_accum__.add(normal_log(eta_pr, 0, 1)); - lp_accum__.add(normal_log(alpha_pr, 0, 1)); - lp_accum__.add(normal_log(beta_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - - for (int bIdx = 1; bIdx <= get_base1(B,i,"B",1); ++bIdx) { - { - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - local_scalar_t__ PEnc; - (void) PEnc; // dummy to suppress unused var warning - - stan::math::initialize(PEnc, DUMMY_VAR__); - stan::math::fill(PEnc,DUMMY_VAR__); - - - 
stan::math::assign(ev, initV); - for (int t = 1; t <= get_base1(get_base1(Tsubj,i,"Tsubj",1),bIdx,"Tsubj",2); ++t) { - - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(alpha,i,"alpha",1) - (get_base1(ev,1,"ev",1) - get_base1(ev,2,"ev",1))))))), - "assigning variable prob"); - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - lp_accum__.add(categorical_log(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3), prob)); - stan::math::assign(PE, (get_base1(get_base1(get_base1(outcome,i,"outcome",1),bIdx,"outcome",2),t,"outcome",3) - get_base1(ev,get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),"ev",1))); - stan::math::assign(PEnc, (-(get_base1(get_base1(get_base1(outcome,i,"outcome",1),bIdx,"outcome",2),t,"outcome",3)) - get_base1(ev,(3 - get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)),"ev",1))); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),"ev",1) + (get_base1(eta,i,"eta",1) * PE))), - "assigning variable ev"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,(3 - get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)),"ev",1) + (get_base1(eta,i,"eta",1) * PEnc))), - "assigning variable ev"); - } - } - } - } - - } catch 
(const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("eta_pr"); - names__.push_back("alpha_pr"); - names__.push_back("beta_pr"); - names__.push_back("eta"); - names__.push_back("alpha"); - names__.push_back("beta"); - names__.push_back("mu_eta"); - names__.push_back("mu_alpha"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("mr_ev_c"); - names__.push_back("mr_ev_nc"); - names__.push_back("mr_pe_c"); - names__.push_back("mr_pe_nc"); - names__.push_back("mr_dv"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - 
dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_prl_fictitious_multipleB_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d eta_pr = in__.vector_constrain(N); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - 
vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("eta", "N", N); - Eigen::Matrix eta(static_cast(N)); - (void) eta; // dummy to suppress unused var warning - - stan::math::initialize(eta, DUMMY_VAR__); - stan::math::fill(eta,DUMMY_VAR__); - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(eta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pr,i,"eta_pr",1)))), - "assigning variable eta"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 10), - "assigning variable beta"); - } - stan::math::assign(alpha, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),alpha_pr))); - - // validate transformed parameters - check_greater_or_equal(function__,"eta",eta,0); - check_less_or_equal(function__,"eta",eta,1); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - 
// write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_eta; - (void) mu_eta; // dummy to suppress unused var warning - - stan::math::initialize(mu_eta, DUMMY_VAR__); - stan::math::fill(mu_eta,DUMMY_VAR__); - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("mr_ev_c", "N", N); - validate_non_negative_index("mr_ev_c", "maxB", maxB); - validate_non_negative_index("mr_ev_c", "T", T); - vector > > mr_ev_c(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(mr_ev_c, DUMMY_VAR__); - stan::math::fill(mr_ev_c,DUMMY_VAR__); - validate_non_negative_index("mr_ev_nc", "N", N); - validate_non_negative_index("mr_ev_nc", "maxB", maxB); - validate_non_negative_index("mr_ev_nc", "T", T); - vector > > mr_ev_nc(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(mr_ev_nc, DUMMY_VAR__); - stan::math::fill(mr_ev_nc,DUMMY_VAR__); - validate_non_negative_index("mr_pe_c", "N", N); - validate_non_negative_index("mr_pe_c", "maxB", maxB); - validate_non_negative_index("mr_pe_c", "T", T); - vector > > mr_pe_c(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(mr_pe_c, DUMMY_VAR__); - stan::math::fill(mr_pe_c,DUMMY_VAR__); - 
validate_non_negative_index("mr_pe_nc", "N", N); - validate_non_negative_index("mr_pe_nc", "maxB", maxB); - validate_non_negative_index("mr_pe_nc", "T", T); - vector > > mr_pe_nc(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(mr_pe_nc, DUMMY_VAR__); - stan::math::fill(mr_pe_nc,DUMMY_VAR__); - validate_non_negative_index("mr_dv", "N", N); - validate_non_negative_index("mr_dv", "maxB", maxB); - validate_non_negative_index("mr_dv", "T", T); - vector > > mr_dv(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(mr_dv, DUMMY_VAR__); - stan::math::fill(mr_dv,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "maxB", maxB); - validate_non_negative_index("y_pred", "T", T); - vector > > y_pred(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int b = 1; b <= maxB; ++b) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - 0, - "assigning variable mr_ev_c"); - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - 0, - "assigning variable mr_ev_nc"); - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - 0, - "assigning variable mr_pe_c"); - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - 0, - 
"assigning variable mr_pe_nc"); - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - 0, - "assigning variable mr_dv"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - -(1), - "assigning variable y_pred"); - } - } - } - stan::math::assign(mu_eta, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_alpha, get_base1(mu_p,2,"mu_p",1)); - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 10)); - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int bIdx = 1; bIdx <= get_base1(B,i,"B",1); ++bIdx) { - { - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - local_scalar_t__ PEnc; - (void) PEnc; // dummy to suppress unused var warning - - stan::math::initialize(PEnc, DUMMY_VAR__); - stan::math::fill(PEnc,DUMMY_VAR__); - - - stan::math::assign(ev, initV); - for (int t = 1; t <= get_base1(get_base1(Tsubj,i,"Tsubj",1),bIdx,"Tsubj",2); ++t) { - - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), 
stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(alpha,i,"alpha",1) - (get_base1(ev,1,"ev",1) - get_base1(ev,2,"ev",1))))))), - "assigning variable prob"); - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_log(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),prob))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - categorical_rng(prob, base_rng__), - "assigning variable y_pred"); - stan::math::assign(PE, (get_base1(get_base1(get_base1(outcome,i,"outcome",1),bIdx,"outcome",2),t,"outcome",3) - get_base1(ev,get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),"ev",1))); - stan::math::assign(PEnc, (-(get_base1(get_base1(get_base1(outcome,i,"outcome",1),bIdx,"outcome",2),t,"outcome",3)) - get_base1(ev,(3 - get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)),"ev",1))); - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - get_base1(ev,get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),"ev",1), - "assigning variable mr_ev_c"); - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), 
stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - get_base1(ev,(3 - get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)),"ev",1), - "assigning variable mr_ev_nc"); - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - PE, - "assigning variable mr_pe_c"); - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - PEnc, - "assigning variable mr_pe_nc"); - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - (PE - PEnc), - "assigning variable mr_dv"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),"ev",1) + (get_base1(eta,i,"eta",1) * PE))), - "assigning variable ev"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,(3 - get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)),"ev",1) + (get_base1(eta,i,"eta",1) * PEnc))), - "assigning variable ev"); - } - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_eta",mu_eta,0); - check_less_or_equal(function__,"mu_eta",mu_eta,1); - 
check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,10); - - // write generated quantities - vars__.push_back(mu_eta); - vars__.push_back(mu_alpha); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_c[k_0__][k_1__][k_2__]); - } - } - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_nc[k_0__][k_1__][k_2__]); - } - } - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe_c[k_0__][k_1__][k_2__]); - } - } - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe_nc[k_0__][k_1__][k_2__]); - } - } - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_dv[k_0__][k_1__][k_2__]); - } - } - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__][k_2__]); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for 
(int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_prl_fictitious_multipleB"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_prl_fictitious_rp_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_prl_fictitious_rp"); - reader.add_event(183, 181, "end", "model_prl_fictitious_rp"); - return reader; -} - -class model_prl_fictitious_rp : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > outcome; - vector_d initV; -public: - model_prl_fictitious_rp(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_prl_fictitious_rp(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_prl_fictitious_rp_namespace::model_prl_fictitious_rp"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data 
initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // 
validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],2); - } - } - // initialize data variables - validate_non_negative_index("initV", "2", 2); - initV = vector_d(static_cast(2)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,2)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "4", 4); - num_params_r__ += 4; - validate_non_negative_index("sigma", "4", 4); - num_params_r__ += 4; - validate_non_negative_index("eta_pos_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("eta_neg_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_prl_fictitious_rp() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable 
mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "4", 4); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(4)); - vector_d mu_p(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "4", 4); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(4)); - vector_d sigma(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("eta_pos_pr"))) - throw std::runtime_error("variable eta_pos_pr missing"); - vals_r__ = context__.vals_r("eta_pos_pr"); - pos__ = 0U; - validate_non_negative_index("eta_pos_pr", "N", N); - context__.validate_dims("initialization", "eta_pos_pr", "vector_d", context__.to_vec(N)); - vector_d eta_pos_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - eta_pos_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(eta_pos_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable eta_pos_pr: ") + e.what()); - } - - if (!(context__.contains_r("eta_neg_pr"))) - throw std::runtime_error("variable eta_neg_pr missing"); - vals_r__ = context__.vals_r("eta_neg_pr"); - pos__ = 0U; - validate_non_negative_index("eta_neg_pr", "N", N); - context__.validate_dims("initialization", "eta_neg_pr", "vector_d", context__.to_vec(N)); - 
vector_d eta_neg_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - eta_neg_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(eta_neg_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable eta_neg_pr: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& 
params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(4,lp__); - else - mu_p = in__.vector_constrain(4); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,4,lp__); - else - sigma = in__.vector_lb_constrain(0,4); - - Eigen::Matrix eta_pos_pr; - (void) eta_pos_pr; // dummy to suppress unused var warning - if (jacobian__) - eta_pos_pr = in__.vector_constrain(N,lp__); - else - eta_pos_pr = in__.vector_constrain(N); - - Eigen::Matrix eta_neg_pr; - (void) eta_neg_pr; // dummy to suppress unused var warning - if (jacobian__) - eta_neg_pr = in__.vector_constrain(N,lp__); - else - eta_neg_pr = in__.vector_constrain(N); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("eta_pos", "N", N); - Eigen::Matrix eta_pos(static_cast(N)); - (void) eta_pos; // dummy to suppress unused var warning - - stan::math::initialize(eta_pos, DUMMY_VAR__); - stan::math::fill(eta_pos,DUMMY_VAR__); - validate_non_negative_index("eta_neg", "N", N); - Eigen::Matrix eta_neg(static_cast(N)); - (void) eta_neg; // dummy to suppress unused var warning - - stan::math::initialize(eta_neg, DUMMY_VAR__); - 
stan::math::fill(eta_neg,DUMMY_VAR__); - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(eta_pos, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pos_pr,i,"eta_pos_pr",1)))), - "assigning variable eta_pos"); - stan::model::assign(eta_neg, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(eta_neg_pr,i,"eta_neg_pr",1)))), - "assigning variable eta_neg"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - stan::math::assign(alpha, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),alpha_pr))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(eta_pos(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: eta_pos" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(eta_neg(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: eta_neg" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < 
N; ++i0__) { - if (stan::math::is_uninitialized(alpha(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"eta_pos",eta_pos,0); - check_less_or_equal(function__,"eta_pos",eta_pos,1); - check_greater_or_equal(function__,"eta_neg",eta_neg,0); - check_less_or_equal(function__,"eta_neg",eta_neg,1); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 2), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - lp_accum__.add(cauchy_log(get_base1(sigma,3,"sigma",1), 0, 1.0)); - lp_accum__.add(normal_log(get_base1(sigma,4,"sigma",1), 0, 0.20000000000000001)); - lp_accum__.add(normal_log(eta_pos_pr, 0, 1)); - lp_accum__.add(normal_log(eta_neg_pr, 0, 1)); - lp_accum__.add(normal_log(alpha_pr, 0, 1)); - lp_accum__.add(normal_log(beta_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - local_scalar_t__ pe_c; - (void) 
pe_c; // dummy to suppress unused var warning - - stan::math::initialize(pe_c, DUMMY_VAR__); - stan::math::fill(pe_c,DUMMY_VAR__); - local_scalar_t__ pe_nc; - (void) pe_nc; // dummy to suppress unused var warning - - stan::math::initialize(pe_nc, DUMMY_VAR__); - stan::math::fill(pe_nc,DUMMY_VAR__); - - - stan::math::assign(ev, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(alpha,i,"alpha",1) - (get_base1(ev,1,"ev",1) - get_base1(ev,2,"ev",1))))))), - "assigning variable prob"); - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - lp_accum__.add(categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), prob)); - stan::math::assign(pe_c, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - stan::math::assign(pe_nc, (-(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1))); - if (as_bool(logical_gte(pe_c,0))) { - - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta_pos,i,"eta_pos",1) * pe_c)), - "assigning variable ev"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, 
stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta_pos,i,"eta_pos",1) * pe_nc)), - "assigning variable ev"); - } else { - - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta_neg,i,"eta_neg",1) * pe_c)), - "assigning variable ev"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta_neg,i,"eta_neg",1) * pe_nc)), - "assigning variable ev"); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("eta_pos_pr"); - names__.push_back("eta_neg_pr"); - names__.push_back("alpha_pr"); - 
names__.push_back("beta_pr"); - names__.push_back("eta_pos"); - names__.push_back("eta_neg"); - names__.push_back("alpha"); - names__.push_back("beta"); - names__.push_back("mu_eta_pos"); - names__.push_back("mu_eta_neg"); - names__.push_back("mu_alpha"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("mr_ev_c"); - names__.push_back("mr_ev_nc"); - names__.push_back("mr_pe_c"); - names__.push_back("mr_pe_nc"); - names__.push_back("mr_dv"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - 
dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_prl_fictitious_rp_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(4); - vector_d sigma = in__.vector_lb_constrain(0,4); - vector_d eta_pos_pr = in__.vector_constrain(N); - vector_d eta_neg_pr = in__.vector_constrain(N); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_pos_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_neg_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("eta_pos", "N", N); - Eigen::Matrix eta_pos(static_cast(N)); - (void) eta_pos; // dummy to suppress unused var warning - - stan::math::initialize(eta_pos, DUMMY_VAR__); - 
stan::math::fill(eta_pos,DUMMY_VAR__); - validate_non_negative_index("eta_neg", "N", N); - Eigen::Matrix eta_neg(static_cast(N)); - (void) eta_neg; // dummy to suppress unused var warning - - stan::math::initialize(eta_neg, DUMMY_VAR__); - stan::math::fill(eta_neg,DUMMY_VAR__); - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(eta_pos, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pos_pr,i,"eta_pos_pr",1)))), - "assigning variable eta_pos"); - stan::model::assign(eta_neg, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(eta_neg_pr,i,"eta_neg_pr",1)))), - "assigning variable eta_neg"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - stan::math::assign(alpha, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),alpha_pr))); - - // validate transformed parameters - check_greater_or_equal(function__,"eta_pos",eta_pos,0); - check_less_or_equal(function__,"eta_pos",eta_pos,1); - check_greater_or_equal(function__,"eta_neg",eta_neg,0); - check_less_or_equal(function__,"eta_neg",eta_neg,1); - check_greater_or_equal(function__,"beta",beta,0); - 
check_less_or_equal(function__,"beta",beta,5); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_pos[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_neg[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_eta_pos; - (void) mu_eta_pos; // dummy to suppress unused var warning - - stan::math::initialize(mu_eta_pos, DUMMY_VAR__); - stan::math::fill(mu_eta_pos,DUMMY_VAR__); - local_scalar_t__ mu_eta_neg; - (void) mu_eta_neg; // dummy to suppress unused var warning - - stan::math::initialize(mu_eta_neg, DUMMY_VAR__); - stan::math::fill(mu_eta_neg,DUMMY_VAR__); - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("mr_ev_c", "N", N); - validate_non_negative_index("mr_ev_c", "T", T); - vector > mr_ev_c(N, (vector(T))); - stan::math::initialize(mr_ev_c, DUMMY_VAR__); - stan::math::fill(mr_ev_c,DUMMY_VAR__); - validate_non_negative_index("mr_ev_nc", "N", N); - validate_non_negative_index("mr_ev_nc", "T", T); - vector > mr_ev_nc(N, (vector(T))); - stan::math::initialize(mr_ev_nc, DUMMY_VAR__); - stan::math::fill(mr_ev_nc,DUMMY_VAR__); - validate_non_negative_index("mr_pe_c", "N", N); - validate_non_negative_index("mr_pe_c", "T", T); - vector > mr_pe_c(N, 
(vector(T))); - stan::math::initialize(mr_pe_c, DUMMY_VAR__); - stan::math::fill(mr_pe_c,DUMMY_VAR__); - validate_non_negative_index("mr_pe_nc", "N", N); - validate_non_negative_index("mr_pe_nc", "T", T); - vector > mr_pe_nc(N, (vector(T))); - stan::math::initialize(mr_pe_nc, DUMMY_VAR__); - stan::math::fill(mr_pe_nc,DUMMY_VAR__); - validate_non_negative_index("mr_dv", "N", N); - validate_non_negative_index("mr_dv", "T", T); - vector > mr_dv(N, (vector(T))); - stan::math::initialize(mr_dv, DUMMY_VAR__); - stan::math::fill(mr_dv,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_c"); - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_nc"); - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_pe_c"); - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_pe_nc"); - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_dv"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), 
stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_eta_pos, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_eta_neg, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_alpha, get_base1(mu_p,3,"mu_p",1)); - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,4,"mu_p",1)) * 5)); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - local_scalar_t__ pe_c; - (void) pe_c; // dummy to suppress unused var warning - - stan::math::initialize(pe_c, DUMMY_VAR__); - stan::math::fill(pe_c,DUMMY_VAR__); - local_scalar_t__ pe_nc; - (void) pe_nc; // dummy to suppress unused var warning - - stan::math::initialize(pe_nc, DUMMY_VAR__); - stan::math::fill(pe_nc,DUMMY_VAR__); - - - stan::math::assign(ev, initV); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(alpha,i,"alpha",1) - (get_base1(ev,1,"ev",1) - get_base1(ev,2,"ev",1))))))), - "assigning variable prob"); - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - stan::model::assign(log_lik, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),prob)), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(prob, base_rng__), - "assigning variable y_pred"); - stan::math::assign(pe_c, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - stan::math::assign(pe_nc, (-(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1))); - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1), - "assigning variable mr_ev_c"); - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1), - "assigning variable mr_ev_nc"); - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - pe_c, - "assigning variable mr_pe_c"); - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - pe_nc, - "assigning variable mr_pe_nc"); - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), 
stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - (pe_c - pe_nc), - "assigning variable mr_dv"); - if (as_bool(logical_gte(pe_c,0))) { - - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta_pos,i,"eta_pos",1) * pe_c)), - "assigning variable ev"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta_pos,i,"eta_pos",1) * pe_nc)), - "assigning variable ev"); - } else { - - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta_neg,i,"eta_neg",1) * pe_c)), - "assigning variable ev"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta_neg,i,"eta_neg",1) * pe_nc)), - "assigning variable ev"); - } - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_eta_pos",mu_eta_pos,0); - 
check_less_or_equal(function__,"mu_eta_pos",mu_eta_pos,1); - check_greater_or_equal(function__,"mu_eta_neg",mu_eta_neg,0); - check_less_or_equal(function__,"mu_eta_neg",mu_eta_neg,1); - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,5); - - // write generated quantities - vars__.push_back(mu_eta_pos); - vars__.push_back(mu_eta_neg); - vars__.push_back(mu_alpha); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_dv[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - 
write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_prl_fictitious_rp"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pos_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_neg_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pos" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_neg" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta_pos"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta_neg"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pos_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_neg_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pos" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_neg" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta_pos"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta_neg"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_prl_fictitious_rp_woa_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_prl_fictitious_rp_woa"); - reader.add_event(175, 173, "end", "model_prl_fictitious_rp_woa"); - return reader; -} - -class model_prl_fictitious_rp_woa : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > outcome; - vector_d initV; -public: - model_prl_fictitious_rp_woa(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_prl_fictitious_rp_woa(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress 
unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_prl_fictitious_rp_woa_namespace::model_prl_fictitious_rp_woa"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - 
context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],2); - } - } - // initialize data variables - validate_non_negative_index("initV", "2", 2); - initV = vector_d(static_cast(2)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,2)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("eta_pos_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("eta_neg_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU 
SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_prl_fictitious_rp_woa() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("eta_pos_pr"))) - throw std::runtime_error("variable eta_pos_pr missing"); - vals_r__ = context__.vals_r("eta_pos_pr"); - pos__ = 0U; - validate_non_negative_index("eta_pos_pr", "N", N); - context__.validate_dims("initialization", "eta_pos_pr", "vector_d", context__.to_vec(N)); - vector_d eta_pos_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - eta_pos_pr(j1__) = vals_r__[pos__++]; - try { - 
writer__.vector_unconstrain(eta_pos_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable eta_pos_pr: ") + e.what()); - } - - if (!(context__.contains_r("eta_neg_pr"))) - throw std::runtime_error("variable eta_neg_pr missing"); - vals_r__ = context__.vals_r("eta_neg_pr"); - pos__ = 0U; - validate_non_negative_index("eta_neg_pr", "N", N); - context__.validate_dims("initialization", "eta_neg_pr", "vector_d", context__.to_vec(N)); - vector_d eta_neg_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - eta_neg_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(eta_neg_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable eta_neg_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ 
DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix eta_pos_pr; - (void) eta_pos_pr; // dummy to suppress unused var warning - if (jacobian__) - eta_pos_pr = in__.vector_constrain(N,lp__); - else - eta_pos_pr = in__.vector_constrain(N); - - Eigen::Matrix eta_neg_pr; - (void) eta_neg_pr; // dummy to suppress unused var warning - if (jacobian__) - eta_neg_pr = in__.vector_constrain(N,lp__); - else - eta_neg_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("eta_pos", "N", N); - Eigen::Matrix eta_pos(static_cast(N)); - (void) eta_pos; // dummy to suppress unused var warning - - stan::math::initialize(eta_pos, DUMMY_VAR__); - stan::math::fill(eta_pos,DUMMY_VAR__); - validate_non_negative_index("eta_neg", "N", N); - Eigen::Matrix eta_neg(static_cast(N)); - (void) eta_neg; // dummy to suppress unused var warning - - stan::math::initialize(eta_neg, DUMMY_VAR__); - stan::math::fill(eta_neg,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - 
stan::model::assign(eta_pos, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pos_pr,i,"eta_pos_pr",1)))), - "assigning variable eta_pos"); - stan::model::assign(eta_neg, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(eta_neg_pr,i,"eta_neg_pr",1)))), - "assigning variable eta_neg"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(eta_pos(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: eta_pos" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(eta_neg(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: eta_neg" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"eta_pos",eta_pos,0); - check_less_or_equal(function__,"eta_pos",eta_pos,1); - check_greater_or_equal(function__,"eta_neg",eta_neg,0); - check_less_or_equal(function__,"eta_neg",eta_neg,1); - check_greater_or_equal(function__,"beta",beta,0); - 
check_less_or_equal(function__,"beta",beta,5); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(eta_pos_pr, 0, 1)); - lp_accum__.add(normal_log(eta_neg_pr, 0, 1)); - lp_accum__.add(normal_log(beta_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - local_scalar_t__ pe_c; - (void) pe_c; // dummy to suppress unused var warning - - stan::math::initialize(pe_c, DUMMY_VAR__); - stan::math::fill(pe_c,DUMMY_VAR__); - local_scalar_t__ pe_nc; - (void) pe_nc; // dummy to suppress unused var warning - - stan::math::initialize(pe_nc, DUMMY_VAR__); - stan::math::fill(pe_nc,DUMMY_VAR__); - - - stan::math::assign(ev, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(ev,2,"ev",1) - get_base1(ev,1,"ev",1)))))), - "assigning variable prob"); - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - lp_accum__.add(categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), prob)); - stan::math::assign(pe_c, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - stan::math::assign(pe_nc, 
(-(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1))); - if (as_bool(logical_gte(pe_c,0))) { - - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta_pos,i,"eta_pos",1) * pe_c)), - "assigning variable ev"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta_pos,i,"eta_pos",1) * pe_nc)), - "assigning variable ev"); - } else { - - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta_neg,i,"eta_neg",1) * pe_c)), - "assigning variable ev"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta_neg,i,"eta_neg",1) * pe_nc)), - "assigning variable ev"); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line 
prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("eta_pos_pr"); - names__.push_back("eta_neg_pr"); - names__.push_back("beta_pr"); - names__.push_back("eta_pos"); - names__.push_back("eta_neg"); - names__.push_back("beta"); - names__.push_back("mu_eta_pos"); - names__.push_back("mu_eta_neg"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("mr_ev_c"); - names__.push_back("mr_ev_nc"); - names__.push_back("mr_pe_c"); - names__.push_back("mr_pe_nc"); - names__.push_back("mr_dv"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - 
dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_prl_fictitious_rp_woa_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d eta_pos_pr = in__.vector_constrain(N); - vector_d eta_neg_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_pos_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_neg_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - 
stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("eta_pos", "N", N); - Eigen::Matrix eta_pos(static_cast(N)); - (void) eta_pos; // dummy to suppress unused var warning - - stan::math::initialize(eta_pos, DUMMY_VAR__); - stan::math::fill(eta_pos,DUMMY_VAR__); - validate_non_negative_index("eta_neg", "N", N); - Eigen::Matrix eta_neg(static_cast(N)); - (void) eta_neg; // dummy to suppress unused var warning - - stan::math::initialize(eta_neg, DUMMY_VAR__); - stan::math::fill(eta_neg,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(eta_pos, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pos_pr,i,"eta_pos_pr",1)))), - "assigning variable eta_pos"); - stan::model::assign(eta_neg, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(eta_neg_pr,i,"eta_neg_pr",1)))), - "assigning variable eta_neg"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - check_greater_or_equal(function__,"eta_pos",eta_pos,0); - check_less_or_equal(function__,"eta_pos",eta_pos,1); - check_greater_or_equal(function__,"eta_neg",eta_neg,0); - check_less_or_equal(function__,"eta_neg",eta_neg,1); - 
check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_pos[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_neg[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_eta_pos; - (void) mu_eta_pos; // dummy to suppress unused var warning - - stan::math::initialize(mu_eta_pos, DUMMY_VAR__); - stan::math::fill(mu_eta_pos,DUMMY_VAR__); - local_scalar_t__ mu_eta_neg; - (void) mu_eta_neg; // dummy to suppress unused var warning - - stan::math::initialize(mu_eta_neg, DUMMY_VAR__); - stan::math::fill(mu_eta_neg,DUMMY_VAR__); - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("mr_ev_c", "N", N); - validate_non_negative_index("mr_ev_c", "T", T); - vector > mr_ev_c(N, (vector(T))); - stan::math::initialize(mr_ev_c, DUMMY_VAR__); - stan::math::fill(mr_ev_c,DUMMY_VAR__); - validate_non_negative_index("mr_ev_nc", "N", N); - validate_non_negative_index("mr_ev_nc", "T", T); - vector > mr_ev_nc(N, (vector(T))); - stan::math::initialize(mr_ev_nc, DUMMY_VAR__); - stan::math::fill(mr_ev_nc,DUMMY_VAR__); - validate_non_negative_index("mr_pe_c", "N", N); - validate_non_negative_index("mr_pe_c", "T", T); - vector > mr_pe_c(N, (vector(T))); - stan::math::initialize(mr_pe_c, DUMMY_VAR__); - stan::math::fill(mr_pe_c,DUMMY_VAR__); - validate_non_negative_index("mr_pe_nc", "N", N); - validate_non_negative_index("mr_pe_nc", "T", T); - vector > 
mr_pe_nc(N, (vector(T))); - stan::math::initialize(mr_pe_nc, DUMMY_VAR__); - stan::math::fill(mr_pe_nc,DUMMY_VAR__); - validate_non_negative_index("mr_dv", "N", N); - validate_non_negative_index("mr_dv", "T", T); - vector > mr_dv(N, (vector(T))); - stan::math::initialize(mr_dv, DUMMY_VAR__); - stan::math::fill(mr_dv,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_c"); - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_nc"); - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_pe_c"); - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_pe_nc"); - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_dv"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_eta_pos, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_eta_neg, 
Phi_approx(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 5)); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - local_scalar_t__ pe_c; - (void) pe_c; // dummy to suppress unused var warning - - stan::math::initialize(pe_c, DUMMY_VAR__); - stan::math::fill(pe_c,DUMMY_VAR__); - local_scalar_t__ pe_nc; - (void) pe_nc; // dummy to suppress unused var warning - - stan::math::initialize(pe_nc, DUMMY_VAR__); - stan::math::fill(pe_nc,DUMMY_VAR__); - - - stan::math::assign(ev, initV); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(ev,2,"ev",1) - get_base1(ev,1,"ev",1)))))), - "assigning variable prob"); - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),prob)), - "assigning variable log_lik"); - 
stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(prob, base_rng__), - "assigning variable y_pred"); - stan::math::assign(pe_c, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - stan::math::assign(pe_nc, (-(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1))); - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1), - "assigning variable mr_ev_c"); - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1), - "assigning variable mr_ev_nc"); - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - pe_c, - "assigning variable mr_pe_c"); - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - pe_nc, - "assigning variable mr_pe_nc"); - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - (pe_c - pe_nc), - "assigning variable mr_dv"); - if (as_bool(logical_gte(pe_c,0))) { - - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, 
stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta_pos,i,"eta_pos",1) * pe_c)), - "assigning variable ev"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta_pos,i,"eta_pos",1) * pe_nc)), - "assigning variable ev"); - } else { - - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta_neg,i,"eta_neg",1) * pe_c)), - "assigning variable ev"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta_neg,i,"eta_neg",1) * pe_nc)), - "assigning variable ev"); - } - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_eta_pos",mu_eta_pos,0); - check_less_or_equal(function__,"mu_eta_pos",mu_eta_pos,1); - check_greater_or_equal(function__,"mu_eta_neg",mu_eta_neg,0); - check_less_or_equal(function__,"mu_eta_neg",mu_eta_neg,1); - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,5); - - // write generated quantities - vars__.push_back(mu_eta_pos); - vars__.push_back(mu_eta_neg); - 
vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_dv[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_prl_fictitious_rp_woa"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - 
std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pos_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_neg_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pos" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_neg" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta_pos"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta_neg"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pos_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_neg_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pos" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_neg" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta_pos"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta_neg"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_prl_fictitious_woa_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_prl_fictitious_woa"); - reader.add_event(160, 158, "end", "model_prl_fictitious_woa"); - return reader; -} - -class model_prl_fictitious_woa : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > outcome; - vector_d initV; -public: - model_prl_fictitious_woa(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_prl_fictitious_woa(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var 
warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_prl_fictitious_woa_namespace::model_prl_fictitious_woa"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data 
initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],2); - } - } - // initialize data variables - validate_non_negative_index("initV", "2", 2); - initV = vector_d(static_cast(2)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,2)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "2", 2); - num_params_r__ += 2; - validate_non_negative_index("sigma", "2", 2); - num_params_r__ += 2; - validate_non_negative_index("eta_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_prl_fictitious_woa() { } - - - void 
transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "2", 2); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(2)); - vector_d mu_p(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "2", 2); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(2)); - vector_d sigma(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("eta_pr"))) - throw std::runtime_error("variable eta_pr missing"); - vals_r__ = context__.vals_r("eta_pr"); - pos__ = 0U; - validate_non_negative_index("eta_pr", "N", N); - context__.validate_dims("initialization", "eta_pr", "vector_d", context__.to_vec(N)); - vector_d eta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - eta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(eta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming 
variable eta_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(2,lp__); - else - mu_p = in__.vector_constrain(2); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,2,lp__); - else - sigma = in__.vector_lb_constrain(0,2); - - Eigen::Matrix eta_pr; - (void) eta_pr; // dummy to suppress unused var warning - if (jacobian__) - eta_pr = in__.vector_constrain(N,lp__); - else - 
eta_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("eta", "N", N); - Eigen::Matrix eta(static_cast(N)); - (void) eta; // dummy to suppress unused var warning - - stan::math::initialize(eta, DUMMY_VAR__); - stan::math::fill(eta,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(eta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pr,i,"eta_pr",1)))), - "assigning variable eta"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(eta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: eta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"eta",eta,0); - check_less_or_equal(function__,"eta",eta,1); - 
check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(eta_pr, 0, 1)); - lp_accum__.add(normal_log(beta_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - local_scalar_t__ PEnc; - (void) PEnc; // dummy to suppress unused var warning - - stan::math::initialize(PEnc, DUMMY_VAR__); - stan::math::fill(PEnc,DUMMY_VAR__); - - - stan::math::assign(ev, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(ev,2,"ev",1) - get_base1(ev,1,"ev",1)))))), - "assigning variable prob"); - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - lp_accum__.add(categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), prob)); - stan::math::assign(PE, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - stan::math::assign(PEnc, 
(-(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1))); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + (get_base1(eta,i,"eta",1) * PE))), - "assigning variable ev"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1) + (get_base1(eta,i,"eta",1) * PEnc))), - "assigning variable ev"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("eta_pr"); - names__.push_back("beta_pr"); - names__.push_back("eta"); - names__.push_back("beta"); - names__.push_back("mu_eta"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("mr_ev_c"); - names__.push_back("mr_ev_nc"); - names__.push_back("mr_pe_c"); - names__.push_back("mr_pe_nc"); - 
names__.push_back("mr_dv"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_prl_fictitious_woa_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(2); - vector_d sigma = in__.vector_lb_constrain(0,2); - vector_d eta_pr = in__.vector_constrain(N); - 
vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("eta", "N", N); - Eigen::Matrix eta(static_cast(N)); - (void) eta; // dummy to suppress unused var warning - - stan::math::initialize(eta, DUMMY_VAR__); - stan::math::fill(eta,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(eta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pr,i,"eta_pr",1)))), - "assigning variable eta"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - check_greater_or_equal(function__,"eta",eta,0); - check_less_or_equal(function__,"eta",eta,1); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; 
++k_0__) { - vars__.push_back(eta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_eta; - (void) mu_eta; // dummy to suppress unused var warning - - stan::math::initialize(mu_eta, DUMMY_VAR__); - stan::math::fill(mu_eta,DUMMY_VAR__); - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("mr_ev_c", "N", N); - validate_non_negative_index("mr_ev_c", "T", T); - vector > mr_ev_c(N, (vector(T))); - stan::math::initialize(mr_ev_c, DUMMY_VAR__); - stan::math::fill(mr_ev_c,DUMMY_VAR__); - validate_non_negative_index("mr_ev_nc", "N", N); - validate_non_negative_index("mr_ev_nc", "T", T); - vector > mr_ev_nc(N, (vector(T))); - stan::math::initialize(mr_ev_nc, DUMMY_VAR__); - stan::math::fill(mr_ev_nc,DUMMY_VAR__); - validate_non_negative_index("mr_pe_c", "N", N); - validate_non_negative_index("mr_pe_c", "T", T); - vector > mr_pe_c(N, (vector(T))); - stan::math::initialize(mr_pe_c, DUMMY_VAR__); - stan::math::fill(mr_pe_c,DUMMY_VAR__); - validate_non_negative_index("mr_pe_nc", "N", N); - validate_non_negative_index("mr_pe_nc", "T", T); - vector > mr_pe_nc(N, (vector(T))); - stan::math::initialize(mr_pe_nc, DUMMY_VAR__); - stan::math::fill(mr_pe_nc,DUMMY_VAR__); - validate_non_negative_index("mr_dv", "N", N); - validate_non_negative_index("mr_dv", "T", T); - vector > mr_dv(N, (vector(T))); - stan::math::initialize(mr_dv, DUMMY_VAR__); - stan::math::fill(mr_dv,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - 
stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_c"); - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_nc"); - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_pe_c"); - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_pe_nc"); - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_dv"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_eta, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 5)); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - 
stan::math::fill(prob,DUMMY_VAR__); - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - local_scalar_t__ PEnc; - (void) PEnc; // dummy to suppress unused var warning - - stan::math::initialize(PEnc, DUMMY_VAR__); - stan::math::fill(PEnc,DUMMY_VAR__); - - - stan::math::assign(ev, initV); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(ev,2,"ev",1) - get_base1(ev,1,"ev",1)))))), - "assigning variable prob"); - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),prob))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(prob, base_rng__), - "assigning variable y_pred"); - stan::math::assign(PE, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - stan::math::assign(PEnc, (-(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1))); - stan::model::assign(mr_ev_c, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1), - "assigning variable mr_ev_c"); - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1), - "assigning variable mr_ev_nc"); - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - PE, - "assigning variable mr_pe_c"); - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - PEnc, - "assigning variable mr_pe_nc"); - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - (PE - PEnc), - "assigning variable mr_dv"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta,i,"eta",1) * PE)), - "assigning variable ev"); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta,i,"eta",1) * PEnc)), - "assigning variable ev"); - } - } - } - - // validate generated 
quantities - check_greater_or_equal(function__,"mu_eta",mu_eta,0); - check_less_or_equal(function__,"mu_eta",mu_eta,1); - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,5); - - // write generated quantities - vars__.push_back(mu_eta); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_dv[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for 
(int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_prl_fictitious_woa"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_prl_rp_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_prl_rp"); - reader.add_event(148, 146, "end", "model_prl_rp"); - return reader; -} - -class model_prl_rp : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > outcome; - vector_d initV; -public: - model_prl_rp(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_prl_rp(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_prl_rp_namespace::model_prl_rp"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - 
pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - 
check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],2); - } - } - // initialize data variables - validate_non_negative_index("initV", "2", 2); - initV = vector_d(static_cast(2)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,2)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("Apun_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("Arew_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_prl_rp() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", 
"mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("Apun_pr"))) - throw std::runtime_error("variable Apun_pr missing"); - vals_r__ = context__.vals_r("Apun_pr"); - pos__ = 0U; - validate_non_negative_index("Apun_pr", "N", N); - context__.validate_dims("initialization", "Apun_pr", "vector_d", context__.to_vec(N)); - vector_d Apun_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Apun_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Apun_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Apun_pr: ") + e.what()); - } - - if (!(context__.contains_r("Arew_pr"))) - throw std::runtime_error("variable Arew_pr missing"); - vals_r__ = context__.vals_r("Arew_pr"); - pos__ = 0U; - validate_non_negative_index("Arew_pr", "N", N); - context__.validate_dims("initialization", "Arew_pr", "vector_d", context__.to_vec(N)); - vector_d Arew_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Arew_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Arew_pr); - } catch (const std::exception& e) { - throw 
std::runtime_error(std::string("Error transforming variable Arew_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix Apun_pr; - (void) Apun_pr; // dummy to suppress unused var warning - if 
(jacobian__) - Apun_pr = in__.vector_constrain(N,lp__); - else - Apun_pr = in__.vector_constrain(N); - - Eigen::Matrix Arew_pr; - (void) Arew_pr; // dummy to suppress unused var warning - if (jacobian__) - Arew_pr = in__.vector_constrain(N,lp__); - else - Arew_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 10), - "assigning variable beta"); - 
} - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Apun(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Apun" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Arew(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Arew" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(Apun_pr, 0, 1)); - lp_accum__.add(normal_log(Arew_pr, 0, 1)); - lp_accum__.add(normal_log(beta_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - local_scalar_t__ pe; - (void) pe; // dummy to suppress unused var warning - - stan::math::initialize(pe, DUMMY_VAR__); - stan::math::fill(pe,DUMMY_VAR__); - - - stan::math::assign(ev, initV); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - 
lp_accum__.add(categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), multiply(ev,get_base1(beta,i,"beta",1)))); - stan::math::assign(pe, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - if (as_bool(logical_gt(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(Arew,i,"Arew",1) * pe)), - "assigning variable ev"); - } else { - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(Apun,i,"Apun",1) * pe)), - "assigning variable ev"); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - 
names__.push_back("sigma"); - names__.push_back("Apun_pr"); - names__.push_back("Arew_pr"); - names__.push_back("beta_pr"); - names__.push_back("Apun"); - names__.push_back("Arew"); - names__.push_back("beta"); - names__.push_back("mu_Apun"); - names__.push_back("mu_Arew"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("mr_ev_c"); - names__.push_back("mr_ev_nc"); - names__.push_back("mr_pe"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double 
local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_prl_rp_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d Apun_pr = in__.vector_constrain(N); - vector_d Arew_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Arew_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), 
stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 10), - "assigning variable beta"); - } - - // validate transformed parameters - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Arew[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_Apun; - (void) mu_Apun; // dummy to suppress unused var warning - - stan::math::initialize(mu_Apun, DUMMY_VAR__); - stan::math::fill(mu_Apun,DUMMY_VAR__); - local_scalar_t__ mu_Arew; - (void) mu_Arew; // dummy to suppress unused var warning - - stan::math::initialize(mu_Arew, DUMMY_VAR__); - stan::math::fill(mu_Arew,DUMMY_VAR__); - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", 
N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("mr_ev_c", "N", N); - validate_non_negative_index("mr_ev_c", "T", T); - vector > mr_ev_c(N, (vector(T))); - stan::math::initialize(mr_ev_c, DUMMY_VAR__); - stan::math::fill(mr_ev_c,DUMMY_VAR__); - validate_non_negative_index("mr_ev_nc", "N", N); - validate_non_negative_index("mr_ev_nc", "T", T); - vector > mr_ev_nc(N, (vector(T))); - stan::math::initialize(mr_ev_nc, DUMMY_VAR__); - stan::math::fill(mr_ev_nc,DUMMY_VAR__); - validate_non_negative_index("mr_pe", "N", N); - validate_non_negative_index("mr_pe", "T", T); - vector > mr_pe(N, (vector(T))); - stan::math::initialize(mr_pe, DUMMY_VAR__); - stan::math::fill(mr_pe,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_c"); - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_nc"); - stan::model::assign(mr_pe, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_pe"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_Apun, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - 
stan::math::assign(mu_Arew, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 10)); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - local_scalar_t__ pe; - (void) pe; // dummy to suppress unused var warning - - stan::math::initialize(pe, DUMMY_VAR__); - stan::math::fill(pe,DUMMY_VAR__); - - - stan::math::assign(ev, initV); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),multiply(ev,get_base1(beta,i,"beta",1)))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(softmax(multiply(ev,get_base1(beta,i,"beta",1))), base_rng__), - "assigning variable y_pred"); - stan::math::assign(pe, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1), - "assigning variable mr_ev_c"); - stan::model::assign(mr_ev_nc, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1), - "assigning variable mr_ev_nc"); - stan::model::assign(mr_pe, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - pe, - "assigning variable mr_pe"); - if (as_bool(logical_gt(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(Arew,i,"Arew",1) * pe)), - "assigning variable ev"); - } else { - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(Apun,i,"Apun",1) * pe)), - "assigning variable ev"); - } - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_Apun",mu_Apun,0); - check_less_or_equal(function__,"mu_Apun",mu_Apun,1); - check_greater_or_equal(function__,"mu_Arew",mu_Arew,0); - check_less_or_equal(function__,"mu_Arew",mu_Arew,1); - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,10); - - // write generated quantities - vars__.push_back(mu_Apun); - vars__.push_back(mu_Arew); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for 
(int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_prl_rp"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_prl_rp_multipleB_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_prl_rp_multipleB"); - reader.add_event(159, 157, "end", "model_prl_rp_multipleB"); - return reader; -} - -class model_prl_rp_multipleB : public prob_grad { -private: - int N; - int T; - int maxB; - vector B; - vector > Tsubj; - vector > > choice; - vector > > outcome; - vector_d initV; -public: - model_prl_rp_multipleB(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_prl_rp_multipleB(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - 
void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_prl_rp_multipleB_namespace::model_prl_rp_multipleB"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - context__.validate_dims("data initialization", "maxB", "int", context__.to_vec()); - maxB = int(0); - vals_i__ = context__.vals_i("maxB"); - pos__ = 0; - maxB = vals_i__[pos__++]; - validate_non_negative_index("B", "N", N); - context__.validate_dims("data initialization", "B", "int", context__.to_vec(N)); - validate_non_negative_index("B", "N", N); - B = std::vector(N,int(0)); - vals_i__ = context__.vals_i("B"); - pos__ = 0; - size_t B_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < B_limit_0__; ++i_0__) { - B[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("Tsubj", "N", N); - validate_non_negative_index("Tsubj", "maxB", maxB); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N,maxB)); - validate_non_negative_index("Tsubj", "N", N); - validate_non_negative_index("Tsubj", "maxB", maxB); - Tsubj = std::vector >(N,std::vector(maxB,int(0))); - 
vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_1__ = maxB; - for (size_t i_1__ = 0; i_1__ < Tsubj_limit_1__; ++i_1__) { - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "maxB", maxB); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,maxB,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "maxB", maxB); - validate_non_negative_index("choice", "T", T); - choice = std::vector > >(N,std::vector >(maxB,std::vector(T,int(0)))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_2__ = T; - for (size_t i_2__ = 0; i_2__ < choice_limit_2__; ++i_2__) { - size_t choice_limit_1__ = maxB; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__][i_2__] = vals_i__[pos__++]; - } - } - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "maxB", maxB); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,maxB,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "maxB", maxB); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector > >(N,std::vector >(maxB,std::vector(T,double(0)))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_2__ = T; - for (size_t i_2__ = 0; i_2__ < outcome_limit_2__; ++i_2__) { - size_t outcome_limit_1__ = maxB; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - 
outcome[i_0__][i_1__][i_2__] = vals_r__[pos__++]; - } - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,0); - check_greater_or_equal(function__,"maxB",maxB,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"B[k0__]",B[k0__],1); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < maxB; ++k1__) { - check_greater_or_equal(function__,"Tsubj[k0__][k1__]",Tsubj[k0__][k1__],0); - check_less_or_equal(function__,"Tsubj[k0__][k1__]",Tsubj[k0__][k1__],T); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < maxB; ++k1__) { - for (int k2__ = 0; k2__ < T; ++k2__) { - check_greater_or_equal(function__,"choice[k0__][k1__][k2__]",choice[k0__][k1__][k2__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__][k2__]",choice[k0__][k1__][k2__],2); - } - } - } - // initialize data variables - validate_non_negative_index("initV", "2", 2); - initV = vector_d(static_cast(2)); - stan::math::fill(initV,DUMMY_VAR__); - - stan::math::assign(initV, rep_vector(0.0,2)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("Apun_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("Arew_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_prl_rp_multipleB() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& 
params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("Apun_pr"))) - throw std::runtime_error("variable Apun_pr missing"); - vals_r__ = context__.vals_r("Apun_pr"); - pos__ = 0U; - validate_non_negative_index("Apun_pr", "N", N); - context__.validate_dims("initialization", "Apun_pr", "vector_d", context__.to_vec(N)); - vector_d Apun_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Apun_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Apun_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Apun_pr: ") + e.what()); - } - - if (!(context__.contains_r("Arew_pr"))) - throw 
std::runtime_error("variable Arew_pr missing"); - vals_r__ = context__.vals_r("Arew_pr"); - pos__ = 0U; - validate_non_negative_index("Arew_pr", "N", N); - context__.validate_dims("initialization", "Arew_pr", "vector_d", context__.to_vec(N)); - vector_d Arew_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Arew_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Arew_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Arew_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; 
- (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix Apun_pr; - (void) Apun_pr; // dummy to suppress unused var warning - if (jacobian__) - Apun_pr = in__.vector_constrain(N,lp__); - else - Apun_pr = in__.vector_constrain(N); - - Eigen::Matrix Arew_pr; - (void) Arew_pr; // dummy to suppress unused var warning - if (jacobian__) - Arew_pr = in__.vector_constrain(N,lp__); - else - Arew_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - stan::model::assign(Arew, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 10), - "assigning variable beta"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Apun(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Apun" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Arew(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Arew" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(Apun_pr, 0, 1)); - lp_accum__.add(normal_log(Arew_pr, 0, 1)); - lp_accum__.add(normal_log(beta_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - - for (int bIdx = 1; bIdx <= 
get_base1(B,i,"B",1); ++bIdx) { - { - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - local_scalar_t__ pe; - (void) pe; // dummy to suppress unused var warning - - stan::math::initialize(pe, DUMMY_VAR__); - stan::math::fill(pe,DUMMY_VAR__); - - - stan::math::assign(ev, initV); - for (int t = 1; t <= get_base1(get_base1(Tsubj,i,"Tsubj",1),bIdx,"Tsubj",2); ++t) { - - lp_accum__.add(categorical_logit_log(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3), multiply(ev,get_base1(beta,i,"beta",1)))); - stan::math::assign(pe, (get_base1(get_base1(get_base1(outcome,i,"outcome",1),bIdx,"outcome",2),t,"outcome",3) - get_base1(ev,get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),"ev",1))); - if (as_bool(logical_gt(get_base1(get_base1(get_base1(outcome,i,"outcome",1),bIdx,"outcome",2),t,"outcome",3),0))) { - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), "ev") + (get_base1(Arew,i,"Arew",1) * pe)), - "assigning variable ev"); - } else { - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), "ev") + (get_base1(Apun,i,"Apun",1) * pe)), - "assigning variable ev"); - } - } - } - } - } - - } catch (const 
std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("Apun_pr"); - names__.push_back("Arew_pr"); - names__.push_back("beta_pr"); - names__.push_back("Apun"); - names__.push_back("Arew"); - names__.push_back("beta"); - names__.push_back("mu_Apun"); - names__.push_back("mu_Arew"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("mr_ev_c"); - names__.push_back("mr_ev_nc"); - names__.push_back("mr_pe"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - 
dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_prl_rp_multipleB_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d Apun_pr = in__.vector_constrain(N); - vector_d Arew_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Arew_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ 
DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 10), - "assigning variable beta"); - } - - // validate transformed parameters - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // write transformed parameters - if 
(include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Arew[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_Apun; - (void) mu_Apun; // dummy to suppress unused var warning - - stan::math::initialize(mu_Apun, DUMMY_VAR__); - stan::math::fill(mu_Apun,DUMMY_VAR__); - local_scalar_t__ mu_Arew; - (void) mu_Arew; // dummy to suppress unused var warning - - stan::math::initialize(mu_Arew, DUMMY_VAR__); - stan::math::fill(mu_Arew,DUMMY_VAR__); - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("mr_ev_c", "N", N); - validate_non_negative_index("mr_ev_c", "maxB", maxB); - validate_non_negative_index("mr_ev_c", "T", T); - vector > > mr_ev_c(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(mr_ev_c, DUMMY_VAR__); - stan::math::fill(mr_ev_c,DUMMY_VAR__); - validate_non_negative_index("mr_ev_nc", "N", N); - validate_non_negative_index("mr_ev_nc", "maxB", maxB); - validate_non_negative_index("mr_ev_nc", "T", T); - vector > > mr_ev_nc(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(mr_ev_nc, DUMMY_VAR__); - stan::math::fill(mr_ev_nc,DUMMY_VAR__); - validate_non_negative_index("mr_pe", "N", N); - validate_non_negative_index("mr_pe", "maxB", maxB); - validate_non_negative_index("mr_pe", "T", T); - vector > > mr_pe(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(mr_pe, DUMMY_VAR__); - stan::math::fill(mr_pe,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - 
validate_non_negative_index("y_pred", "maxB", maxB); - validate_non_negative_index("y_pred", "T", T); - vector > > y_pred(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int b = 1; b <= maxB; ++b) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - 0, - "assigning variable mr_ev_c"); - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - 0, - "assigning variable mr_ev_nc"); - stan::model::assign(mr_pe, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - 0, - "assigning variable mr_pe"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - -(1), - "assigning variable y_pred"); - } - } - } - stan::math::assign(mu_Apun, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_Arew, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 10)); - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int bIdx = 1; bIdx <= get_base1(B,i,"B",1); ++bIdx) { - { - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - 
stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - local_scalar_t__ pe; - (void) pe; // dummy to suppress unused var warning - - stan::math::initialize(pe, DUMMY_VAR__); - stan::math::fill(pe,DUMMY_VAR__); - - - stan::math::assign(ev, initV); - for (int t = 1; t <= get_base1(get_base1(Tsubj,i,"Tsubj",1),bIdx,"Tsubj",2); ++t) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_logit_log(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),multiply(ev,get_base1(beta,i,"beta",1))))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - categorical_rng(softmax(multiply(ev,get_base1(beta,i,"beta",1))), base_rng__), - "assigning variable y_pred"); - stan::math::assign(pe, (get_base1(get_base1(get_base1(outcome,i,"outcome",1),bIdx,"outcome",2),t,"outcome",3) - get_base1(ev,get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),"ev",1))); - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - get_base1(ev,get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),"ev",1), - "assigning variable mr_ev_c"); - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - get_base1(ev,(3 - get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)),"ev",1), - "assigning variable mr_ev_nc"); - 
stan::model::assign(mr_pe, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - pe, - "assigning variable mr_pe"); - if (as_bool(logical_gt(get_base1(get_base1(get_base1(outcome,i,"outcome",1),bIdx,"outcome",2),t,"outcome",3),0))) { - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), "ev") + (get_base1(Arew,i,"Arew",1) * pe)), - "assigning variable ev"); - } else { - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), "ev") + (get_base1(Apun,i,"Apun",1) * pe)), - "assigning variable ev"); - } - } - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_Apun",mu_Apun,0); - check_less_or_equal(function__,"mu_Apun",mu_Apun,1); - check_greater_or_equal(function__,"mu_Arew",mu_Arew,0); - check_less_or_equal(function__,"mu_Arew",mu_Arew,1); - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,10); - - // write generated quantities - vars__.push_back(mu_Apun); - vars__.push_back(mu_Arew); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; 
k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_c[k_0__][k_1__][k_2__]); - } - } - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_nc[k_0__][k_1__][k_2__]); - } - } - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe[k_0__][k_1__][k_2__]); - } - } - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__][k_2__]); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_prl_rp_multipleB"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_pst_gainloss_Q_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_pst_gainloss_Q"); - reader.add_event(113, 111, "end", "model_pst_gainloss_Q"); - return reader; -} - -class model_pst_gainloss_Q : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > option1; - vector > option2; - vector > choice; - vector > reward; - vector_d initial_values; -public: - model_pst_gainloss_Q(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_pst_gainloss_Q(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_pst_gainloss_Q_namespace::model_pst_gainloss_Q"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - 
context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("option1", "N", N); - validate_non_negative_index("option1", "T", T); - context__.validate_dims("data initialization", "option1", "int", context__.to_vec(N,T)); - validate_non_negative_index("option1", "N", N); - validate_non_negative_index("option1", "T", T); - option1 = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("option1"); - pos__ = 0; - size_t option1_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < option1_limit_1__; ++i_1__) { - size_t option1_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < option1_limit_0__; ++i_0__) { - option1[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("option2", "N", N); - validate_non_negative_index("option2", "T", T); - context__.validate_dims("data initialization", "option2", "int", context__.to_vec(N,T)); - validate_non_negative_index("option2", "N", N); - validate_non_negative_index("option2", "T", T); - option2 = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("option2"); - pos__ = 0; - size_t option2_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < option2_limit_1__; ++i_1__) { - size_t option2_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < option2_limit_0__; ++i_0__) { - option2[i_0__][i_1__] = 
vals_i__[pos__++]; - } - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("reward", "N", N); - validate_non_negative_index("reward", "T", T); - context__.validate_dims("data initialization", "reward", "double", context__.to_vec(N,T)); - validate_non_negative_index("reward", "N", N); - validate_non_negative_index("reward", "T", T); - reward = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("reward"); - pos__ = 0; - size_t reward_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < reward_limit_1__; ++i_1__) { - size_t reward_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < reward_limit_0__; ++i_0__) { - reward[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"option1[k0__][k1__]",option1[k0__][k1__],-(1)); - check_less_or_equal(function__,"option1[k0__][k1__]",option1[k0__][k1__],6); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"option2[k0__][k1__]",option2[k0__][k1__],-(1)); - 
check_less_or_equal(function__,"option2[k0__][k1__]",option2[k0__][k1__],6); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],1); - } - } - // initialize data variables - validate_non_negative_index("initial_values", "6", 6); - initial_values = vector_d(static_cast(6)); - stan::math::fill(initial_values,DUMMY_VAR__); - - stan::math::assign(initial_values, rep_vector(0,6)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("alpha_pos_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("alpha_neg_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_pst_gainloss_Q() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu"))) - throw std::runtime_error("variable mu missing"); - vals_r__ = context__.vals_r("mu"); - pos__ = 0U; - validate_non_negative_index("mu", "3", 3); - context__.validate_dims("initialization", "mu", "vector_d", context__.to_vec(3)); - vector_d mu(static_cast(3)); - for (int j1__ = 0U; 
j1__ < 3; ++j1__) - mu(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pos_pr"))) - throw std::runtime_error("variable alpha_pos_pr missing"); - vals_r__ = context__.vals_r("alpha_pos_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pos_pr", "N", N); - context__.validate_dims("initialization", "alpha_pos_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pos_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pos_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pos_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pos_pr: ") + e.what()); - } - - if (!(context__.contains_r("alpha_neg_pr"))) - throw std::runtime_error("variable alpha_neg_pr missing"); - vals_r__ = context__.vals_r("alpha_neg_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_neg_pr", "N", N); - context__.validate_dims("initialization", "alpha_neg_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_neg_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_neg_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_neg_pr); - } catch (const std::exception& e) { - throw 
std::runtime_error(std::string("Error transforming variable alpha_neg_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu; - (void) mu; // dummy to suppress unused var warning - if (jacobian__) - mu = in__.vector_constrain(3,lp__); - else - mu = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix alpha_pos_pr; - (void) alpha_pos_pr; // dummy to suppress unused var warning - if 
(jacobian__) - alpha_pos_pr = in__.vector_constrain(N,lp__); - else - alpha_pos_pr = in__.vector_constrain(N); - - Eigen::Matrix alpha_neg_pr; - (void) alpha_neg_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_neg_pr = in__.vector_constrain(N,lp__); - else - alpha_neg_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("alpha_pos", "N", N); - Eigen::Matrix alpha_pos(static_cast(N)); - (void) alpha_pos; // dummy to suppress unused var warning - - stan::math::initialize(alpha_pos, DUMMY_VAR__); - stan::math::fill(alpha_pos,DUMMY_VAR__); - validate_non_negative_index("alpha_neg", "N", N); - Eigen::Matrix alpha_neg(static_cast(N)); - (void) alpha_neg; // dummy to suppress unused var warning - - stan::math::initialize(alpha_neg, DUMMY_VAR__); - stan::math::fill(alpha_neg,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - stan::math::assign(alpha_pos, Phi_approx(add(get_base1(mu,1,"mu",1),multiply(get_base1(sigma,1,"sigma",1),alpha_pos_pr)))); - stan::math::assign(alpha_neg, Phi_approx(add(get_base1(mu,2,"mu",1),multiply(get_base1(sigma,2,"sigma",1),alpha_neg_pr)))); - stan::math::assign(beta, multiply(Phi_approx(add(get_base1(mu,3,"mu",1),multiply(get_base1(sigma,3,"sigma",1),beta_pr))),10)); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha_pos(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha_pos" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if 
(stan::math::is_uninitialized(alpha_neg(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha_neg" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"alpha_pos",alpha_pos,0); - check_less_or_equal(function__,"alpha_pos",alpha_pos,1); - check_greater_or_equal(function__,"alpha_neg",alpha_neg,0); - check_less_or_equal(function__,"alpha_neg",alpha_neg,1); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // model body - - lp_accum__.add(normal_log(mu, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(alpha_pos_pr, 0, 1)); - lp_accum__.add(normal_log(alpha_neg_pr, 0, 1)); - lp_accum__.add(normal_log(beta_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - { - int co(0); - (void) co; // dummy to suppress unused var warning - - stan::math::fill(co, std::numeric_limits::min()); - local_scalar_t__ delta; - (void) delta; // dummy to suppress unused var warning - - stan::math::initialize(delta, DUMMY_VAR__); - stan::math::fill(delta,DUMMY_VAR__); - local_scalar_t__ pe; - (void) pe; // dummy to suppress unused var warning - - stan::math::initialize(pe, DUMMY_VAR__); - stan::math::fill(pe,DUMMY_VAR__); - local_scalar_t__ alpha; - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("ev", "6", 6); - Eigen::Matrix ev(static_cast(6)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, 
DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - - - stan::math::assign(ev, initial_values); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::math::assign(co, (logical_gt(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),0) ? get_base1(get_base1(option1,i,"option1",1),t,"option1",2) : get_base1(get_base1(option2,i,"option2",1),t,"option2",2) )); - stan::math::assign(delta, (get_base1(ev,get_base1(get_base1(option1,i,"option1",1),t,"option1",2),"ev",1) - get_base1(ev,get_base1(get_base1(option2,i,"option2",1),t,"option2",2),"ev",1))); - lp_accum__.add(bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),(get_base1(beta,i,"beta",1) * delta))); - stan::math::assign(pe, (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(ev,co,"ev",1))); - stan::math::assign(alpha, (logical_gte(pe,0) ? stan::math::promote_scalar(get_base1(alpha_pos,i,"alpha_pos",1)) : stan::math::promote_scalar(get_base1(alpha_neg,i,"alpha_neg",1)) )); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(co), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(co), stan::model::nil_index_list()), "ev") + (alpha * pe)), - "assigning variable ev"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - 
names__.resize(0); - names__.push_back("mu"); - names__.push_back("sigma"); - names__.push_back("alpha_pos_pr"); - names__.push_back("alpha_neg_pr"); - names__.push_back("beta_pr"); - names__.push_back("alpha_pos"); - names__.push_back("alpha_neg"); - names__.push_back("beta"); - names__.push_back("mu_alpha_pos"); - names__.push_back("mu_alpha_neg"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_pst_gainloss_Q_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d alpha_pos_pr = in__.vector_constrain(N); - vector_d alpha_neg_pr = 
in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pos_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_neg_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("alpha_pos", "N", N); - Eigen::Matrix alpha_pos(static_cast(N)); - (void) alpha_pos; // dummy to suppress unused var warning - - stan::math::initialize(alpha_pos, DUMMY_VAR__); - stan::math::fill(alpha_pos,DUMMY_VAR__); - validate_non_negative_index("alpha_neg", "N", N); - Eigen::Matrix alpha_neg(static_cast(N)); - (void) alpha_neg; // dummy to suppress unused var warning - - stan::math::initialize(alpha_neg, DUMMY_VAR__); - stan::math::fill(alpha_neg,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - stan::math::assign(alpha_pos, Phi_approx(add(get_base1(mu,1,"mu",1),multiply(get_base1(sigma,1,"sigma",1),alpha_pos_pr)))); - stan::math::assign(alpha_neg, Phi_approx(add(get_base1(mu,2,"mu",1),multiply(get_base1(sigma,2,"sigma",1),alpha_neg_pr)))); - stan::math::assign(beta, multiply(Phi_approx(add(get_base1(mu,3,"mu",1),multiply(get_base1(sigma,3,"sigma",1),beta_pr))),10)); - - // validate transformed parameters - 
check_greater_or_equal(function__,"alpha_pos",alpha_pos,0); - check_less_or_equal(function__,"alpha_pos",alpha_pos,1); - check_greater_or_equal(function__,"alpha_neg",alpha_neg,0); - check_less_or_equal(function__,"alpha_neg",alpha_neg,1); - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pos[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_neg[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_alpha_pos; - (void) mu_alpha_pos; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha_pos, DUMMY_VAR__); - stan::math::fill(mu_alpha_pos,DUMMY_VAR__); - local_scalar_t__ mu_alpha_neg; - (void) mu_alpha_neg; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha_neg, DUMMY_VAR__); - stan::math::fill(mu_alpha_neg,DUMMY_VAR__); - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - - - stan::math::assign(mu_alpha_pos, Phi_approx(get_base1(mu,1,"mu",1))); - stan::math::assign(mu_alpha_neg, Phi_approx(get_base1(mu,2,"mu",1))); - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu,3,"mu",1)) * 10)); - - for (int i = 1; i <= N; ++i) { - { - int co(0); - (void) co; // dummy to suppress unused var warning - - stan::math::fill(co, std::numeric_limits::min()); - local_scalar_t__ delta; - (void) delta; // dummy to suppress unused var warning - - stan::math::initialize(delta, DUMMY_VAR__); - 
stan::math::fill(delta,DUMMY_VAR__); - local_scalar_t__ pe; - (void) pe; // dummy to suppress unused var warning - - stan::math::initialize(pe, DUMMY_VAR__); - stan::math::fill(pe,DUMMY_VAR__); - local_scalar_t__ alpha; - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("ev", "6", 6); - Eigen::Matrix ev(static_cast(6)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - - - stan::math::assign(ev, initial_values); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::math::assign(co, (logical_gt(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),0) ? get_base1(get_base1(option1,i,"option1",1),t,"option1",2) : get_base1(get_base1(option2,i,"option2",1),t,"option2",2) )); - stan::math::assign(delta, (get_base1(ev,get_base1(get_base1(option1,i,"option1",1),t,"option1",2),"ev",1) - get_base1(ev,get_base1(get_base1(option2,i,"option2",1),t,"option2",2),"ev",1))); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),(get_base1(beta,i,"beta",1) * delta))), - "assigning variable log_lik"); - stan::math::assign(pe, (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(ev,co,"ev",1))); - stan::math::assign(alpha, (logical_gte(pe,0) ? 
stan::math::promote_scalar(get_base1(alpha_pos,i,"alpha_pos",1)) : stan::math::promote_scalar(get_base1(alpha_neg,i,"alpha_neg",1)) )); - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(co), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(co), stan::model::nil_index_list()), "ev") + (alpha * pe)), - "assigning variable ev"); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_alpha_pos",mu_alpha_pos,0); - check_less_or_equal(function__,"mu_alpha_pos",mu_alpha_pos,1); - check_greater_or_equal(function__,"mu_alpha_neg",mu_alpha_neg,0); - check_less_or_equal(function__,"mu_alpha_neg",mu_alpha_neg,1); - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,10); - - // write generated quantities - vars__.push_back(mu_alpha_pos); - vars__.push_back(mu_alpha_neg); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_pst_gainloss_Q"; - } - - - void constrained_param_names(std::vector& 
param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pos_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_neg_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pos" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_neg" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha_pos"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha_neg"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pos_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_neg_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pos" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_neg" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha_pos"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha_neg"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_ra_noLA_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_ra_noLA"); - reader.add_event(94, 92, "end", "model_ra_noLA"); - return reader; -} - -class model_ra_noLA : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > gamble; - vector > gain; - vector > cert; - vector > loss; -public: - model_ra_noLA(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_ra_noLA(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_ra_noLA_namespace::model_ra_noLA"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = 
context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("gamble", "N", N); - validate_non_negative_index("gamble", "T", T); - context__.validate_dims("data initialization", "gamble", "int", context__.to_vec(N,T)); - validate_non_negative_index("gamble", "N", N); - validate_non_negative_index("gamble", "T", T); - gamble = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("gamble"); - pos__ = 0; - size_t gamble_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < gamble_limit_1__; ++i_1__) { - size_t gamble_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < gamble_limit_0__; ++i_0__) { - gamble[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("gain", "N", N); - validate_non_negative_index("gain", "T", T); - context__.validate_dims("data initialization", "gain", "double", context__.to_vec(N,T)); - validate_non_negative_index("gain", "N", N); - validate_non_negative_index("gain", "T", T); - gain = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("gain"); - pos__ = 0; - size_t gain_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < gain_limit_1__; ++i_1__) { - size_t gain_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < gain_limit_0__; ++i_0__) { - gain[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("cert", "N", N); - validate_non_negative_index("cert", "T", T); - context__.validate_dims("data 
initialization", "cert", "double", context__.to_vec(N,T)); - validate_non_negative_index("cert", "N", N); - validate_non_negative_index("cert", "T", T); - cert = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("cert"); - pos__ = 0; - size_t cert_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cert_limit_1__; ++i_1__) { - size_t cert_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cert_limit_0__; ++i_0__) { - cert[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("loss", "N", N); - validate_non_negative_index("loss", "T", T); - context__.validate_dims("data initialization", "loss", "double", context__.to_vec(N,T)); - validate_non_negative_index("loss", "N", N); - validate_non_negative_index("loss", "T", T); - loss = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("loss"); - pos__ = 0; - size_t loss_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < loss_limit_1__; ++i_1__) { - size_t loss_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < loss_limit_0__; ++i_0__) { - loss[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"gamble[k0__][k1__]",gamble[k0__][k1__],-(1)); - check_less_or_equal(function__,"gamble[k0__][k1__]",gamble[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"gain[k0__][k1__]",gain[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"loss[k0__][k1__]",loss[k0__][k1__],0); - } - } - // initialize data 
variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "2", 2); - num_params_r__ += 2; - validate_non_negative_index("sigma", "2", 2); - num_params_r__ += 2; - validate_non_negative_index("rho_p", "N", N); - num_params_r__ += N; - validate_non_negative_index("tau_p", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_ra_noLA() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "2", 2); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(2)); - vector_d mu_p(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "2", 2); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(2)); - vector_d sigma(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - 
writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("rho_p"))) - throw std::runtime_error("variable rho_p missing"); - vals_r__ = context__.vals_r("rho_p"); - pos__ = 0U; - validate_non_negative_index("rho_p", "N", N); - context__.validate_dims("initialization", "rho_p", "vector_d", context__.to_vec(N)); - vector_d rho_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_p: ") + e.what()); - } - - if (!(context__.contains_r("tau_p"))) - throw std::runtime_error("variable tau_p missing"); - vals_r__ = context__.vals_r("tau_p"); - pos__ = 0U; - validate_non_negative_index("tau_p", "N", N); - context__.validate_dims("initialization", "tau_p", "vector_d", context__.to_vec(N)); - vector_d tau_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - tau_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(tau_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau_p: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; 
// suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(2,lp__); - else - mu_p = in__.vector_constrain(2); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,2,lp__); - else - sigma = in__.vector_lb_constrain(0,2); - - Eigen::Matrix rho_p; - (void) rho_p; // dummy to suppress unused var warning - if (jacobian__) - rho_p = in__.vector_constrain(N,lp__); - else - rho_p = in__.vector_constrain(N); - - Eigen::Matrix tau_p; - (void) tau_p; // dummy to suppress unused var warning - if (jacobian__) - tau_p = in__.vector_constrain(N,lp__); - else - tau_p = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(rho, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(rho_p,i,"rho_p",1)))) * 2), - "assigning variable rho"); - } - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),tau_p)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: 
rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(tau(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"rho",rho,0); - check_less_or_equal(function__,"rho",rho,2); - check_greater_or_equal(function__,"tau",tau,0); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1.0)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(rho_p, 0, 1.0)); - lp_accum__.add(normal_log(tau_p, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - local_scalar_t__ evSafe; - (void) evSafe; // dummy to suppress unused var warning - - stan::math::initialize(evSafe, DUMMY_VAR__); - stan::math::fill(evSafe,DUMMY_VAR__); - local_scalar_t__ evGamble; - (void) evGamble; // dummy to suppress unused var warning - - stan::math::initialize(evGamble, DUMMY_VAR__); - stan::math::fill(evGamble,DUMMY_VAR__); - local_scalar_t__ pGamble; - (void) pGamble; // dummy to suppress unused var warning - - stan::math::initialize(pGamble, DUMMY_VAR__); - stan::math::fill(pGamble,DUMMY_VAR__); - - - stan::math::assign(evSafe, pow(get_base1(get_base1(cert,i,"cert",1),t,"cert",2),get_base1(rho,i,"rho",1))); - stan::math::assign(evGamble, (0.5 * (pow(get_base1(get_base1(gain,i,"gain",1),t,"gain",2),get_base1(rho,i,"rho",1)) - pow(get_base1(get_base1(loss,i,"loss",1),t,"loss",2),get_base1(rho,i,"rho",1))))); - stan::math::assign(pGamble, inv_logit((get_base1(tau,i,"tau",1) * (evGamble - evSafe)))); - lp_accum__.add(bernoulli_log(get_base1(get_base1(gamble,i,"gamble",1),t,"gamble",2), pGamble)); - } - } - } - - } catch (const std::exception& e) { - 
stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("rho_p"); - names__.push_back("tau_p"); - names__.push_back("rho"); - names__.push_back("tau"); - names__.push_back("mu_rho"); - names__.push_back("mu_tau"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* 
pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_ra_noLA_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(2); - vector_d sigma = in__.vector_lb_constrain(0,2); - vector_d rho_p = in__.vector_constrain(N); - vector_d tau_p = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau_p[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(rho, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(rho_p,i,"rho_p",1)))) * 2), - "assigning variable rho"); - } - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),tau_p)))); - - // validate transformed 
parameters - check_greater_or_equal(function__,"rho",rho,0); - check_less_or_equal(function__,"rho",rho,2); - check_greater_or_equal(function__,"tau",tau,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - stan::math::fill(mu_rho,DUMMY_VAR__); - local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to suppress unused var warning - - stan::math::initialize(mu_tau, DUMMY_VAR__); - stan::math::fill(mu_tau,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_rho, (Phi_approx(get_base1(mu_p,1,"mu_p",1)) * 2)); - stan::math::assign(mu_tau, stan::math::exp(get_base1(mu_p,2,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - local_scalar_t__ evSafe; - (void) evSafe; // dummy to suppress unused var warning - - stan::math::initialize(evSafe, 
DUMMY_VAR__); - stan::math::fill(evSafe,DUMMY_VAR__); - local_scalar_t__ evGamble; - (void) evGamble; // dummy to suppress unused var warning - - stan::math::initialize(evGamble, DUMMY_VAR__); - stan::math::fill(evGamble,DUMMY_VAR__); - local_scalar_t__ pGamble; - (void) pGamble; // dummy to suppress unused var warning - - stan::math::initialize(pGamble, DUMMY_VAR__); - stan::math::fill(pGamble,DUMMY_VAR__); - - - stan::math::assign(evSafe, pow(get_base1(get_base1(cert,i,"cert",1),t,"cert",2),get_base1(rho,i,"rho",1))); - stan::math::assign(evGamble, (0.5 * (pow(get_base1(get_base1(gain,i,"gain",1),t,"gain",2),get_base1(rho,i,"rho",1)) - pow(get_base1(get_base1(loss,i,"loss",1),t,"loss",2),get_base1(rho,i,"rho",1))))); - stan::math::assign(pGamble, inv_logit((get_base1(tau,i,"tau",1) * (evGamble - evSafe)))); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(gamble,i,"gamble",1),t,"gamble",2),pGamble))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(pGamble, base_rng__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - check_less_or_equal(function__,"mu_rho",mu_rho,2); - check_greater_or_equal(function__,"mu_tau",mu_tau,0); - - // write generated quantities - vars__.push_back(mu_rho); - vars__.push_back(mu_tau); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next 
line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_ra_noLA"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_ra_noRA_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_ra_noRA"); - reader.add_event(94, 92, "end", "model_ra_noRA"); - return reader; -} - -class model_ra_noRA : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > gamble; - vector > gain; - vector > cert; - vector > loss; -public: - model_ra_noRA(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_ra_noRA(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_ra_noRA_namespace::model_ra_noRA"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = 
context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("gamble", "N", N); - validate_non_negative_index("gamble", "T", T); - context__.validate_dims("data initialization", "gamble", "int", context__.to_vec(N,T)); - validate_non_negative_index("gamble", "N", N); - validate_non_negative_index("gamble", "T", T); - gamble = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("gamble"); - pos__ = 0; - size_t gamble_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < gamble_limit_1__; ++i_1__) { - size_t gamble_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < gamble_limit_0__; ++i_0__) { - gamble[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("gain", "N", N); - validate_non_negative_index("gain", "T", T); - context__.validate_dims("data initialization", "gain", "double", context__.to_vec(N,T)); - validate_non_negative_index("gain", "N", N); - validate_non_negative_index("gain", "T", T); - gain = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("gain"); - pos__ = 0; - size_t gain_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < gain_limit_1__; ++i_1__) { - size_t gain_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < gain_limit_0__; ++i_0__) { - gain[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("cert", "N", N); - validate_non_negative_index("cert", "T", T); - context__.validate_dims("data 
initialization", "cert", "double", context__.to_vec(N,T)); - validate_non_negative_index("cert", "N", N); - validate_non_negative_index("cert", "T", T); - cert = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("cert"); - pos__ = 0; - size_t cert_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cert_limit_1__; ++i_1__) { - size_t cert_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cert_limit_0__; ++i_0__) { - cert[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("loss", "N", N); - validate_non_negative_index("loss", "T", T); - context__.validate_dims("data initialization", "loss", "double", context__.to_vec(N,T)); - validate_non_negative_index("loss", "N", N); - validate_non_negative_index("loss", "T", T); - loss = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("loss"); - pos__ = 0; - size_t loss_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < loss_limit_1__; ++i_1__) { - size_t loss_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < loss_limit_0__; ++i_0__) { - loss[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"gamble[k0__][k1__]",gamble[k0__][k1__],-(1)); - check_less_or_equal(function__,"gamble[k0__][k1__]",gamble[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"gain[k0__][k1__]",gain[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"loss[k0__][k1__]",loss[k0__][k1__],0); - } - } - // initialize data 
variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "2", 2); - num_params_r__ += 2; - validate_non_negative_index("sigma", "2", 2); - num_params_r__ += 2; - validate_non_negative_index("lambda_p", "N", N); - num_params_r__ += N; - validate_non_negative_index("tau_p", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_ra_noRA() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "2", 2); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(2)); - vector_d mu_p(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "2", 2); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(2)); - vector_d sigma(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - 
writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("lambda_p"))) - throw std::runtime_error("variable lambda_p missing"); - vals_r__ = context__.vals_r("lambda_p"); - pos__ = 0U; - validate_non_negative_index("lambda_p", "N", N); - context__.validate_dims("initialization", "lambda_p", "vector_d", context__.to_vec(N)); - vector_d lambda_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - lambda_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(lambda_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable lambda_p: ") + e.what()); - } - - if (!(context__.contains_r("tau_p"))) - throw std::runtime_error("variable tau_p missing"); - vals_r__ = context__.vals_r("tau_p"); - pos__ = 0U; - validate_non_negative_index("tau_p", "N", N); - context__.validate_dims("initialization", "tau_p", "vector_d", context__.to_vec(N)); - vector_d tau_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - tau_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(tau_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau_p: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ 
DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(2,lp__); - else - mu_p = in__.vector_constrain(2); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,2,lp__); - else - sigma = in__.vector_lb_constrain(0,2); - - Eigen::Matrix lambda_p; - (void) lambda_p; // dummy to suppress unused var warning - if (jacobian__) - lambda_p = in__.vector_constrain(N,lp__); - else - lambda_p = in__.vector_constrain(N); - - Eigen::Matrix tau_p; - (void) tau_p; // dummy to suppress unused var warning - if (jacobian__) - tau_p = in__.vector_constrain(N,lp__); - else - tau_p = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(lambda_p,i,"lambda_p",1)))) * 5), - "assigning variable lambda"); - } - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),tau_p)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if 
(stan::math::is_uninitialized(lambda(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: lambda" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(tau(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,5); - check_greater_or_equal(function__,"tau",tau,0); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1.0)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(lambda_p, 0, 1.0)); - lp_accum__.add(normal_log(tau_p, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - local_scalar_t__ evSafe; - (void) evSafe; // dummy to suppress unused var warning - - stan::math::initialize(evSafe, DUMMY_VAR__); - stan::math::fill(evSafe,DUMMY_VAR__); - local_scalar_t__ evGamble; - (void) evGamble; // dummy to suppress unused var warning - - stan::math::initialize(evGamble, DUMMY_VAR__); - stan::math::fill(evGamble,DUMMY_VAR__); - local_scalar_t__ pGamble; - (void) pGamble; // dummy to suppress unused var warning - - stan::math::initialize(pGamble, DUMMY_VAR__); - stan::math::fill(pGamble,DUMMY_VAR__); - - - stan::math::assign(evSafe, get_base1(get_base1(cert,i,"cert",1),t,"cert",2)); - stan::math::assign(evGamble, (0.5 * (get_base1(get_base1(gain,i,"gain",1),t,"gain",2) - (get_base1(lambda,i,"lambda",1) * get_base1(get_base1(loss,i,"loss",1),t,"loss",2))))); - stan::math::assign(pGamble, inv_logit((get_base1(tau,i,"tau",1) * (evGamble - evSafe)))); - 
lp_accum__.add(bernoulli_log(get_base1(get_base1(gamble,i,"gamble",1),t,"gamble",2), pGamble)); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("lambda_p"); - names__.push_back("tau_p"); - names__.push_back("lambda"); - names__.push_back("tau"); - names__.push_back("mu_lambda"); - names__.push_back("mu_tau"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& 
base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_ra_noRA_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(2); - vector_d sigma = in__.vector_lb_constrain(0,2); - vector_d lambda_p = in__.vector_constrain(N); - vector_d tau_p = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau_p[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * 
get_base1(lambda_p,i,"lambda_p",1)))) * 5), - "assigning variable lambda"); - } - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),tau_p)))); - - // validate transformed parameters - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,5); - check_greater_or_equal(function__,"tau",tau,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_lambda; - (void) mu_lambda; // dummy to suppress unused var warning - - stan::math::initialize(mu_lambda, DUMMY_VAR__); - stan::math::fill(mu_lambda,DUMMY_VAR__); - local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to suppress unused var warning - - stan::math::initialize(mu_tau, DUMMY_VAR__); - stan::math::fill(mu_tau,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_lambda, (Phi_approx(get_base1(mu_p,1,"mu_p",1)) * 5)); - stan::math::assign(mu_tau, stan::math::exp(get_base1(mu_p,2,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(log_lik, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - local_scalar_t__ evSafe; - (void) evSafe; // dummy to suppress unused var warning - - stan::math::initialize(evSafe, DUMMY_VAR__); - stan::math::fill(evSafe,DUMMY_VAR__); - local_scalar_t__ evGamble; - (void) evGamble; // dummy to suppress unused var warning - - stan::math::initialize(evGamble, DUMMY_VAR__); - stan::math::fill(evGamble,DUMMY_VAR__); - local_scalar_t__ pGamble; - (void) pGamble; // dummy to suppress unused var warning - - stan::math::initialize(pGamble, DUMMY_VAR__); - stan::math::fill(pGamble,DUMMY_VAR__); - - - stan::math::assign(evSafe, get_base1(get_base1(cert,i,"cert",1),t,"cert",2)); - stan::math::assign(evGamble, (0.5 * (get_base1(get_base1(gain,i,"gain",1),t,"gain",2) - (get_base1(lambda,i,"lambda",1) * get_base1(get_base1(loss,i,"loss",1),t,"loss",2))))); - stan::math::assign(pGamble, inv_logit((get_base1(tau,i,"tau",1) * (evGamble - evSafe)))); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(gamble,i,"gamble",1),t,"gamble",2),pGamble))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(pGamble, base_rng__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_lambda",mu_lambda,0); - check_less_or_equal(function__,"mu_lambda",mu_lambda,5); - check_greater_or_equal(function__,"mu_tau",mu_tau,0); - - // write generated quantities - vars__.push_back(mu_lambda); - vars__.push_back(mu_tau); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - 
for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_ra_noRA"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_ra_prospect_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_ra_prospect"); - reader.add_event(96, 94, "end", "model_ra_prospect"); - return reader; -} - -class model_ra_prospect : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > gamble; - vector > cert; - vector > gain; - vector > loss; -public: - model_ra_prospect(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_ra_prospect(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_ra_prospect_namespace::model_ra_prospect"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N 
= int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("gamble", "N", N); - validate_non_negative_index("gamble", "T", T); - context__.validate_dims("data initialization", "gamble", "int", context__.to_vec(N,T)); - validate_non_negative_index("gamble", "N", N); - validate_non_negative_index("gamble", "T", T); - gamble = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("gamble"); - pos__ = 0; - size_t gamble_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < gamble_limit_1__; ++i_1__) { - size_t gamble_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < gamble_limit_0__; ++i_0__) { - gamble[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("cert", "N", N); - validate_non_negative_index("cert", "T", T); - context__.validate_dims("data initialization", "cert", "double", context__.to_vec(N,T)); - validate_non_negative_index("cert", "N", N); - validate_non_negative_index("cert", "T", T); - cert = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("cert"); - pos__ = 0; - size_t cert_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cert_limit_1__; ++i_1__) { - size_t cert_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cert_limit_0__; ++i_0__) { - cert[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("gain", "N", N); - validate_non_negative_index("gain", "T", T); - 
context__.validate_dims("data initialization", "gain", "double", context__.to_vec(N,T)); - validate_non_negative_index("gain", "N", N); - validate_non_negative_index("gain", "T", T); - gain = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("gain"); - pos__ = 0; - size_t gain_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < gain_limit_1__; ++i_1__) { - size_t gain_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < gain_limit_0__; ++i_0__) { - gain[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("loss", "N", N); - validate_non_negative_index("loss", "T", T); - context__.validate_dims("data initialization", "loss", "double", context__.to_vec(N,T)); - validate_non_negative_index("loss", "N", N); - validate_non_negative_index("loss", "T", T); - loss = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("loss"); - pos__ = 0; - size_t loss_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < loss_limit_1__; ++i_1__) { - size_t loss_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < loss_limit_0__; ++i_0__) { - loss[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"gamble[k0__][k1__]",gamble[k0__][k1__],-(1)); - check_less_or_equal(function__,"gamble[k0__][k1__]",gamble[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"gain[k0__][k1__]",gain[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"loss[k0__][k1__]",loss[k0__][k1__],0); - 
} - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("rho_p", "N", N); - num_params_r__ += N; - validate_non_negative_index("lambda_p", "N", N); - num_params_r__ += N; - validate_non_negative_index("tau_p", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_ra_prospect() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d 
sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("rho_p"))) - throw std::runtime_error("variable rho_p missing"); - vals_r__ = context__.vals_r("rho_p"); - pos__ = 0U; - validate_non_negative_index("rho_p", "N", N); - context__.validate_dims("initialization", "rho_p", "vector_d", context__.to_vec(N)); - vector_d rho_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_p: ") + e.what()); - } - - if (!(context__.contains_r("lambda_p"))) - throw std::runtime_error("variable lambda_p missing"); - vals_r__ = context__.vals_r("lambda_p"); - pos__ = 0U; - validate_non_negative_index("lambda_p", "N", N); - context__.validate_dims("initialization", "lambda_p", "vector_d", context__.to_vec(N)); - vector_d lambda_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - lambda_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(lambda_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable lambda_p: ") + e.what()); - } - - if (!(context__.contains_r("tau_p"))) - throw std::runtime_error("variable tau_p missing"); - vals_r__ = context__.vals_r("tau_p"); - pos__ = 0U; - validate_non_negative_index("tau_p", "N", N); - context__.validate_dims("initialization", "tau_p", "vector_d", context__.to_vec(N)); - vector_d tau_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - tau_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(tau_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming 
variable tau_p: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix rho_p; - (void) rho_p; // dummy to suppress unused var warning - if (jacobian__) - rho_p = in__.vector_constrain(N,lp__); - else - rho_p = in__.vector_constrain(N); - - Eigen::Matrix lambda_p; - (void) lambda_p; // dummy to suppress unused var warning - if (jacobian__) - lambda_p = in__.vector_constrain(N,lp__); - else - lambda_p = in__.vector_constrain(N); - - Eigen::Matrix tau_p; - (void) tau_p; // dummy to suppress unused var warning - if (jacobian__) - tau_p = in__.vector_constrain(N,lp__); - else - tau_p = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - 
stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(rho, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(rho_p,i,"rho_p",1)))) * 2), - "assigning variable rho"); - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(lambda_p,i,"lambda_p",1)))) * 5), - "assigning variable lambda"); - } - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),tau_p)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(lambda(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: lambda" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(tau(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* 
function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"rho",rho,0); - check_less_or_equal(function__,"rho",rho,2); - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,5); - check_greater_or_equal(function__,"tau",tau,0); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1.0)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(rho_p, 0, 1.0)); - lp_accum__.add(normal_log(lambda_p, 0, 1.0)); - lp_accum__.add(normal_log(tau_p, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - local_scalar_t__ evSafe; - (void) evSafe; // dummy to suppress unused var warning - - stan::math::initialize(evSafe, DUMMY_VAR__); - stan::math::fill(evSafe,DUMMY_VAR__); - local_scalar_t__ evGamble; - (void) evGamble; // dummy to suppress unused var warning - - stan::math::initialize(evGamble, DUMMY_VAR__); - stan::math::fill(evGamble,DUMMY_VAR__); - local_scalar_t__ pGamble; - (void) pGamble; // dummy to suppress unused var warning - - stan::math::initialize(pGamble, DUMMY_VAR__); - stan::math::fill(pGamble,DUMMY_VAR__); - - - stan::math::assign(evSafe, pow(get_base1(get_base1(cert,i,"cert",1),t,"cert",2),get_base1(rho,i,"rho",1))); - stan::math::assign(evGamble, (0.5 * (pow(get_base1(get_base1(gain,i,"gain",1),t,"gain",2),get_base1(rho,i,"rho",1)) - (get_base1(lambda,i,"lambda",1) * pow(get_base1(get_base1(loss,i,"loss",1),t,"loss",2),get_base1(rho,i,"rho",1)))))); - stan::math::assign(pGamble, inv_logit((get_base1(tau,i,"tau",1) * (evGamble - evSafe)))); - lp_accum__.add(bernoulli_log(get_base1(get_base1(gamble,i,"gamble",1),t,"gamble",2), pGamble)); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw 
std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("rho_p"); - names__.push_back("lambda_p"); - names__.push_back("tau_p"); - names__.push_back("rho"); - names__.push_back("lambda"); - names__.push_back("tau"); - names__.push_back("mu_rho"); - names__.push_back("mu_lambda"); - names__.push_back("mu_tau"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - 
std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_ra_prospect_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d rho_p = in__.vector_constrain(N); - vector_d lambda_p = in__.vector_constrain(N); - vector_d tau_p = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau_p[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - 
stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(rho, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(rho_p,i,"rho_p",1)))) * 2), - "assigning variable rho"); - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(lambda_p,i,"lambda_p",1)))) * 5), - "assigning variable lambda"); - } - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),tau_p)))); - - // validate transformed parameters - check_greater_or_equal(function__,"rho",rho,0); - check_less_or_equal(function__,"rho",rho,2); - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,5); - check_greater_or_equal(function__,"tau",tau,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - stan::math::fill(mu_rho,DUMMY_VAR__); - local_scalar_t__ mu_lambda; - (void) mu_lambda; // dummy to suppress unused var warning - - stan::math::initialize(mu_lambda, DUMMY_VAR__); - stan::math::fill(mu_lambda,DUMMY_VAR__); - local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to suppress unused var warning - - stan::math::initialize(mu_tau, DUMMY_VAR__); - stan::math::fill(mu_tau,DUMMY_VAR__); - 
validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_rho, (Phi_approx(get_base1(mu_p,1,"mu_p",1)) * 2)); - stan::math::assign(mu_lambda, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 5)); - stan::math::assign(mu_tau, stan::math::exp(get_base1(mu_p,3,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - local_scalar_t__ evSafe; - (void) evSafe; // dummy to suppress unused var warning - - stan::math::initialize(evSafe, DUMMY_VAR__); - stan::math::fill(evSafe,DUMMY_VAR__); - local_scalar_t__ evGamble; - (void) evGamble; // dummy to suppress unused var warning - - stan::math::initialize(evGamble, DUMMY_VAR__); - stan::math::fill(evGamble,DUMMY_VAR__); - local_scalar_t__ pGamble; - (void) pGamble; // dummy to suppress unused var warning - - stan::math::initialize(pGamble, DUMMY_VAR__); - stan::math::fill(pGamble,DUMMY_VAR__); - - - stan::math::assign(evSafe, pow(get_base1(get_base1(cert,i,"cert",1),t,"cert",2),get_base1(rho,i,"rho",1))); - stan::math::assign(evGamble, (0.5 * (pow(get_base1(get_base1(gain,i,"gain",1),t,"gain",2),get_base1(rho,i,"rho",1)) - (get_base1(lambda,i,"lambda",1) * 
pow(stan::math::fabs(get_base1(get_base1(loss,i,"loss",1),t,"loss",2)),get_base1(rho,i,"rho",1)))))); - stan::math::assign(pGamble, inv_logit((get_base1(tau,i,"tau",1) * (evGamble - evSafe)))); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(gamble,i,"gamble",1),t,"gamble",2),pGamble))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(pGamble, base_rng__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - check_less_or_equal(function__,"mu_rho",mu_rho,2); - check_greater_or_equal(function__,"mu_lambda",mu_lambda,0); - check_less_or_equal(function__,"mu_lambda",mu_lambda,5); - check_greater_or_equal(function__,"mu_tau",mu_tau,0); - - // write generated quantities - vars__.push_back(mu_rho); - vars__.push_back(mu_lambda); - vars__.push_back(mu_tau); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - 
std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_ra_prospect"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_rdt_happiness_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_rdt_happiness"); - reader.add_event(145, 143, "end", "model_rdt_happiness"); - return reader; -} - -class model_rdt_happiness : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > gamble; - vector > type; - vector > cert; - vector > gain; - vector > loss; - vector > outcome; - vector > happy; - vector > RT_happy; -public: - model_rdt_happiness(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_rdt_happiness(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_rdt_happiness_namespace::model_rdt_happiness"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables 
- try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("gamble", "N", N); - validate_non_negative_index("gamble", "T", T); - context__.validate_dims("data initialization", "gamble", "int", context__.to_vec(N,T)); - validate_non_negative_index("gamble", "N", N); - validate_non_negative_index("gamble", "T", T); - gamble = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("gamble"); - pos__ = 0; - size_t gamble_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < gamble_limit_1__; ++i_1__) { - size_t gamble_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < gamble_limit_0__; ++i_0__) { - gamble[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("type", "N", N); - validate_non_negative_index("type", "T", T); - context__.validate_dims("data initialization", "type", "int", context__.to_vec(N,T)); - validate_non_negative_index("type", "N", N); - validate_non_negative_index("type", "T", T); - type = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("type"); - pos__ = 0; - size_t type_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < type_limit_1__; ++i_1__) { - size_t type_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < type_limit_0__; ++i_0__) { - type[i_0__][i_1__] = vals_i__[pos__++]; - } - } - 
validate_non_negative_index("cert", "N", N); - validate_non_negative_index("cert", "T", T); - context__.validate_dims("data initialization", "cert", "double", context__.to_vec(N,T)); - validate_non_negative_index("cert", "N", N); - validate_non_negative_index("cert", "T", T); - cert = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("cert"); - pos__ = 0; - size_t cert_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cert_limit_1__; ++i_1__) { - size_t cert_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cert_limit_0__; ++i_0__) { - cert[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("gain", "N", N); - validate_non_negative_index("gain", "T", T); - context__.validate_dims("data initialization", "gain", "double", context__.to_vec(N,T)); - validate_non_negative_index("gain", "N", N); - validate_non_negative_index("gain", "T", T); - gain = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("gain"); - pos__ = 0; - size_t gain_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < gain_limit_1__; ++i_1__) { - size_t gain_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < gain_limit_0__; ++i_0__) { - gain[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("loss", "N", N); - validate_non_negative_index("loss", "T", T); - context__.validate_dims("data initialization", "loss", "double", context__.to_vec(N,T)); - validate_non_negative_index("loss", "N", N); - validate_non_negative_index("loss", "T", T); - loss = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("loss"); - pos__ = 0; - size_t loss_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < loss_limit_1__; ++i_1__) { - size_t loss_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < loss_limit_0__; ++i_0__) { - loss[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", 
"double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("happy", "N", N); - validate_non_negative_index("happy", "T", T); - context__.validate_dims("data initialization", "happy", "double", context__.to_vec(N,T)); - validate_non_negative_index("happy", "N", N); - validate_non_negative_index("happy", "T", T); - happy = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("happy"); - pos__ = 0; - size_t happy_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < happy_limit_1__; ++i_1__) { - size_t happy_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < happy_limit_0__; ++i_0__) { - happy[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("RT_happy", "N", N); - validate_non_negative_index("RT_happy", "T", T); - context__.validate_dims("data initialization", "RT_happy", "double", context__.to_vec(N,T)); - validate_non_negative_index("RT_happy", "N", N); - validate_non_negative_index("RT_happy", "T", T); - RT_happy = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("RT_happy"); - pos__ = 0; - size_t RT_happy_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < RT_happy_limit_1__; ++i_1__) { - size_t RT_happy_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < RT_happy_limit_0__; ++i_0__) { - RT_happy[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); 
- check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"gamble[k0__][k1__]",gamble[k0__][k1__],-(1)); - check_less_or_equal(function__,"gamble[k0__][k1__]",gamble[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"type[k0__][k1__]",type[k0__][k1__],-(1)); - check_less_or_equal(function__,"type[k0__][k1__]",type[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"gain[k0__][k1__]",gain[k0__][k1__],0); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"loss[k0__][k1__]",loss[k0__][k1__],0); - } - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "6", 6); - num_params_r__ += 6; - validate_non_negative_index("sigma", "6", 6); - num_params_r__ += 6; - validate_non_negative_index("w0_p", "N", N); - num_params_r__ += N; - validate_non_negative_index("w1_p", "N", N); - num_params_r__ += N; - validate_non_negative_index("w2_p", "N", N); - num_params_r__ += N; - validate_non_negative_index("w3_p", "N", N); - num_params_r__ += N; - validate_non_negative_index("gam_p", "N", N); - num_params_r__ += N; - validate_non_negative_index("sig_p", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_rdt_happiness() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - 
std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "6", 6); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(6)); - vector_d mu_p(static_cast(6)); - for (int j1__ = 0U; j1__ < 6; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "6", 6); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(6)); - vector_d sigma(static_cast(6)); - for (int j1__ = 0U; j1__ < 6; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("w0_p"))) - throw std::runtime_error("variable w0_p missing"); - vals_r__ = context__.vals_r("w0_p"); - pos__ = 0U; - validate_non_negative_index("w0_p", "N", N); - context__.validate_dims("initialization", "w0_p", "vector_d", context__.to_vec(N)); - vector_d w0_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - w0_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(w0_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable w0_p: ") + e.what()); - } - - if (!(context__.contains_r("w1_p"))) - throw 
std::runtime_error("variable w1_p missing"); - vals_r__ = context__.vals_r("w1_p"); - pos__ = 0U; - validate_non_negative_index("w1_p", "N", N); - context__.validate_dims("initialization", "w1_p", "vector_d", context__.to_vec(N)); - vector_d w1_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - w1_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(w1_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable w1_p: ") + e.what()); - } - - if (!(context__.contains_r("w2_p"))) - throw std::runtime_error("variable w2_p missing"); - vals_r__ = context__.vals_r("w2_p"); - pos__ = 0U; - validate_non_negative_index("w2_p", "N", N); - context__.validate_dims("initialization", "w2_p", "vector_d", context__.to_vec(N)); - vector_d w2_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - w2_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(w2_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable w2_p: ") + e.what()); - } - - if (!(context__.contains_r("w3_p"))) - throw std::runtime_error("variable w3_p missing"); - vals_r__ = context__.vals_r("w3_p"); - pos__ = 0U; - validate_non_negative_index("w3_p", "N", N); - context__.validate_dims("initialization", "w3_p", "vector_d", context__.to_vec(N)); - vector_d w3_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - w3_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(w3_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable w3_p: ") + e.what()); - } - - if (!(context__.contains_r("gam_p"))) - throw std::runtime_error("variable gam_p missing"); - vals_r__ = context__.vals_r("gam_p"); - pos__ = 0U; - validate_non_negative_index("gam_p", "N", N); - context__.validate_dims("initialization", "gam_p", "vector_d", context__.to_vec(N)); - vector_d gam_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; 
++j1__) - gam_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(gam_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable gam_p: ") + e.what()); - } - - if (!(context__.contains_r("sig_p"))) - throw std::runtime_error("variable sig_p missing"); - vals_r__ = context__.vals_r("sig_p"); - pos__ = 0U; - validate_non_negative_index("sig_p", "N", N); - context__.validate_dims("initialization", "sig_p", "vector_d", context__.to_vec(N)); - vector_d sig_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - sig_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(sig_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sig_p: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(6,lp__); - else - mu_p = in__.vector_constrain(6); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,6,lp__); - else - sigma = 
in__.vector_lb_constrain(0,6); - - Eigen::Matrix w0_p; - (void) w0_p; // dummy to suppress unused var warning - if (jacobian__) - w0_p = in__.vector_constrain(N,lp__); - else - w0_p = in__.vector_constrain(N); - - Eigen::Matrix w1_p; - (void) w1_p; // dummy to suppress unused var warning - if (jacobian__) - w1_p = in__.vector_constrain(N,lp__); - else - w1_p = in__.vector_constrain(N); - - Eigen::Matrix w2_p; - (void) w2_p; // dummy to suppress unused var warning - if (jacobian__) - w2_p = in__.vector_constrain(N,lp__); - else - w2_p = in__.vector_constrain(N); - - Eigen::Matrix w3_p; - (void) w3_p; // dummy to suppress unused var warning - if (jacobian__) - w3_p = in__.vector_constrain(N,lp__); - else - w3_p = in__.vector_constrain(N); - - Eigen::Matrix gam_p; - (void) gam_p; // dummy to suppress unused var warning - if (jacobian__) - gam_p = in__.vector_constrain(N,lp__); - else - gam_p = in__.vector_constrain(N); - - Eigen::Matrix sig_p; - (void) sig_p; // dummy to suppress unused var warning - if (jacobian__) - sig_p = in__.vector_constrain(N,lp__); - else - sig_p = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("w0", "N", N); - Eigen::Matrix w0(static_cast(N)); - (void) w0; // dummy to suppress unused var warning - - stan::math::initialize(w0, DUMMY_VAR__); - stan::math::fill(w0,DUMMY_VAR__); - validate_non_negative_index("w1", "N", N); - Eigen::Matrix w1(static_cast(N)); - (void) w1; // dummy to suppress unused var warning - - stan::math::initialize(w1, DUMMY_VAR__); - stan::math::fill(w1,DUMMY_VAR__); - validate_non_negative_index("w2", "N", N); - Eigen::Matrix w2(static_cast(N)); - (void) w2; // dummy to suppress unused var warning - - stan::math::initialize(w2, DUMMY_VAR__); - stan::math::fill(w2,DUMMY_VAR__); - validate_non_negative_index("w3", "N", N); - Eigen::Matrix w3(static_cast(N)); - (void) w3; // dummy to suppress unused var warning - - stan::math::initialize(w3, DUMMY_VAR__); - 
stan::math::fill(w3,DUMMY_VAR__); - validate_non_negative_index("gam", "N", N); - Eigen::Matrix gam(static_cast(N)); - (void) gam; // dummy to suppress unused var warning - - stan::math::initialize(gam, DUMMY_VAR__); - stan::math::fill(gam,DUMMY_VAR__); - validate_non_negative_index("sig", "N", N); - Eigen::Matrix sig(static_cast(N)); - (void) sig; // dummy to suppress unused var warning - - stan::math::initialize(sig, DUMMY_VAR__); - stan::math::fill(sig,DUMMY_VAR__); - - - stan::math::assign(w0, add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),w0_p))); - stan::math::assign(w1, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),w1_p))); - stan::math::assign(w2, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),w2_p))); - stan::math::assign(w3, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),w3_p))); - for (int i = 1; i <= N; ++i) { - - stan::model::assign(gam, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,5,"mu_p",1) + (get_base1(sigma,5,"sigma",1) * get_base1(gam_p,i,"gam_p",1)))), - "assigning variable gam"); - } - stan::math::assign(sig, stan::math::exp(add(get_base1(mu_p,6,"mu_p",1),multiply(get_base1(sigma,6,"sigma",1),sig_p)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(w0(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: w0" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(w1(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: w1" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(w2(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: w2" << '[' << i0__ << ']'; 
- throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(w3(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: w3" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(gam(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: gam" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(sig(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: sig" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"gam",gam,0); - check_less_or_equal(function__,"gam",gam,1); - check_greater_or_equal(function__,"sig",sig,0); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1.0)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(w0_p, 0, 1.0)); - lp_accum__.add(normal_log(w1_p, 0, 1.0)); - lp_accum__.add(normal_log(w2_p, 0, 1.0)); - lp_accum__.add(normal_log(w3_p, 0, 1.0)); - lp_accum__.add(normal_log(gam_p, 0, 1.0)); - lp_accum__.add(normal_log(sig_p, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - { - local_scalar_t__ cert_sum; - (void) cert_sum; // dummy to suppress unused var warning - - stan::math::initialize(cert_sum, DUMMY_VAR__); - stan::math::fill(cert_sum,DUMMY_VAR__); - local_scalar_t__ ev_sum; - (void) ev_sum; // dummy to suppress unused var warning - - stan::math::initialize(ev_sum, DUMMY_VAR__); - stan::math::fill(ev_sum,DUMMY_VAR__); - local_scalar_t__ rpe_sum; - (void) rpe_sum; // dummy to suppress unused var warning - - stan::math::initialize(rpe_sum, DUMMY_VAR__); - stan::math::fill(rpe_sum,DUMMY_VAR__); - - - 
stan::math::assign(cert_sum, 0); - stan::math::assign(ev_sum, 0); - stan::math::assign(rpe_sum, 0); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - if (as_bool((primitive_value(logical_eq(t,1)) || primitive_value((primitive_value(logical_gt(t,1)) && primitive_value(logical_neq(get_base1(get_base1(RT_happy,i,"RT_happy",1),t,"RT_happy",2),get_base1(get_base1(RT_happy,i,"RT_happy",1),(t - 1),"RT_happy",2)))))))) { - - lp_accum__.add(normal_log(get_base1(get_base1(happy,i,"happy",1),t,"happy",2), (((get_base1(w0,i,"w0",1) + (get_base1(w1,i,"w1",1) * cert_sum)) + (get_base1(w2,i,"w2",1) * ev_sum)) + (get_base1(w3,i,"w3",1) * rpe_sum)), get_base1(sig,i,"sig",1))); - } - if (as_bool(logical_eq(get_base1(get_base1(gamble,i,"gamble",1),t,"gamble",2),0))) { - - stan::math::assign(cert_sum, stan::model::deep_copy((cert_sum + (get_base1(get_base1(type,i,"type",1),t,"type",2) * get_base1(get_base1(cert,i,"cert",1),t,"cert",2))))); - } else { - - stan::math::assign(ev_sum, stan::model::deep_copy((ev_sum + (0.5 * (get_base1(get_base1(gain,i,"gain",1),t,"gain",2) - get_base1(get_base1(loss,i,"loss",1),t,"loss",2)))))); - stan::math::assign(rpe_sum, stan::model::deep_copy(((rpe_sum + get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - (0.5 * (get_base1(get_base1(gain,i,"gain",1),t,"gain",2) - get_base1(get_base1(loss,i,"loss",1),t,"loss",2)))))); - } - stan::math::assign(cert_sum, stan::model::deep_copy((get_base1(gam,i,"gam",1) * cert_sum))); - stan::math::assign(ev_sum, stan::model::deep_copy((get_base1(gam,i,"gam",1) * ev_sum))); - stan::math::assign(rpe_sum, stan::model::deep_copy((get_base1(gam,i,"gam",1) * rpe_sum))); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() 
- - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("w0_p"); - names__.push_back("w1_p"); - names__.push_back("w2_p"); - names__.push_back("w3_p"); - names__.push_back("gam_p"); - names__.push_back("sig_p"); - names__.push_back("w0"); - names__.push_back("w1"); - names__.push_back("w2"); - names__.push_back("w3"); - names__.push_back("gam"); - names__.push_back("sig"); - names__.push_back("mu_w0"); - names__.push_back("mu_w1"); - names__.push_back("mu_w2"); - names__.push_back("mu_w3"); - names__.push_back("mu_gam"); - names__.push_back("mu_sig"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(6); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(6); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - 
dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_rdt_happiness_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(6); - vector_d sigma = in__.vector_lb_constrain(0,6); - vector_d w0_p = in__.vector_constrain(N); - vector_d w1_p = in__.vector_constrain(N); - vector_d w2_p = in__.vector_constrain(N); - vector_d w3_p = in__.vector_constrain(N); - vector_d gam_p = in__.vector_constrain(N); - vector_d sig_p = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 6; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 6; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w0_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w1_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w2_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w3_p[k_0__]); - } - for 
(int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(gam_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(sig_p[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("w0", "N", N); - Eigen::Matrix w0(static_cast(N)); - (void) w0; // dummy to suppress unused var warning - - stan::math::initialize(w0, DUMMY_VAR__); - stan::math::fill(w0,DUMMY_VAR__); - validate_non_negative_index("w1", "N", N); - Eigen::Matrix w1(static_cast(N)); - (void) w1; // dummy to suppress unused var warning - - stan::math::initialize(w1, DUMMY_VAR__); - stan::math::fill(w1,DUMMY_VAR__); - validate_non_negative_index("w2", "N", N); - Eigen::Matrix w2(static_cast(N)); - (void) w2; // dummy to suppress unused var warning - - stan::math::initialize(w2, DUMMY_VAR__); - stan::math::fill(w2,DUMMY_VAR__); - validate_non_negative_index("w3", "N", N); - Eigen::Matrix w3(static_cast(N)); - (void) w3; // dummy to suppress unused var warning - - stan::math::initialize(w3, DUMMY_VAR__); - stan::math::fill(w3,DUMMY_VAR__); - validate_non_negative_index("gam", "N", N); - Eigen::Matrix gam(static_cast(N)); - (void) gam; // dummy to suppress unused var warning - - stan::math::initialize(gam, DUMMY_VAR__); - stan::math::fill(gam,DUMMY_VAR__); - validate_non_negative_index("sig", "N", N); - Eigen::Matrix sig(static_cast(N)); - (void) sig; // dummy to suppress unused var warning - - stan::math::initialize(sig, DUMMY_VAR__); - stan::math::fill(sig,DUMMY_VAR__); - - - stan::math::assign(w0, add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),w0_p))); - stan::math::assign(w1, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),w1_p))); - stan::math::assign(w2, 
add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),w2_p))); - stan::math::assign(w3, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),w3_p))); - for (int i = 1; i <= N; ++i) { - - stan::model::assign(gam, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,5,"mu_p",1) + (get_base1(sigma,5,"sigma",1) * get_base1(gam_p,i,"gam_p",1)))), - "assigning variable gam"); - } - stan::math::assign(sig, stan::math::exp(add(get_base1(mu_p,6,"mu_p",1),multiply(get_base1(sigma,6,"sigma",1),sig_p)))); - - // validate transformed parameters - check_greater_or_equal(function__,"gam",gam,0); - check_less_or_equal(function__,"gam",gam,1); - check_greater_or_equal(function__,"sig",sig,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w0[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w1[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w2[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w3[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(gam[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(sig[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_w0; - (void) mu_w0; // dummy to suppress unused var warning - - stan::math::initialize(mu_w0, DUMMY_VAR__); - stan::math::fill(mu_w0,DUMMY_VAR__); - local_scalar_t__ mu_w1; - (void) mu_w1; // dummy to suppress unused var warning - - stan::math::initialize(mu_w1, DUMMY_VAR__); - stan::math::fill(mu_w1,DUMMY_VAR__); - local_scalar_t__ mu_w2; - (void) mu_w2; // dummy to suppress unused var warning - - stan::math::initialize(mu_w2, DUMMY_VAR__); - stan::math::fill(mu_w2,DUMMY_VAR__); - local_scalar_t__ mu_w3; - (void) mu_w3; // dummy to suppress unused var warning - - 
stan::math::initialize(mu_w3, DUMMY_VAR__); - stan::math::fill(mu_w3,DUMMY_VAR__); - local_scalar_t__ mu_gam; - (void) mu_gam; // dummy to suppress unused var warning - - stan::math::initialize(mu_gam, DUMMY_VAR__); - stan::math::fill(mu_gam,DUMMY_VAR__); - local_scalar_t__ mu_sig; - (void) mu_sig; // dummy to suppress unused var warning - - stan::math::initialize(mu_sig, DUMMY_VAR__); - stan::math::fill(mu_sig,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_w0, get_base1(mu_p,1,"mu_p",1)); - stan::math::assign(mu_w1, get_base1(mu_p,2,"mu_p",1)); - stan::math::assign(mu_w2, get_base1(mu_p,3,"mu_p",1)); - stan::math::assign(mu_w3, get_base1(mu_p,4,"mu_p",1)); - stan::math::assign(mu_gam, Phi_approx(get_base1(mu_p,5,"mu_p",1))); - stan::math::assign(mu_sig, stan::math::exp(get_base1(mu_p,6,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - { - local_scalar_t__ cert_sum; - (void) cert_sum; // dummy to suppress unused var warning - - stan::math::initialize(cert_sum, DUMMY_VAR__); - stan::math::fill(cert_sum,DUMMY_VAR__); - local_scalar_t__ ev_sum; - (void) ev_sum; // dummy to suppress unused var warning - - stan::math::initialize(ev_sum, DUMMY_VAR__); - stan::math::fill(ev_sum,DUMMY_VAR__); - local_scalar_t__ rpe_sum; - (void) rpe_sum; // dummy to suppress unused var warning - - stan::math::initialize(rpe_sum, DUMMY_VAR__); - 
stan::math::fill(rpe_sum,DUMMY_VAR__); - - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - stan::math::assign(cert_sum, 0); - stan::math::assign(ev_sum, 0); - stan::math::assign(rpe_sum, 0); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - if (as_bool((primitive_value(logical_eq(t,1)) || primitive_value((primitive_value(logical_gt(t,1)) && primitive_value(logical_neq(get_base1(get_base1(RT_happy,i,"RT_happy",1),t,"RT_happy",2),get_base1(get_base1(RT_happy,i,"RT_happy",1),(t - 1),"RT_happy",2)))))))) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + normal_log(get_base1(get_base1(happy,i,"happy",1),t,"happy",2),(((get_base1(w0,i,"w0",1) + (get_base1(w1,i,"w1",1) * cert_sum)) + (get_base1(w2,i,"w2",1) * ev_sum)) + (get_base1(w3,i,"w3",1) * rpe_sum)),get_base1(sig,i,"sig",1)))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - normal_rng((((get_base1(w0,i,"w0",1) + (get_base1(w1,i,"w1",1) * cert_sum)) + (get_base1(w2,i,"w2",1) * ev_sum)) + (get_base1(w3,i,"w3",1) * rpe_sum)),get_base1(sig,i,"sig",1), base_rng__), - "assigning variable y_pred"); - } - if (as_bool(logical_eq(get_base1(get_base1(gamble,i,"gamble",1),t,"gamble",2),0))) { - - stan::math::assign(cert_sum, stan::model::deep_copy((cert_sum + (get_base1(get_base1(type,i,"type",1),t,"type",2) * get_base1(get_base1(cert,i,"cert",1),t,"cert",2))))); - } else { - - stan::math::assign(ev_sum, stan::model::deep_copy((ev_sum + (0.5 * (get_base1(get_base1(gain,i,"gain",1),t,"gain",2) - get_base1(get_base1(loss,i,"loss",1),t,"loss",2)))))); - stan::math::assign(rpe_sum, stan::model::deep_copy(((rpe_sum + 
get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - (0.5 * (get_base1(get_base1(gain,i,"gain",1),t,"gain",2) - get_base1(get_base1(loss,i,"loss",1),t,"loss",2)))))); - } - stan::math::assign(cert_sum, stan::model::deep_copy((get_base1(gam,i,"gam",1) * cert_sum))); - stan::math::assign(ev_sum, stan::model::deep_copy((get_base1(gam,i,"gam",1) * ev_sum))); - stan::math::assign(rpe_sum, stan::model::deep_copy((get_base1(gam,i,"gam",1) * rpe_sum))); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_gam",mu_gam,0); - check_less_or_equal(function__,"mu_gam",mu_gam,1); - check_greater_or_equal(function__,"mu_sig",mu_sig,0); - - // write generated quantities - vars__.push_back(mu_w0); - vars__.push_back(mu_w1); - vars__.push_back(mu_w2); - vars__.push_back(mu_w3); - vars__.push_back(mu_gam); - vars__.push_back(mu_sig); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_rdt_happiness"; - } - - - void 
constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w0_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w1_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w2_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w3_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gam_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sig_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w0" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w1" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w3" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gam" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sig" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w0"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w3"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_gam"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_sig"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w0_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w1_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w2_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w3_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gam_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sig_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w0" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w1" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w3" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gam" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sig" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w0"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w3"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_gam"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_sig"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_ts_par4_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_ts_par4"); - reader.add_event(203, 201, "end", "model_ts_par4"); - return reader; -} - -class model_ts_par4 : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > level1_choice; - vector > level2_choice; - vector > reward; - double trans_prob; -public: - model_ts_par4(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_ts_par4(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_ts_par4_namespace::model_ts_par4"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = 
int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("level1_choice", "N", N); - validate_non_negative_index("level1_choice", "T", T); - context__.validate_dims("data initialization", "level1_choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("level1_choice", "N", N); - validate_non_negative_index("level1_choice", "T", T); - level1_choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("level1_choice"); - pos__ = 0; - size_t level1_choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < level1_choice_limit_1__; ++i_1__) { - size_t level1_choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < level1_choice_limit_0__; ++i_0__) { - level1_choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("level2_choice", "N", N); - validate_non_negative_index("level2_choice", "T", T); - context__.validate_dims("data initialization", "level2_choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("level2_choice", "N", N); - validate_non_negative_index("level2_choice", "T", T); - level2_choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("level2_choice"); - pos__ = 0; - size_t level2_choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < level2_choice_limit_1__; ++i_1__) { - size_t level2_choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < 
level2_choice_limit_0__; ++i_0__) { - level2_choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("reward", "N", N); - validate_non_negative_index("reward", "T", T); - context__.validate_dims("data initialization", "reward", "int", context__.to_vec(N,T)); - validate_non_negative_index("reward", "N", N); - validate_non_negative_index("reward", "T", T); - reward = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("reward"); - pos__ = 0; - size_t reward_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < reward_limit_1__; ++i_1__) { - size_t reward_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < reward_limit_0__; ++i_0__) { - reward[i_0__][i_1__] = vals_i__[pos__++]; - } - } - context__.validate_dims("data initialization", "trans_prob", "double", context__.to_vec()); - trans_prob = double(0); - vals_r__ = context__.vals_r("trans_prob"); - pos__ = 0; - trans_prob = vals_r__[pos__++]; - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"level1_choice[k0__][k1__]",level1_choice[k0__][k1__],1); - check_less_or_equal(function__,"level1_choice[k0__][k1__]",level1_choice[k0__][k1__],2); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"level2_choice[k0__][k1__]",level2_choice[k0__][k1__],1); - check_less_or_equal(function__,"level2_choice[k0__][k1__]",level2_choice[k0__][k1__],4); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"reward[k0__][k1__]",reward[k0__][k1__],0); - 
check_less_or_equal(function__,"reward[k0__][k1__]",reward[k0__][k1__],1); - } - } - check_greater_or_equal(function__,"trans_prob",trans_prob,0); - check_less_or_equal(function__,"trans_prob",trans_prob,1); - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "4", 4); - num_params_r__ += 4; - validate_non_negative_index("sigma", "4", 4); - num_params_r__ += 4; - validate_non_negative_index("a_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("pi_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("w_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_ts_par4() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "4", 4); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(4)); - vector_d mu_p(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - 
throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "4", 4); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(4)); - vector_d sigma(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("a_pr"))) - throw std::runtime_error("variable a_pr missing"); - vals_r__ = context__.vals_r("a_pr"); - pos__ = 0U; - validate_non_negative_index("a_pr", "N", N); - context__.validate_dims("initialization", "a_pr", "vector_d", context__.to_vec(N)); - vector_d a_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - a_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(a_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable a_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - if (!(context__.contains_r("pi_pr"))) - throw std::runtime_error("variable pi_pr missing"); - vals_r__ = context__.vals_r("pi_pr"); - pos__ = 0U; - validate_non_negative_index("pi_pr", "N", N); - context__.validate_dims("initialization", "pi_pr", "vector_d", context__.to_vec(N)); - vector_d 
pi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - pi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(pi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable pi_pr: ") + e.what()); - } - - if (!(context__.contains_r("w_pr"))) - throw std::runtime_error("variable w_pr missing"); - vals_r__ = context__.vals_r("w_pr"); - pos__ = 0U; - validate_non_negative_index("w_pr", "N", N); - context__.validate_dims("initialization", "w_pr", "vector_d", context__.to_vec(N)); - vector_d w_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - w_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(w_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable w_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(4,lp__); - else - mu_p = in__.vector_constrain(4); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = 
in__.vector_lb_constrain(0,4,lp__); - else - sigma = in__.vector_lb_constrain(0,4); - - Eigen::Matrix a_pr; - (void) a_pr; // dummy to suppress unused var warning - if (jacobian__) - a_pr = in__.vector_constrain(N,lp__); - else - a_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - Eigen::Matrix pi_pr; - (void) pi_pr; // dummy to suppress unused var warning - if (jacobian__) - pi_pr = in__.vector_constrain(N,lp__); - else - pi_pr = in__.vector_constrain(N); - - Eigen::Matrix w_pr; - (void) w_pr; // dummy to suppress unused var warning - if (jacobian__) - w_pr = in__.vector_constrain(N,lp__); - else - w_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("a", "N", N); - Eigen::Matrix a(static_cast(N)); - (void) a; // dummy to suppress unused var warning - - stan::math::initialize(a, DUMMY_VAR__); - stan::math::fill(a,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - validate_non_negative_index("w", "N", N); - Eigen::Matrix w(static_cast(N)); - (void) w; // dummy to suppress unused var warning - - stan::math::initialize(w, DUMMY_VAR__); - stan::math::fill(w,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(a, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(a_pr,i,"a_pr",1)))), - "assigning variable a"); - 
stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))), - "assigning variable beta"); - stan::model::assign(pi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(pi_pr,i,"pi_pr",1)))) * 5), - "assigning variable pi"); - stan::model::assign(w, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), - "assigning variable w"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(a(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: a" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(pi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: pi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(w(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: w" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"a",a,0); - check_less_or_equal(function__,"a",a,1); - 
check_greater_or_equal(function__,"beta",beta,0); - check_greater_or_equal(function__,"pi",pi,0); - check_less_or_equal(function__,"pi",pi,5); - check_greater_or_equal(function__,"w",w,0); - check_less_or_equal(function__,"w",w,1); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(a_pr, 0, 1)); - lp_accum__.add(normal_log(beta_pr, 0, 1)); - lp_accum__.add(normal_log(pi_pr, 0, 1)); - lp_accum__.add(normal_log(w_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("v_mb", "2", 2); - Eigen::Matrix v_mb(static_cast(2)); - (void) v_mb; // dummy to suppress unused var warning - - stan::math::initialize(v_mb, DUMMY_VAR__); - stan::math::fill(v_mb,DUMMY_VAR__); - validate_non_negative_index("v_mf", "6", 6); - Eigen::Matrix v_mf(static_cast(6)); - (void) v_mf; // dummy to suppress unused var warning - - stan::math::initialize(v_mf, DUMMY_VAR__); - stan::math::fill(v_mf,DUMMY_VAR__); - validate_non_negative_index("v_hybrid", "2", 2); - Eigen::Matrix v_hybrid(static_cast(2)); - (void) v_hybrid; // dummy to suppress unused var warning - - stan::math::initialize(v_hybrid, DUMMY_VAR__); - stan::math::fill(v_hybrid,DUMMY_VAR__); - local_scalar_t__ level1_prob_choice2; - (void) level1_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level1_prob_choice2, DUMMY_VAR__); - stan::math::fill(level1_prob_choice2,DUMMY_VAR__); - local_scalar_t__ level2_prob_choice2; - (void) level2_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level2_prob_choice2, DUMMY_VAR__); - stan::math::fill(level2_prob_choice2,DUMMY_VAR__); - int level1_choice_01(0); - (void) level1_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level1_choice_01, std::numeric_limits::min()); - int level2_choice_01(0); - (void) level2_choice_01; // dummy to suppress unused var warning - - 
stan::math::fill(level2_choice_01, std::numeric_limits::min()); - - - stan::math::assign(v_mb, rep_vector(0.0,2)); - stan::math::assign(v_mf, rep_vector(0.0,6)); - stan::math::assign(v_hybrid, rep_vector(0.0,2)); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((trans_prob * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + ((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - (((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + (trans_prob * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,1,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,1,"v_mf",1))), - "assigning variable v_hybrid"); - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,2,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,2,"v_mf",1))), - "assigning variable v_hybrid"); - stan::math::assign(level1_choice_01, (get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2) - 1)); - if (as_bool(logical_eq(t,1))) { - - stan::math::assign(level1_prob_choice2, inv_logit((get_base1(beta,i,"beta",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))))); - } else { - - stan::math::assign(level1_prob_choice2, inv_logit(((get_base1(beta,i,"beta",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))) + (get_base1(pi,i,"pi",1) * ((2 * 
get_base1(get_base1(level1_choice,i,"level1_choice",1),(t - 1),"level1_choice",2)) - 3))))); - } - lp_accum__.add(bernoulli_log(level1_choice_01, level1_prob_choice2)); - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a,i,"a",1) * (get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) - get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1))))), - "assigning variable v_mf"); - stan::math::assign(level2_choice_01, (1 - modulus(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))); - if (as_bool(logical_gt(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))) { - - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta,i,"beta",1) * (get_base1(v_mf,6,"v_mf",1) - get_base1(v_mf,5,"v_mf",1))))); - } else { - - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta,i,"beta",1) * (get_base1(v_mf,4,"v_mf",1) - get_base1(v_mf,3,"v_mf",1))))); - } - lp_accum__.add(bernoulli_log(level2_choice_01, level2_prob_choice2)); - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) + (get_base1(a,i,"a",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - stan::model::assign(v_mf, - 
stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a,i,"a",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("a_pr"); - names__.push_back("beta_pr"); - names__.push_back("pi_pr"); - names__.push_back("w_pr"); - names__.push_back("a"); - names__.push_back("beta"); - names__.push_back("pi"); - names__.push_back("w"); - names__.push_back("mu_a"); - names__.push_back("mu_beta"); - names__.push_back("mu_pi"); - names__.push_back("mu_w"); - names__.push_back("log_lik"); - names__.push_back("y_pred_step1"); - names__.push_back("y_pred_step2"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(4); - 
dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_ts_par4_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(4); - vector_d sigma = in__.vector_lb_constrain(0,4); - vector_d a_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - vector_d pi_pr = in__.vector_constrain(N); - vector_d w_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - 
vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("a", "N", N); - Eigen::Matrix a(static_cast(N)); - (void) a; // dummy to suppress unused var warning - - stan::math::initialize(a, DUMMY_VAR__); - stan::math::fill(a,DUMMY_VAR__); - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - validate_non_negative_index("w", "N", N); - Eigen::Matrix w(static_cast(N)); - (void) w; // dummy to suppress unused var warning - - stan::math::initialize(w, DUMMY_VAR__); - stan::math::fill(w,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(a, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(a_pr,i,"a_pr",1)))), - "assigning variable a"); - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * 
get_base1(beta_pr,i,"beta_pr",1)))), - "assigning variable beta"); - stan::model::assign(pi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(pi_pr,i,"pi_pr",1)))) * 5), - "assigning variable pi"); - stan::model::assign(w, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), - "assigning variable w"); - } - - // validate transformed parameters - check_greater_or_equal(function__,"a",a,0); - check_less_or_equal(function__,"a",a,1); - check_greater_or_equal(function__,"beta",beta,0); - check_greater_or_equal(function__,"pi",pi,0); - check_less_or_equal(function__,"pi",pi,5); - check_greater_or_equal(function__,"w",w,0); - check_less_or_equal(function__,"w",w,1); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_a; - (void) mu_a; // dummy to suppress unused var warning - - stan::math::initialize(mu_a, DUMMY_VAR__); - stan::math::fill(mu_a,DUMMY_VAR__); - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - local_scalar_t__ mu_pi; - (void) mu_pi; // dummy to suppress unused var warning - - stan::math::initialize(mu_pi, DUMMY_VAR__); - stan::math::fill(mu_pi,DUMMY_VAR__); - local_scalar_t__ mu_w; - (void) mu_w; // dummy to suppress unused var warning - - 
stan::math::initialize(mu_w, DUMMY_VAR__); - stan::math::fill(mu_w,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred_step1", "N", N); - validate_non_negative_index("y_pred_step1", "T", T); - vector > y_pred_step1(N, (vector(T))); - stan::math::initialize(y_pred_step1, DUMMY_VAR__); - stan::math::fill(y_pred_step1,DUMMY_VAR__); - validate_non_negative_index("y_pred_step2", "N", N); - validate_non_negative_index("y_pred_step2", "T", T); - vector > y_pred_step2(N, (vector(T))); - stan::math::initialize(y_pred_step2, DUMMY_VAR__); - stan::math::fill(y_pred_step2,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred_step1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred_step1"); - stan::model::assign(y_pred_step2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred_step2"); - } - } - stan::math::assign(mu_a, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_beta, stan::math::exp(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_pi, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 5)); - stan::math::assign(mu_w, Phi_approx(get_base1(mu_p,4,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("v_mb", "2", 2); - Eigen::Matrix v_mb(static_cast(2)); - (void) v_mb; // dummy to suppress unused var warning - - stan::math::initialize(v_mb, DUMMY_VAR__); - stan::math::fill(v_mb,DUMMY_VAR__); - validate_non_negative_index("v_mf", "6", 6); - Eigen::Matrix v_mf(static_cast(6)); - (void) v_mf; // dummy to suppress unused var warning - - stan::math::initialize(v_mf, DUMMY_VAR__); - 
stan::math::fill(v_mf,DUMMY_VAR__); - validate_non_negative_index("v_hybrid", "2", 2); - Eigen::Matrix v_hybrid(static_cast(2)); - (void) v_hybrid; // dummy to suppress unused var warning - - stan::math::initialize(v_hybrid, DUMMY_VAR__); - stan::math::fill(v_hybrid,DUMMY_VAR__); - local_scalar_t__ level1_prob_choice2; - (void) level1_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level1_prob_choice2, DUMMY_VAR__); - stan::math::fill(level1_prob_choice2,DUMMY_VAR__); - local_scalar_t__ level2_prob_choice2; - (void) level2_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level2_prob_choice2, DUMMY_VAR__); - stan::math::fill(level2_prob_choice2,DUMMY_VAR__); - int level1_choice_01(0); - (void) level1_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level1_choice_01, std::numeric_limits::min()); - int level2_choice_01(0); - (void) level2_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level2_choice_01, std::numeric_limits::min()); - - - stan::math::assign(v_mb, rep_vector(0.0,2)); - stan::math::assign(v_mf, rep_vector(0.0,6)); - stan::math::assign(v_hybrid, rep_vector(0.0,2)); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((trans_prob * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + ((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - (((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + (trans_prob * 
stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,1,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,1,"v_mf",1))), - "assigning variable v_hybrid"); - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,2,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,2,"v_mf",1))), - "assigning variable v_hybrid"); - stan::math::assign(level1_choice_01, (get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2) - 1)); - if (as_bool(logical_eq(t,1))) { - - stan::math::assign(level1_prob_choice2, inv_logit((get_base1(beta,i,"beta",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))))); - } else { - - stan::math::assign(level1_prob_choice2, inv_logit(((get_base1(beta,i,"beta",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))) + (get_base1(pi,i,"pi",1) * ((2 * get_base1(get_base1(level1_choice,i,"level1_choice",1),(t - 1),"level1_choice",2)) - 3))))); - } - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(level1_choice_01,level1_prob_choice2))), - "assigning variable log_lik"); - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a,i,"a",1) * (get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) - 
get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1))))), - "assigning variable v_mf"); - stan::math::assign(level2_choice_01, (1 - modulus(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))); - if (as_bool(logical_gt(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))) { - - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta,i,"beta",1) * (get_base1(v_mf,6,"v_mf",1) - get_base1(v_mf,5,"v_mf",1))))); - } else { - - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta,i,"beta",1) * (get_base1(v_mf,4,"v_mf",1) - get_base1(v_mf,3,"v_mf",1))))); - } - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(level2_choice_01,level2_prob_choice2))), - "assigning variable log_lik"); - stan::model::assign(y_pred_step1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(level1_prob_choice2, base_rng__), - "assigning variable y_pred_step1"); - stan::model::assign(y_pred_step2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(level2_prob_choice2, base_rng__), - "assigning variable y_pred_step2"); - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a,i,"a",1) * (get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) - 
get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1))))), - "assigning variable v_mf"); - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) + (get_base1(a,i,"a",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a,i,"a",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_a",mu_a,0); - check_less_or_equal(function__,"mu_a",mu_a,1); - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_greater_or_equal(function__,"mu_pi",mu_pi,0); - check_less_or_equal(function__,"mu_pi",mu_pi,5); - check_greater_or_equal(function__,"mu_w",mu_w,0); - check_less_or_equal(function__,"mu_w",mu_w,1); - - // write generated quantities - vars__.push_back(mu_a); - vars__.push_back(mu_beta); - vars__.push_back(mu_pi); - vars__.push_back(mu_w); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - 
vars__.push_back(y_pred_step1[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred_step2[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_ts_par4"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step1" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step2" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step1" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step2" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_ts_par6_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_ts_par6"); - reader.add_event(212, 210, "end", "model_ts_par6"); - return reader; -} - -class model_ts_par6 : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > level1_choice; - vector > level2_choice; - vector > reward; - double trans_prob; -public: - model_ts_par6(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_ts_par6(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_ts_par6_namespace::model_ts_par6"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - 
std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("level1_choice", "N", N); - validate_non_negative_index("level1_choice", "T", T); - context__.validate_dims("data initialization", "level1_choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("level1_choice", "N", N); - validate_non_negative_index("level1_choice", "T", T); - level1_choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("level1_choice"); - pos__ = 0; - size_t level1_choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < level1_choice_limit_1__; ++i_1__) { - size_t level1_choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < level1_choice_limit_0__; ++i_0__) { - level1_choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("level2_choice", "N", N); - validate_non_negative_index("level2_choice", "T", T); - context__.validate_dims("data initialization", "level2_choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("level2_choice", "N", N); - validate_non_negative_index("level2_choice", "T", T); - level2_choice = std::vector 
>(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("level2_choice"); - pos__ = 0; - size_t level2_choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < level2_choice_limit_1__; ++i_1__) { - size_t level2_choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < level2_choice_limit_0__; ++i_0__) { - level2_choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("reward", "N", N); - validate_non_negative_index("reward", "T", T); - context__.validate_dims("data initialization", "reward", "int", context__.to_vec(N,T)); - validate_non_negative_index("reward", "N", N); - validate_non_negative_index("reward", "T", T); - reward = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("reward"); - pos__ = 0; - size_t reward_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < reward_limit_1__; ++i_1__) { - size_t reward_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < reward_limit_0__; ++i_0__) { - reward[i_0__][i_1__] = vals_i__[pos__++]; - } - } - context__.validate_dims("data initialization", "trans_prob", "double", context__.to_vec()); - trans_prob = double(0); - vals_r__ = context__.vals_r("trans_prob"); - pos__ = 0; - trans_prob = vals_r__[pos__++]; - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"level1_choice[k0__][k1__]",level1_choice[k0__][k1__],1); - check_less_or_equal(function__,"level1_choice[k0__][k1__]",level1_choice[k0__][k1__],2); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"level2_choice[k0__][k1__]",level2_choice[k0__][k1__],1); - 
check_less_or_equal(function__,"level2_choice[k0__][k1__]",level2_choice[k0__][k1__],4); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"reward[k0__][k1__]",reward[k0__][k1__],0); - check_less_or_equal(function__,"reward[k0__][k1__]",reward[k0__][k1__],1); - } - } - check_greater_or_equal(function__,"trans_prob",trans_prob,0); - check_less_or_equal(function__,"trans_prob",trans_prob,1); - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "6", 6); - num_params_r__ += 6; - validate_non_negative_index("sigma", "6", 6); - num_params_r__ += 6; - validate_non_negative_index("a1_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta1_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("a2_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta2_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("pi_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("w_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_ts_par6() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "6", 6); - 
context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(6)); - vector_d mu_p(static_cast(6)); - for (int j1__ = 0U; j1__ < 6; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "6", 6); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(6)); - vector_d sigma(static_cast(6)); - for (int j1__ = 0U; j1__ < 6; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("a1_pr"))) - throw std::runtime_error("variable a1_pr missing"); - vals_r__ = context__.vals_r("a1_pr"); - pos__ = 0U; - validate_non_negative_index("a1_pr", "N", N); - context__.validate_dims("initialization", "a1_pr", "vector_d", context__.to_vec(N)); - vector_d a1_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - a1_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(a1_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable a1_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta1_pr"))) - throw std::runtime_error("variable beta1_pr missing"); - vals_r__ = context__.vals_r("beta1_pr"); - pos__ = 0U; - validate_non_negative_index("beta1_pr", "N", N); - context__.validate_dims("initialization", "beta1_pr", "vector_d", context__.to_vec(N)); - vector_d beta1_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta1_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta1_pr); - } catch (const 
std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta1_pr: ") + e.what()); - } - - if (!(context__.contains_r("a2_pr"))) - throw std::runtime_error("variable a2_pr missing"); - vals_r__ = context__.vals_r("a2_pr"); - pos__ = 0U; - validate_non_negative_index("a2_pr", "N", N); - context__.validate_dims("initialization", "a2_pr", "vector_d", context__.to_vec(N)); - vector_d a2_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - a2_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(a2_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable a2_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta2_pr"))) - throw std::runtime_error("variable beta2_pr missing"); - vals_r__ = context__.vals_r("beta2_pr"); - pos__ = 0U; - validate_non_negative_index("beta2_pr", "N", N); - context__.validate_dims("initialization", "beta2_pr", "vector_d", context__.to_vec(N)); - vector_d beta2_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta2_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta2_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta2_pr: ") + e.what()); - } - - if (!(context__.contains_r("pi_pr"))) - throw std::runtime_error("variable pi_pr missing"); - vals_r__ = context__.vals_r("pi_pr"); - pos__ = 0U; - validate_non_negative_index("pi_pr", "N", N); - context__.validate_dims("initialization", "pi_pr", "vector_d", context__.to_vec(N)); - vector_d pi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - pi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(pi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable pi_pr: ") + e.what()); - } - - if (!(context__.contains_r("w_pr"))) - throw std::runtime_error("variable w_pr missing"); - vals_r__ = context__.vals_r("w_pr"); - 
pos__ = 0U; - validate_non_negative_index("w_pr", "N", N); - context__.validate_dims("initialization", "w_pr", "vector_d", context__.to_vec(N)); - vector_d w_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - w_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(w_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable w_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(6,lp__); - else - mu_p = in__.vector_constrain(6); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,6,lp__); - else - sigma = in__.vector_lb_constrain(0,6); - - Eigen::Matrix a1_pr; - (void) a1_pr; // dummy to suppress unused var warning - if (jacobian__) - a1_pr = in__.vector_constrain(N,lp__); - else - a1_pr = in__.vector_constrain(N); - - Eigen::Matrix beta1_pr; - (void) beta1_pr; // dummy to suppress unused var warning - if (jacobian__) - beta1_pr = in__.vector_constrain(N,lp__); - else - beta1_pr = 
in__.vector_constrain(N); - - Eigen::Matrix a2_pr; - (void) a2_pr; // dummy to suppress unused var warning - if (jacobian__) - a2_pr = in__.vector_constrain(N,lp__); - else - a2_pr = in__.vector_constrain(N); - - Eigen::Matrix beta2_pr; - (void) beta2_pr; // dummy to suppress unused var warning - if (jacobian__) - beta2_pr = in__.vector_constrain(N,lp__); - else - beta2_pr = in__.vector_constrain(N); - - Eigen::Matrix pi_pr; - (void) pi_pr; // dummy to suppress unused var warning - if (jacobian__) - pi_pr = in__.vector_constrain(N,lp__); - else - pi_pr = in__.vector_constrain(N); - - Eigen::Matrix w_pr; - (void) w_pr; // dummy to suppress unused var warning - if (jacobian__) - w_pr = in__.vector_constrain(N,lp__); - else - w_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("a1", "N", N); - Eigen::Matrix a1(static_cast(N)); - (void) a1; // dummy to suppress unused var warning - - stan::math::initialize(a1, DUMMY_VAR__); - stan::math::fill(a1,DUMMY_VAR__); - validate_non_negative_index("beta1", "N", N); - Eigen::Matrix beta1(static_cast(N)); - (void) beta1; // dummy to suppress unused var warning - - stan::math::initialize(beta1, DUMMY_VAR__); - stan::math::fill(beta1,DUMMY_VAR__); - validate_non_negative_index("a2", "N", N); - Eigen::Matrix a2(static_cast(N)); - (void) a2; // dummy to suppress unused var warning - - stan::math::initialize(a2, DUMMY_VAR__); - stan::math::fill(a2,DUMMY_VAR__); - validate_non_negative_index("beta2", "N", N); - Eigen::Matrix beta2(static_cast(N)); - (void) beta2; // dummy to suppress unused var warning - - stan::math::initialize(beta2, DUMMY_VAR__); - stan::math::fill(beta2,DUMMY_VAR__); - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - validate_non_negative_index("w", "N", N); - Eigen::Matrix w(static_cast(N)); - (void) w; // 
dummy to suppress unused var warning - - stan::math::initialize(w, DUMMY_VAR__); - stan::math::fill(w,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(a1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(a1_pr,i,"a1_pr",1)))), - "assigning variable a1"); - stan::model::assign(beta1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta1_pr,i,"beta1_pr",1)))), - "assigning variable beta1"); - stan::model::assign(a2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(a2_pr,i,"a2_pr",1)))), - "assigning variable a2"); - stan::model::assign(beta2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(beta2_pr,i,"beta2_pr",1)))), - "assigning variable beta2"); - stan::model::assign(pi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,5,"mu_p",1) + (get_base1(sigma,5,"sigma",1) * get_base1(pi_pr,i,"pi_pr",1)))) * 5), - "assigning variable pi"); - stan::model::assign(w, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,6,"mu_p",1) + (get_base1(sigma,6,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), - "assigning variable w"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(a1(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: a1" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if 
(stan::math::is_uninitialized(beta1(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta1" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(a2(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: a2" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta2(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta2" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(pi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: pi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(w(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: w" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"a1",a1,0); - check_less_or_equal(function__,"a1",a1,1); - check_greater_or_equal(function__,"beta1",beta1,0); - check_greater_or_equal(function__,"a2",a2,0); - check_less_or_equal(function__,"a2",a2,1); - check_greater_or_equal(function__,"beta2",beta2,0); - check_greater_or_equal(function__,"pi",pi,0); - check_less_or_equal(function__,"pi",pi,5); - check_greater_or_equal(function__,"w",w,0); - check_less_or_equal(function__,"w",w,1); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(a1_pr, 0, 1)); - lp_accum__.add(normal_log(beta1_pr, 0, 1)); - lp_accum__.add(normal_log(a2_pr, 0, 
1)); - lp_accum__.add(normal_log(beta2_pr, 0, 1)); - lp_accum__.add(normal_log(pi_pr, 0, 1)); - lp_accum__.add(normal_log(w_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("v_mb", "2", 2); - Eigen::Matrix v_mb(static_cast(2)); - (void) v_mb; // dummy to suppress unused var warning - - stan::math::initialize(v_mb, DUMMY_VAR__); - stan::math::fill(v_mb,DUMMY_VAR__); - validate_non_negative_index("v_mf", "6", 6); - Eigen::Matrix v_mf(static_cast(6)); - (void) v_mf; // dummy to suppress unused var warning - - stan::math::initialize(v_mf, DUMMY_VAR__); - stan::math::fill(v_mf,DUMMY_VAR__); - validate_non_negative_index("v_hybrid", "2", 2); - Eigen::Matrix v_hybrid(static_cast(2)); - (void) v_hybrid; // dummy to suppress unused var warning - - stan::math::initialize(v_hybrid, DUMMY_VAR__); - stan::math::fill(v_hybrid,DUMMY_VAR__); - local_scalar_t__ level1_prob_choice2; - (void) level1_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level1_prob_choice2, DUMMY_VAR__); - stan::math::fill(level1_prob_choice2,DUMMY_VAR__); - local_scalar_t__ level2_prob_choice2; - (void) level2_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level2_prob_choice2, DUMMY_VAR__); - stan::math::fill(level2_prob_choice2,DUMMY_VAR__); - int level1_choice_01(0); - (void) level1_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level1_choice_01, std::numeric_limits::min()); - int level2_choice_01(0); - (void) level2_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level2_choice_01, std::numeric_limits::min()); - - - stan::math::assign(v_mb, rep_vector(0.0,2)); - stan::math::assign(v_mf, rep_vector(0.0,6)); - stan::math::assign(v_hybrid, rep_vector(0.0,2)); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((trans_prob * 
stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + ((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - (((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + (trans_prob * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,1,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,1,"v_mf",1))), - "assigning variable v_hybrid"); - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,2,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,2,"v_mf",1))), - "assigning variable v_hybrid"); - stan::math::assign(level1_choice_01, (get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2) - 1)); - if (as_bool(logical_eq(t,1))) { - - stan::math::assign(level1_prob_choice2, inv_logit((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))))); - } else { - - stan::math::assign(level1_prob_choice2, inv_logit(((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))) + (get_base1(pi,i,"pi",1) * ((2 * get_base1(get_base1(level1_choice,i,"level1_choice",1),(t - 1),"level1_choice",2)) - 3))))); - } - lp_accum__.add(bernoulli_log(level1_choice_01, level1_prob_choice2)); - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - 
stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a1,i,"a1",1) * (get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) - get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1))))), - "assigning variable v_mf"); - stan::math::assign(level2_choice_01, (1 - modulus(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))); - if (as_bool(logical_gt(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))) { - - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,6,"v_mf",1) - get_base1(v_mf,5,"v_mf",1))))); - } else { - - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,4,"v_mf",1) - get_base1(v_mf,3,"v_mf",1))))); - } - lp_accum__.add(bernoulli_log(level2_choice_01, level2_prob_choice2)); - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) + (get_base1(a2,i,"a2",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a1,i,"a1",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + 
get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("a1_pr"); - names__.push_back("beta1_pr"); - names__.push_back("a2_pr"); - names__.push_back("beta2_pr"); - names__.push_back("pi_pr"); - names__.push_back("w_pr"); - names__.push_back("a1"); - names__.push_back("beta1"); - names__.push_back("a2"); - names__.push_back("beta2"); - names__.push_back("pi"); - names__.push_back("w"); - names__.push_back("mu_a1"); - names__.push_back("mu_beta1"); - names__.push_back("mu_a2"); - names__.push_back("mu_beta2"); - names__.push_back("mu_pi"); - names__.push_back("mu_w"); - names__.push_back("log_lik"); - names__.push_back("y_pred_step1"); - names__.push_back("y_pred_step2"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(6); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(6); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - 
dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_ts_par6_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(6); - vector_d sigma = in__.vector_lb_constrain(0,6); - vector_d a1_pr = in__.vector_constrain(N); - vector_d beta1_pr = in__.vector_constrain(N); - vector_d a2_pr = in__.vector_constrain(N); - 
vector_d beta2_pr = in__.vector_constrain(N); - vector_d pi_pr = in__.vector_constrain(N); - vector_d w_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 6; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 6; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a1_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta1_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a2_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta2_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("a1", "N", N); - Eigen::Matrix a1(static_cast(N)); - (void) a1; // dummy to suppress unused var warning - - stan::math::initialize(a1, DUMMY_VAR__); - stan::math::fill(a1,DUMMY_VAR__); - validate_non_negative_index("beta1", "N", N); - Eigen::Matrix beta1(static_cast(N)); - (void) beta1; // dummy to suppress unused var warning - - stan::math::initialize(beta1, DUMMY_VAR__); - stan::math::fill(beta1,DUMMY_VAR__); - validate_non_negative_index("a2", "N", N); - Eigen::Matrix a2(static_cast(N)); - (void) a2; // dummy to suppress unused var warning - - stan::math::initialize(a2, DUMMY_VAR__); - stan::math::fill(a2,DUMMY_VAR__); - validate_non_negative_index("beta2", "N", N); - Eigen::Matrix beta2(static_cast(N)); - (void) beta2; // dummy to suppress unused var warning - - stan::math::initialize(beta2, DUMMY_VAR__); - stan::math::fill(beta2,DUMMY_VAR__); - 
validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - validate_non_negative_index("w", "N", N); - Eigen::Matrix w(static_cast(N)); - (void) w; // dummy to suppress unused var warning - - stan::math::initialize(w, DUMMY_VAR__); - stan::math::fill(w,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(a1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(a1_pr,i,"a1_pr",1)))), - "assigning variable a1"); - stan::model::assign(beta1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta1_pr,i,"beta1_pr",1)))), - "assigning variable beta1"); - stan::model::assign(a2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(a2_pr,i,"a2_pr",1)))), - "assigning variable a2"); - stan::model::assign(beta2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(beta2_pr,i,"beta2_pr",1)))), - "assigning variable beta2"); - stan::model::assign(pi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,5,"mu_p",1) + (get_base1(sigma,5,"sigma",1) * get_base1(pi_pr,i,"pi_pr",1)))) * 5), - "assigning variable pi"); - stan::model::assign(w, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,6,"mu_p",1) + (get_base1(sigma,6,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), - "assigning variable w"); - } - - // validate 
transformed parameters - check_greater_or_equal(function__,"a1",a1,0); - check_less_or_equal(function__,"a1",a1,1); - check_greater_or_equal(function__,"beta1",beta1,0); - check_greater_or_equal(function__,"a2",a2,0); - check_less_or_equal(function__,"a2",a2,1); - check_greater_or_equal(function__,"beta2",beta2,0); - check_greater_or_equal(function__,"pi",pi,0); - check_less_or_equal(function__,"pi",pi,5); - check_greater_or_equal(function__,"w",w,0); - check_less_or_equal(function__,"w",w,1); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a1[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta1[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a2[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta2[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_a1; - (void) mu_a1; // dummy to suppress unused var warning - - stan::math::initialize(mu_a1, DUMMY_VAR__); - stan::math::fill(mu_a1,DUMMY_VAR__); - local_scalar_t__ mu_beta1; - (void) mu_beta1; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta1, DUMMY_VAR__); - stan::math::fill(mu_beta1,DUMMY_VAR__); - local_scalar_t__ mu_a2; - (void) mu_a2; // dummy to suppress unused var warning - - stan::math::initialize(mu_a2, DUMMY_VAR__); - stan::math::fill(mu_a2,DUMMY_VAR__); - local_scalar_t__ mu_beta2; - (void) mu_beta2; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta2, DUMMY_VAR__); - stan::math::fill(mu_beta2,DUMMY_VAR__); - local_scalar_t__ mu_pi; - (void) mu_pi; // dummy to suppress unused var warning - - stan::math::initialize(mu_pi, DUMMY_VAR__); - stan::math::fill(mu_pi,DUMMY_VAR__); - 
local_scalar_t__ mu_w; - (void) mu_w; // dummy to suppress unused var warning - - stan::math::initialize(mu_w, DUMMY_VAR__); - stan::math::fill(mu_w,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred_step1", "N", N); - validate_non_negative_index("y_pred_step1", "T", T); - vector > y_pred_step1(N, (vector(T))); - stan::math::initialize(y_pred_step1, DUMMY_VAR__); - stan::math::fill(y_pred_step1,DUMMY_VAR__); - validate_non_negative_index("y_pred_step2", "N", N); - validate_non_negative_index("y_pred_step2", "T", T); - vector > y_pred_step2(N, (vector(T))); - stan::math::initialize(y_pred_step2, DUMMY_VAR__); - stan::math::fill(y_pred_step2,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred_step1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred_step1"); - stan::model::assign(y_pred_step2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred_step2"); - } - } - stan::math::assign(mu_a1, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_beta1, stan::math::exp(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_a2, Phi_approx(get_base1(mu_p,3,"mu_p",1))); - stan::math::assign(mu_beta2, stan::math::exp(get_base1(mu_p,4,"mu_p",1))); - stan::math::assign(mu_pi, (Phi_approx(get_base1(mu_p,5,"mu_p",1)) * 5)); - stan::math::assign(mu_w, Phi_approx(get_base1(mu_p,6,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("v_mb", "2", 2); - Eigen::Matrix v_mb(static_cast(2)); - (void) v_mb; // dummy to suppress unused var warning - - stan::math::initialize(v_mb, DUMMY_VAR__); 
- stan::math::fill(v_mb,DUMMY_VAR__); - validate_non_negative_index("v_mf", "6", 6); - Eigen::Matrix v_mf(static_cast(6)); - (void) v_mf; // dummy to suppress unused var warning - - stan::math::initialize(v_mf, DUMMY_VAR__); - stan::math::fill(v_mf,DUMMY_VAR__); - validate_non_negative_index("v_hybrid", "2", 2); - Eigen::Matrix v_hybrid(static_cast(2)); - (void) v_hybrid; // dummy to suppress unused var warning - - stan::math::initialize(v_hybrid, DUMMY_VAR__); - stan::math::fill(v_hybrid,DUMMY_VAR__); - local_scalar_t__ level1_prob_choice2; - (void) level1_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level1_prob_choice2, DUMMY_VAR__); - stan::math::fill(level1_prob_choice2,DUMMY_VAR__); - local_scalar_t__ level2_prob_choice2; - (void) level2_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level2_prob_choice2, DUMMY_VAR__); - stan::math::fill(level2_prob_choice2,DUMMY_VAR__); - int level1_choice_01(0); - (void) level1_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level1_choice_01, std::numeric_limits::min()); - int level2_choice_01(0); - (void) level2_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level2_choice_01, std::numeric_limits::min()); - - - stan::math::assign(v_mb, rep_vector(0.0,2)); - stan::math::assign(v_mf, rep_vector(0.0,6)); - stan::math::assign(v_hybrid, rep_vector(0.0,2)); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((trans_prob * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + ((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - 
stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - (((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + (trans_prob * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,1,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,1,"v_mf",1))), - "assigning variable v_hybrid"); - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,2,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,2,"v_mf",1))), - "assigning variable v_hybrid"); - stan::math::assign(level1_choice_01, (get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2) - 1)); - if (as_bool(logical_eq(t,1))) { - - stan::math::assign(level1_prob_choice2, inv_logit((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))))); - } else { - - stan::math::assign(level1_prob_choice2, inv_logit(((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))) + (get_base1(pi,i,"pi",1) * ((2 * get_base1(get_base1(level1_choice,i,"level1_choice",1),(t - 1),"level1_choice",2)) - 3))))); - } - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(level1_choice_01,level1_prob_choice2))), - "assigning variable log_lik"); - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - 
stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a1,i,"a1",1) * (get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) - get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1))))), - "assigning variable v_mf"); - stan::math::assign(level2_choice_01, (1 - modulus(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))); - if (as_bool(logical_gt(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))) { - - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,6,"v_mf",1) - get_base1(v_mf,5,"v_mf",1))))); - } else { - - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,4,"v_mf",1) - get_base1(v_mf,3,"v_mf",1))))); - } - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(level2_choice_01,level2_prob_choice2))), - "assigning variable log_lik"); - stan::model::assign(y_pred_step1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(level1_prob_choice2, base_rng__), - "assigning variable y_pred_step1"); - stan::model::assign(y_pred_step2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(level2_prob_choice2, base_rng__), - "assigning variable y_pred_step2"); - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,(2 + 
get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) + (get_base1(a2,i,"a2",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a1,i,"a1",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_a1",mu_a1,0); - check_less_or_equal(function__,"mu_a1",mu_a1,1); - check_greater_or_equal(function__,"mu_beta1",mu_beta1,0); - check_greater_or_equal(function__,"mu_a2",mu_a2,0); - check_less_or_equal(function__,"mu_a2",mu_a2,1); - check_greater_or_equal(function__,"mu_beta2",mu_beta2,0); - check_greater_or_equal(function__,"mu_pi",mu_pi,0); - check_less_or_equal(function__,"mu_pi",mu_pi,5); - check_greater_or_equal(function__,"mu_w",mu_w,0); - check_less_or_equal(function__,"mu_w",mu_w,1); - - // write generated quantities - vars__.push_back(mu_a1); - vars__.push_back(mu_beta1); - vars__.push_back(mu_a2); - vars__.push_back(mu_beta2); - vars__.push_back(mu_pi); - vars__.push_back(mu_w); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred_step1[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - 
vars__.push_back(y_pred_step2[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_ts_par6"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a1_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta1_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a2_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta2_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a1" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta1" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step1" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step2" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a1_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta1_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a2_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta2_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a1" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta1" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step1" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step2" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_ts_par7_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_ts_par7"); - reader.add_event(216, 214, "end", "model_ts_par7"); - return reader; -} - -class model_ts_par7 : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > level1_choice; - vector > level2_choice; - vector > reward; - double trans_prob; -public: - model_ts_par7(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_ts_par7(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_ts_par7_namespace::model_ts_par7"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = 
int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("level1_choice", "N", N); - validate_non_negative_index("level1_choice", "T", T); - context__.validate_dims("data initialization", "level1_choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("level1_choice", "N", N); - validate_non_negative_index("level1_choice", "T", T); - level1_choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("level1_choice"); - pos__ = 0; - size_t level1_choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < level1_choice_limit_1__; ++i_1__) { - size_t level1_choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < level1_choice_limit_0__; ++i_0__) { - level1_choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("level2_choice", "N", N); - validate_non_negative_index("level2_choice", "T", T); - context__.validate_dims("data initialization", "level2_choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("level2_choice", "N", N); - validate_non_negative_index("level2_choice", "T", T); - level2_choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("level2_choice"); - pos__ = 0; - size_t level2_choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < level2_choice_limit_1__; ++i_1__) { - size_t level2_choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < 
level2_choice_limit_0__; ++i_0__) { - level2_choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("reward", "N", N); - validate_non_negative_index("reward", "T", T); - context__.validate_dims("data initialization", "reward", "int", context__.to_vec(N,T)); - validate_non_negative_index("reward", "N", N); - validate_non_negative_index("reward", "T", T); - reward = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("reward"); - pos__ = 0; - size_t reward_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < reward_limit_1__; ++i_1__) { - size_t reward_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < reward_limit_0__; ++i_0__) { - reward[i_0__][i_1__] = vals_i__[pos__++]; - } - } - context__.validate_dims("data initialization", "trans_prob", "double", context__.to_vec()); - trans_prob = double(0); - vals_r__ = context__.vals_r("trans_prob"); - pos__ = 0; - trans_prob = vals_r__[pos__++]; - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"level1_choice[k0__][k1__]",level1_choice[k0__][k1__],1); - check_less_or_equal(function__,"level1_choice[k0__][k1__]",level1_choice[k0__][k1__],2); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"level2_choice[k0__][k1__]",level2_choice[k0__][k1__],1); - check_less_or_equal(function__,"level2_choice[k0__][k1__]",level2_choice[k0__][k1__],4); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"reward[k0__][k1__]",reward[k0__][k1__],0); - 
check_less_or_equal(function__,"reward[k0__][k1__]",reward[k0__][k1__],1); - } - } - check_greater_or_equal(function__,"trans_prob",trans_prob,0); - check_less_or_equal(function__,"trans_prob",trans_prob,1); - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "7", 7); - num_params_r__ += 7; - validate_non_negative_index("sigma", "7", 7); - num_params_r__ += 7; - validate_non_negative_index("a1_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta1_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("a2_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("beta2_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("pi_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("w_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("lambda_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_ts_par7() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "7", 7); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(7)); - vector_d mu_p(static_cast(7)); - for (int j1__ = 0U; j1__ < 7; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; 
- try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "7", 7); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(7)); - vector_d sigma(static_cast(7)); - for (int j1__ = 0U; j1__ < 7; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("a1_pr"))) - throw std::runtime_error("variable a1_pr missing"); - vals_r__ = context__.vals_r("a1_pr"); - pos__ = 0U; - validate_non_negative_index("a1_pr", "N", N); - context__.validate_dims("initialization", "a1_pr", "vector_d", context__.to_vec(N)); - vector_d a1_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - a1_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(a1_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable a1_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta1_pr"))) - throw std::runtime_error("variable beta1_pr missing"); - vals_r__ = context__.vals_r("beta1_pr"); - pos__ = 0U; - validate_non_negative_index("beta1_pr", "N", N); - context__.validate_dims("initialization", "beta1_pr", "vector_d", context__.to_vec(N)); - vector_d beta1_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta1_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta1_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta1_pr: ") + e.what()); - } - - if (!(context__.contains_r("a2_pr"))) - throw 
std::runtime_error("variable a2_pr missing"); - vals_r__ = context__.vals_r("a2_pr"); - pos__ = 0U; - validate_non_negative_index("a2_pr", "N", N); - context__.validate_dims("initialization", "a2_pr", "vector_d", context__.to_vec(N)); - vector_d a2_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - a2_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(a2_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable a2_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta2_pr"))) - throw std::runtime_error("variable beta2_pr missing"); - vals_r__ = context__.vals_r("beta2_pr"); - pos__ = 0U; - validate_non_negative_index("beta2_pr", "N", N); - context__.validate_dims("initialization", "beta2_pr", "vector_d", context__.to_vec(N)); - vector_d beta2_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta2_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta2_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta2_pr: ") + e.what()); - } - - if (!(context__.contains_r("pi_pr"))) - throw std::runtime_error("variable pi_pr missing"); - vals_r__ = context__.vals_r("pi_pr"); - pos__ = 0U; - validate_non_negative_index("pi_pr", "N", N); - context__.validate_dims("initialization", "pi_pr", "vector_d", context__.to_vec(N)); - vector_d pi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - pi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(pi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable pi_pr: ") + e.what()); - } - - if (!(context__.contains_r("w_pr"))) - throw std::runtime_error("variable w_pr missing"); - vals_r__ = context__.vals_r("w_pr"); - pos__ = 0U; - validate_non_negative_index("w_pr", "N", N); - context__.validate_dims("initialization", "w_pr", "vector_d", context__.to_vec(N)); - vector_d 
w_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - w_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(w_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable w_pr: ") + e.what()); - } - - if (!(context__.contains_r("lambda_pr"))) - throw std::runtime_error("variable lambda_pr missing"); - vals_r__ = context__.vals_r("lambda_pr"); - pos__ = 0U; - validate_non_negative_index("lambda_pr", "N", N); - context__.validate_dims("initialization", "lambda_pr", "vector_d", context__.to_vec(N)); - vector_d lambda_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - lambda_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(lambda_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable lambda_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(7,lp__); - else - mu_p = in__.vector_constrain(7); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if 
(jacobian__) - sigma = in__.vector_lb_constrain(0,7,lp__); - else - sigma = in__.vector_lb_constrain(0,7); - - Eigen::Matrix a1_pr; - (void) a1_pr; // dummy to suppress unused var warning - if (jacobian__) - a1_pr = in__.vector_constrain(N,lp__); - else - a1_pr = in__.vector_constrain(N); - - Eigen::Matrix beta1_pr; - (void) beta1_pr; // dummy to suppress unused var warning - if (jacobian__) - beta1_pr = in__.vector_constrain(N,lp__); - else - beta1_pr = in__.vector_constrain(N); - - Eigen::Matrix a2_pr; - (void) a2_pr; // dummy to suppress unused var warning - if (jacobian__) - a2_pr = in__.vector_constrain(N,lp__); - else - a2_pr = in__.vector_constrain(N); - - Eigen::Matrix beta2_pr; - (void) beta2_pr; // dummy to suppress unused var warning - if (jacobian__) - beta2_pr = in__.vector_constrain(N,lp__); - else - beta2_pr = in__.vector_constrain(N); - - Eigen::Matrix pi_pr; - (void) pi_pr; // dummy to suppress unused var warning - if (jacobian__) - pi_pr = in__.vector_constrain(N,lp__); - else - pi_pr = in__.vector_constrain(N); - - Eigen::Matrix w_pr; - (void) w_pr; // dummy to suppress unused var warning - if (jacobian__) - w_pr = in__.vector_constrain(N,lp__); - else - w_pr = in__.vector_constrain(N); - - Eigen::Matrix lambda_pr; - (void) lambda_pr; // dummy to suppress unused var warning - if (jacobian__) - lambda_pr = in__.vector_constrain(N,lp__); - else - lambda_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("a1", "N", N); - Eigen::Matrix a1(static_cast(N)); - (void) a1; // dummy to suppress unused var warning - - stan::math::initialize(a1, DUMMY_VAR__); - stan::math::fill(a1,DUMMY_VAR__); - validate_non_negative_index("beta1", "N", N); - Eigen::Matrix beta1(static_cast(N)); - (void) beta1; // dummy to suppress unused var warning - - stan::math::initialize(beta1, DUMMY_VAR__); - stan::math::fill(beta1,DUMMY_VAR__); - validate_non_negative_index("a2", "N", N); - Eigen::Matrix a2(static_cast(N)); - (void) a2; // 
dummy to suppress unused var warning - - stan::math::initialize(a2, DUMMY_VAR__); - stan::math::fill(a2,DUMMY_VAR__); - validate_non_negative_index("beta2", "N", N); - Eigen::Matrix beta2(static_cast(N)); - (void) beta2; // dummy to suppress unused var warning - - stan::math::initialize(beta2, DUMMY_VAR__); - stan::math::fill(beta2,DUMMY_VAR__); - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - validate_non_negative_index("w", "N", N); - Eigen::Matrix w(static_cast(N)); - (void) w; // dummy to suppress unused var warning - - stan::math::initialize(w, DUMMY_VAR__); - stan::math::fill(w,DUMMY_VAR__); - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(a1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(a1_pr,i,"a1_pr",1)))), - "assigning variable a1"); - stan::model::assign(beta1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta1_pr,i,"beta1_pr",1)))), - "assigning variable beta1"); - stan::model::assign(a2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(a2_pr,i,"a2_pr",1)))), - "assigning variable a2"); - stan::model::assign(beta2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * 
get_base1(beta2_pr,i,"beta2_pr",1)))), - "assigning variable beta2"); - stan::model::assign(pi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,5,"mu_p",1) + (get_base1(sigma,5,"sigma",1) * get_base1(pi_pr,i,"pi_pr",1)))) * 5), - "assigning variable pi"); - stan::model::assign(w, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,6,"mu_p",1) + (get_base1(sigma,6,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), - "assigning variable w"); - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,7,"mu_p",1) + (get_base1(sigma,7,"sigma",1) * get_base1(lambda_pr,i,"lambda_pr",1)))), - "assigning variable lambda"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(a1(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: a1" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta1(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta1" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(a2(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: a2" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta2(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta2" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(pi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: pi" << 
'[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(w(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: w" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(lambda(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: lambda" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"a1",a1,0); - check_less_or_equal(function__,"a1",a1,1); - check_greater_or_equal(function__,"beta1",beta1,0); - check_greater_or_equal(function__,"a2",a2,0); - check_less_or_equal(function__,"a2",a2,1); - check_greater_or_equal(function__,"beta2",beta2,0); - check_greater_or_equal(function__,"pi",pi,0); - check_less_or_equal(function__,"pi",pi,5); - check_greater_or_equal(function__,"w",w,0); - check_less_or_equal(function__,"w",w,1); - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,1); - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(a1_pr, 0, 1)); - lp_accum__.add(normal_log(beta1_pr, 0, 1)); - lp_accum__.add(normal_log(a2_pr, 0, 1)); - lp_accum__.add(normal_log(beta2_pr, 0, 1)); - lp_accum__.add(normal_log(pi_pr, 0, 1)); - lp_accum__.add(normal_log(w_pr, 0, 1)); - lp_accum__.add(normal_log(lambda_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("v_mb", "2", 2); - Eigen::Matrix v_mb(static_cast(2)); - (void) v_mb; // dummy to suppress unused var warning - - stan::math::initialize(v_mb, DUMMY_VAR__); - stan::math::fill(v_mb,DUMMY_VAR__); - validate_non_negative_index("v_mf", "6", 6); - 
Eigen::Matrix v_mf(static_cast(6)); - (void) v_mf; // dummy to suppress unused var warning - - stan::math::initialize(v_mf, DUMMY_VAR__); - stan::math::fill(v_mf,DUMMY_VAR__); - validate_non_negative_index("v_hybrid", "2", 2); - Eigen::Matrix v_hybrid(static_cast(2)); - (void) v_hybrid; // dummy to suppress unused var warning - - stan::math::initialize(v_hybrid, DUMMY_VAR__); - stan::math::fill(v_hybrid,DUMMY_VAR__); - local_scalar_t__ level1_prob_choice2; - (void) level1_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level1_prob_choice2, DUMMY_VAR__); - stan::math::fill(level1_prob_choice2,DUMMY_VAR__); - local_scalar_t__ level2_prob_choice2; - (void) level2_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level2_prob_choice2, DUMMY_VAR__); - stan::math::fill(level2_prob_choice2,DUMMY_VAR__); - int level1_choice_01(0); - (void) level1_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level1_choice_01, std::numeric_limits::min()); - int level2_choice_01(0); - (void) level2_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level2_choice_01, std::numeric_limits::min()); - - - stan::math::assign(v_mb, rep_vector(0.0,2)); - stan::math::assign(v_mf, rep_vector(0.0,6)); - stan::math::assign(v_hybrid, rep_vector(0.0,2)); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((trans_prob * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + ((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - (((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + (trans_prob * 
stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,1,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,1,"v_mf",1))), - "assigning variable v_hybrid"); - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,2,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,2,"v_mf",1))), - "assigning variable v_hybrid"); - stan::math::assign(level1_choice_01, (get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2) - 1)); - if (as_bool(logical_eq(t,1))) { - - stan::math::assign(level1_prob_choice2, inv_logit((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))))); - } else { - - stan::math::assign(level1_prob_choice2, inv_logit(((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))) + (get_base1(pi,i,"pi",1) * ((2 * get_base1(get_base1(level1_choice,i,"level1_choice",1),(t - 1),"level1_choice",2)) - 3))))); - } - lp_accum__.add(bernoulli_log(level1_choice_01, level1_prob_choice2)); - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a1,i,"a1",1) * (get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) - get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1))))), - "assigning variable v_mf"); - stan::math::assign(level2_choice_01, (1 - 
modulus(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))); - if (as_bool(logical_gt(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))) { - - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,6,"v_mf",1) - get_base1(v_mf,5,"v_mf",1))))); - } else { - - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,4,"v_mf",1) - get_base1(v_mf,3,"v_mf",1))))); - } - lp_accum__.add(bernoulli_log(level2_choice_01, level2_prob_choice2)); - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) + (get_base1(a2,i,"a2",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + ((get_base1(lambda,i,"lambda",1) * get_base1(a1,i,"a1",1)) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - 
lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("a1_pr"); - names__.push_back("beta1_pr"); - names__.push_back("a2_pr"); - names__.push_back("beta2_pr"); - names__.push_back("pi_pr"); - names__.push_back("w_pr"); - names__.push_back("lambda_pr"); - names__.push_back("a1"); - names__.push_back("beta1"); - names__.push_back("a2"); - names__.push_back("beta2"); - names__.push_back("pi"); - names__.push_back("w"); - names__.push_back("lambda"); - names__.push_back("mu_a1"); - names__.push_back("mu_beta1"); - names__.push_back("mu_a2"); - names__.push_back("mu_beta2"); - names__.push_back("mu_pi"); - names__.push_back("mu_w"); - names__.push_back("mu_lambda"); - names__.push_back("log_lik"); - names__.push_back("y_pred_step1"); - names__.push_back("y_pred_step2"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(7); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(7); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - 
dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_ts_par7_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(7); - vector_d sigma = in__.vector_lb_constrain(0,7); - vector_d a1_pr = in__.vector_constrain(N); - vector_d beta1_pr = in__.vector_constrain(N); - vector_d a2_pr = in__.vector_constrain(N); - vector_d beta2_pr = in__.vector_constrain(N); - vector_d pi_pr = in__.vector_constrain(N); - vector_d w_pr = 
in__.vector_constrain(N); - vector_d lambda_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 7; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 7; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a1_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta1_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a2_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta2_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("a1", "N", N); - Eigen::Matrix a1(static_cast(N)); - (void) a1; // dummy to suppress unused var warning - - stan::math::initialize(a1, DUMMY_VAR__); - stan::math::fill(a1,DUMMY_VAR__); - validate_non_negative_index("beta1", "N", N); - Eigen::Matrix beta1(static_cast(N)); - (void) beta1; // dummy to suppress unused var warning - - stan::math::initialize(beta1, DUMMY_VAR__); - stan::math::fill(beta1,DUMMY_VAR__); - validate_non_negative_index("a2", "N", N); - Eigen::Matrix a2(static_cast(N)); - (void) a2; // dummy to suppress unused var warning - - stan::math::initialize(a2, DUMMY_VAR__); - stan::math::fill(a2,DUMMY_VAR__); - validate_non_negative_index("beta2", "N", N); - Eigen::Matrix beta2(static_cast(N)); - (void) beta2; // dummy to suppress unused var warning - - stan::math::initialize(beta2, DUMMY_VAR__); - 
stan::math::fill(beta2,DUMMY_VAR__); - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - validate_non_negative_index("w", "N", N); - Eigen::Matrix w(static_cast(N)); - (void) w; // dummy to suppress unused var warning - - stan::math::initialize(w, DUMMY_VAR__); - stan::math::fill(w,DUMMY_VAR__); - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(a1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(a1_pr,i,"a1_pr",1)))), - "assigning variable a1"); - stan::model::assign(beta1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta1_pr,i,"beta1_pr",1)))), - "assigning variable beta1"); - stan::model::assign(a2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(a2_pr,i,"a2_pr",1)))), - "assigning variable a2"); - stan::model::assign(beta2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(beta2_pr,i,"beta2_pr",1)))), - "assigning variable beta2"); - stan::model::assign(pi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,5,"mu_p",1) + (get_base1(sigma,5,"sigma",1) * get_base1(pi_pr,i,"pi_pr",1)))) * 5), - "assigning variable 
pi"); - stan::model::assign(w, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,6,"mu_p",1) + (get_base1(sigma,6,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), - "assigning variable w"); - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,7,"mu_p",1) + (get_base1(sigma,7,"sigma",1) * get_base1(lambda_pr,i,"lambda_pr",1)))), - "assigning variable lambda"); - } - - // validate transformed parameters - check_greater_or_equal(function__,"a1",a1,0); - check_less_or_equal(function__,"a1",a1,1); - check_greater_or_equal(function__,"beta1",beta1,0); - check_greater_or_equal(function__,"a2",a2,0); - check_less_or_equal(function__,"a2",a2,1); - check_greater_or_equal(function__,"beta2",beta2,0); - check_greater_or_equal(function__,"pi",pi,0); - check_less_or_equal(function__,"pi",pi,5); - check_greater_or_equal(function__,"w",w,0); - check_less_or_equal(function__,"w",w,1); - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,1); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a1[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta1[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a2[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta2[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_a1; - (void) mu_a1; // dummy to suppress unused var warning - - stan::math::initialize(mu_a1, DUMMY_VAR__); - 
stan::math::fill(mu_a1,DUMMY_VAR__); - local_scalar_t__ mu_beta1; - (void) mu_beta1; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta1, DUMMY_VAR__); - stan::math::fill(mu_beta1,DUMMY_VAR__); - local_scalar_t__ mu_a2; - (void) mu_a2; // dummy to suppress unused var warning - - stan::math::initialize(mu_a2, DUMMY_VAR__); - stan::math::fill(mu_a2,DUMMY_VAR__); - local_scalar_t__ mu_beta2; - (void) mu_beta2; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta2, DUMMY_VAR__); - stan::math::fill(mu_beta2,DUMMY_VAR__); - local_scalar_t__ mu_pi; - (void) mu_pi; // dummy to suppress unused var warning - - stan::math::initialize(mu_pi, DUMMY_VAR__); - stan::math::fill(mu_pi,DUMMY_VAR__); - local_scalar_t__ mu_w; - (void) mu_w; // dummy to suppress unused var warning - - stan::math::initialize(mu_w, DUMMY_VAR__); - stan::math::fill(mu_w,DUMMY_VAR__); - local_scalar_t__ mu_lambda; - (void) mu_lambda; // dummy to suppress unused var warning - - stan::math::initialize(mu_lambda, DUMMY_VAR__); - stan::math::fill(mu_lambda,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred_step1", "N", N); - validate_non_negative_index("y_pred_step1", "T", T); - vector > y_pred_step1(N, (vector(T))); - stan::math::initialize(y_pred_step1, DUMMY_VAR__); - stan::math::fill(y_pred_step1,DUMMY_VAR__); - validate_non_negative_index("y_pred_step2", "N", N); - validate_non_negative_index("y_pred_step2", "T", T); - vector > y_pred_step2(N, (vector(T))); - stan::math::initialize(y_pred_step2, DUMMY_VAR__); - stan::math::fill(y_pred_step2,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred_step1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), 
- "assigning variable y_pred_step1"); - stan::model::assign(y_pred_step2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred_step2"); - } - } - stan::math::assign(mu_a1, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_beta1, stan::math::exp(get_base1(mu_p,2,"mu_p",1))); - stan::math::assign(mu_a2, Phi_approx(get_base1(mu_p,3,"mu_p",1))); - stan::math::assign(mu_beta2, stan::math::exp(get_base1(mu_p,4,"mu_p",1))); - stan::math::assign(mu_pi, (Phi_approx(get_base1(mu_p,5,"mu_p",1)) * 5)); - stan::math::assign(mu_w, Phi_approx(get_base1(mu_p,6,"mu_p",1))); - stan::math::assign(mu_lambda, Phi_approx(get_base1(mu_p,7,"mu_p",1))); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("v_mb", "2", 2); - Eigen::Matrix v_mb(static_cast(2)); - (void) v_mb; // dummy to suppress unused var warning - - stan::math::initialize(v_mb, DUMMY_VAR__); - stan::math::fill(v_mb,DUMMY_VAR__); - validate_non_negative_index("v_mf", "6", 6); - Eigen::Matrix v_mf(static_cast(6)); - (void) v_mf; // dummy to suppress unused var warning - - stan::math::initialize(v_mf, DUMMY_VAR__); - stan::math::fill(v_mf,DUMMY_VAR__); - validate_non_negative_index("v_hybrid", "2", 2); - Eigen::Matrix v_hybrid(static_cast(2)); - (void) v_hybrid; // dummy to suppress unused var warning - - stan::math::initialize(v_hybrid, DUMMY_VAR__); - stan::math::fill(v_hybrid,DUMMY_VAR__); - local_scalar_t__ level1_prob_choice2; - (void) level1_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level1_prob_choice2, DUMMY_VAR__); - stan::math::fill(level1_prob_choice2,DUMMY_VAR__); - local_scalar_t__ level2_prob_choice2; - (void) level2_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level2_prob_choice2, DUMMY_VAR__); - stan::math::fill(level2_prob_choice2,DUMMY_VAR__); - int level1_choice_01(0); - (void) 
level1_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level1_choice_01, std::numeric_limits::min()); - int level2_choice_01(0); - (void) level2_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level2_choice_01, std::numeric_limits::min()); - - - stan::math::assign(v_mb, rep_vector(0.0,2)); - stan::math::assign(v_mf, rep_vector(0.0,6)); - stan::math::assign(v_hybrid, rep_vector(0.0,2)); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((trans_prob * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + ((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - (((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + (trans_prob * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,1,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,1,"v_mf",1))), - "assigning variable v_hybrid"); - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,2,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,2,"v_mf",1))), - "assigning variable v_hybrid"); - stan::math::assign(level1_choice_01, (get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2) - 1)); - if (as_bool(logical_eq(t,1))) 
{ - - stan::math::assign(level1_prob_choice2, inv_logit((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))))); - } else { - - stan::math::assign(level1_prob_choice2, inv_logit(((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))) + (get_base1(pi,i,"pi",1) * ((2 * get_base1(get_base1(level1_choice,i,"level1_choice",1),(t - 1),"level1_choice",2)) - 3))))); - } - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(level1_choice_01,level1_prob_choice2))), - "assigning variable log_lik"); - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a1,i,"a1",1) * (get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) - get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1))))), - "assigning variable v_mf"); - stan::math::assign(level2_choice_01, (1 - modulus(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))); - if (as_bool(logical_gt(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))) { - - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,6,"v_mf",1) - get_base1(v_mf,5,"v_mf",1))))); - } else { - - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,4,"v_mf",1) - get_base1(v_mf,3,"v_mf",1))))); - } - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 
stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(level2_choice_01,level2_prob_choice2))), - "assigning variable log_lik"); - stan::model::assign(y_pred_step1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(level1_prob_choice2, base_rng__), - "assigning variable y_pred_step1"); - stan::model::assign(y_pred_step2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(level2_prob_choice2, base_rng__), - "assigning variable y_pred_step2"); - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) + (get_base1(a2,i,"a2",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + ((get_base1(lambda,i,"lambda",1) * get_base1(a1,i,"a1",1)) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_a1",mu_a1,0); - check_less_or_equal(function__,"mu_a1",mu_a1,1); - 
check_greater_or_equal(function__,"mu_beta1",mu_beta1,0); - check_greater_or_equal(function__,"mu_a2",mu_a2,0); - check_less_or_equal(function__,"mu_a2",mu_a2,1); - check_greater_or_equal(function__,"mu_beta2",mu_beta2,0); - check_greater_or_equal(function__,"mu_pi",mu_pi,0); - check_less_or_equal(function__,"mu_pi",mu_pi,5); - check_greater_or_equal(function__,"mu_w",mu_w,0); - check_less_or_equal(function__,"mu_w",mu_w,1); - check_greater_or_equal(function__,"mu_lambda",mu_lambda,0); - check_less_or_equal(function__,"mu_lambda",mu_lambda,1); - - // write generated quantities - vars__.push_back(mu_a1); - vars__.push_back(mu_beta1); - vars__.push_back(mu_a2); - vars__.push_back(mu_beta2); - vars__.push_back(mu_pi); - vars__.push_back(mu_w); - vars__.push_back(mu_lambda); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred_step1[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred_step2[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string 
model_name() { - return "model_ts_par7"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 7; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 7; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a1_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta1_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a2_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta2_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a1" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta1" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step1" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step2" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 7; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 7; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a1_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta1_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a2_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta2_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a1" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta1" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step1" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step2" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_ug_bayes_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_ug_bayes"); - reader.add_event(166, 164, "end", "model_ug_bayes"); - return reader; -} - -class model_ug_bayes : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > offer; - vector > accept; - double initV; - double mu0; - double k0; - double sig20; - double nu0; -public: - model_ug_bayes(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_ug_bayes(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_ug_bayes_namespace::model_ug_bayes"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", 
context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("offer", "N", N); - validate_non_negative_index("offer", "T", T); - context__.validate_dims("data initialization", "offer", "double", context__.to_vec(N,T)); - validate_non_negative_index("offer", "N", N); - validate_non_negative_index("offer", "T", T); - offer = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("offer"); - pos__ = 0; - size_t offer_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < offer_limit_1__; ++i_1__) { - size_t offer_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < offer_limit_0__; ++i_0__) { - offer[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("accept", "N", N); - validate_non_negative_index("accept", "T", T); - context__.validate_dims("data initialization", "accept", "int", context__.to_vec(N,T)); - validate_non_negative_index("accept", "N", N); - validate_non_negative_index("accept", "T", T); - accept = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("accept"); - pos__ = 0; - size_t accept_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < accept_limit_1__; ++i_1__) { - size_t accept_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < accept_limit_0__; ++i_0__) { - accept[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - 
check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"accept[k0__][k1__]",accept[k0__][k1__],-(1)); - check_less_or_equal(function__,"accept[k0__][k1__]",accept[k0__][k1__],1); - } - } - // initialize data variables - initV = double(0); - stan::math::fill(initV,DUMMY_VAR__); - mu0 = double(0); - stan::math::fill(mu0,DUMMY_VAR__); - k0 = double(0); - stan::math::fill(k0,DUMMY_VAR__); - sig20 = double(0); - stan::math::fill(sig20,DUMMY_VAR__); - nu0 = double(0); - stan::math::fill(nu0,DUMMY_VAR__); - - stan::math::assign(initV, 0.0); - stan::math::assign(mu0, 10.0); - stan::math::assign(k0, 4.0); - stan::math::assign(sig20, 4.0); - stan::math::assign(nu0, 10.0); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("Beta_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("tau_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_ug_bayes() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) 
pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("Beta_pr"))) - throw std::runtime_error("variable Beta_pr missing"); - vals_r__ = context__.vals_r("Beta_pr"); - pos__ = 0U; - 
validate_non_negative_index("Beta_pr", "N", N); - context__.validate_dims("initialization", "Beta_pr", "vector_d", context__.to_vec(N)); - vector_d Beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Beta_pr: ") + e.what()); - } - - if (!(context__.contains_r("tau_pr"))) - throw std::runtime_error("variable tau_pr missing"); - vals_r__ = context__.vals_r("tau_pr"); - pos__ = 0U; - validate_non_negative_index("tau_pr", "N", N); - context__.validate_dims("initialization", "tau_pr", "vector_d", context__.to_vec(N)); - vector_d tau_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - tau_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(tau_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - 
else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = in__.vector_constrain(N); - - Eigen::Matrix Beta_pr; - (void) Beta_pr; // dummy to suppress unused var warning - if (jacobian__) - Beta_pr = in__.vector_constrain(N,lp__); - else - Beta_pr = in__.vector_constrain(N); - - Eigen::Matrix tau_pr; - (void) tau_pr; // dummy to suppress unused var warning - if (jacobian__) - tau_pr = in__.vector_constrain(N,lp__); - else - tau_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("alpha", "N", N); - vector alpha(N); - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("Beta", "N", N); - vector Beta(N); - stan::math::initialize(Beta, DUMMY_VAR__); - stan::math::fill(Beta,DUMMY_VAR__); - validate_non_negative_index("tau", "N", N); - vector tau(N); - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 20), - "assigning variable alpha"); - stan::model::assign(Beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Beta_pr,i,"Beta_pr",1)))) * 10), - "assigning variable Beta"); - stan::model::assign(tau, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) 
+ (get_base1(sigma,3,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * 10), - "assigning variable tau"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Beta[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(tau[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"alpha[k0__]",alpha[k0__],0); - check_less_or_equal(function__,"alpha[k0__]",alpha[k0__],20); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Beta[k0__]",Beta[k0__],0); - check_less_or_equal(function__,"Beta[k0__]",Beta[k0__],10); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"tau[k0__]",tau[k0__],0); - check_less_or_equal(function__,"tau[k0__]",tau[k0__],10); - } - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(alpha_pr, 0, 1.0)); - lp_accum__.add(normal_log(Beta_pr, 0, 1.0)); - lp_accum__.add(normal_log(tau_pr, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - { - local_scalar_t__ util; - (void) util; // dummy to suppress unused var warning - - stan::math::initialize(util, DUMMY_VAR__); - stan::math::fill(util,DUMMY_VAR__); - local_scalar_t__ mu_old; - 
(void) mu_old; // dummy to suppress unused var warning - - stan::math::initialize(mu_old, DUMMY_VAR__); - stan::math::fill(mu_old,DUMMY_VAR__); - local_scalar_t__ mu_new; - (void) mu_new; // dummy to suppress unused var warning - - stan::math::initialize(mu_new, DUMMY_VAR__); - stan::math::fill(mu_new,DUMMY_VAR__); - local_scalar_t__ k_old; - (void) k_old; // dummy to suppress unused var warning - - stan::math::initialize(k_old, DUMMY_VAR__); - stan::math::fill(k_old,DUMMY_VAR__); - local_scalar_t__ k_new; - (void) k_new; // dummy to suppress unused var warning - - stan::math::initialize(k_new, DUMMY_VAR__); - stan::math::fill(k_new,DUMMY_VAR__); - local_scalar_t__ sig2_old; - (void) sig2_old; // dummy to suppress unused var warning - - stan::math::initialize(sig2_old, DUMMY_VAR__); - stan::math::fill(sig2_old,DUMMY_VAR__); - local_scalar_t__ sig2_new; - (void) sig2_new; // dummy to suppress unused var warning - - stan::math::initialize(sig2_new, DUMMY_VAR__); - stan::math::fill(sig2_new,DUMMY_VAR__); - local_scalar_t__ nu_old; - (void) nu_old; // dummy to suppress unused var warning - - stan::math::initialize(nu_old, DUMMY_VAR__); - stan::math::fill(nu_old,DUMMY_VAR__); - local_scalar_t__ nu_new; - (void) nu_new; // dummy to suppress unused var warning - - stan::math::initialize(nu_new, DUMMY_VAR__); - stan::math::fill(nu_new,DUMMY_VAR__); - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - - - stan::math::assign(mu_old, mu0); - stan::math::assign(k_old, k0); - stan::math::assign(sig2_old, sig20); - stan::math::assign(nu_old, nu0); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::math::assign(k_new, (k_old + 1)); - stan::math::assign(nu_new, (nu_old + 1)); - stan::math::assign(mu_new, (((k_old / k_new) * mu_old) + ((1 / k_new) * get_base1(get_base1(offer,i,"offer",1),t,"offer",2)))); - stan::math::assign(sig2_new, (((nu_old / nu_new) * 
sig2_old) + (((1 / nu_new) * (k_old / k_new)) * pow((get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - mu_old),2)))); - stan::math::assign(PE, (get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - mu_old)); - stan::math::assign(util, ((get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - (get_base1(alpha,i,"alpha",1) * stan::math::fmax((mu_new - get_base1(get_base1(offer,i,"offer",1),t,"offer",2)),0.0))) - (get_base1(Beta,i,"Beta",1) * stan::math::fmax((get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - mu_new),0.0)))); - lp_accum__.add(bernoulli_logit_log(get_base1(get_base1(accept,i,"accept",1),t,"accept",2), (util * get_base1(tau,i,"tau",1)))); - stan::math::assign(mu_old, mu_new); - stan::math::assign(sig2_old, sig2_new); - stan::math::assign(k_old, k_new); - stan::math::assign(nu_old, nu_new); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("alpha_pr"); - names__.push_back("Beta_pr"); - names__.push_back("tau_pr"); - names__.push_back("alpha"); - names__.push_back("Beta"); - names__.push_back("tau"); - names__.push_back("mu_alpha"); - names__.push_back("mu_Beta"); - names__.push_back("mu_tau"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void 
get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_ug_bayes_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d Beta_pr = in__.vector_constrain(N); - vector_d tau_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; 
++k_0__) { - vars__.push_back(Beta_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("alpha", "N", N); - vector alpha(N); - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("Beta", "N", N); - vector Beta(N); - stan::math::initialize(Beta, DUMMY_VAR__); - stan::math::fill(Beta,DUMMY_VAR__); - validate_non_negative_index("tau", "N", N); - vector tau(N); - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 20), - "assigning variable alpha"); - stan::model::assign(Beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Beta_pr,i,"Beta_pr",1)))) * 10), - "assigning variable Beta"); - stan::model::assign(tau, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * 10), - "assigning variable tau"); - } - - // validate transformed parameters - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"alpha[k0__]",alpha[k0__],0); - check_less_or_equal(function__,"alpha[k0__]",alpha[k0__],20); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - 
check_greater_or_equal(function__,"Beta[k0__]",Beta[k0__],0); - check_less_or_equal(function__,"Beta[k0__]",Beta[k0__],10); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"tau[k0__]",tau[k0__],0); - check_less_or_equal(function__,"tau[k0__]",tau[k0__],10); - } - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Beta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - local_scalar_t__ mu_Beta; - (void) mu_Beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_Beta, DUMMY_VAR__); - stan::math::fill(mu_Beta,DUMMY_VAR__); - local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to suppress unused var warning - - stan::math::initialize(mu_tau, DUMMY_VAR__); - stan::math::fill(mu_tau,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_alpha, (Phi_approx(get_base1(mu_p,1,"mu_p",1)) * 20)); - stan::math::assign(mu_Beta, 
(Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 10)); - stan::math::assign(mu_tau, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 10)); - - for (int i = 1; i <= N; ++i) { - { - local_scalar_t__ util; - (void) util; // dummy to suppress unused var warning - - stan::math::initialize(util, DUMMY_VAR__); - stan::math::fill(util,DUMMY_VAR__); - local_scalar_t__ mu_old; - (void) mu_old; // dummy to suppress unused var warning - - stan::math::initialize(mu_old, DUMMY_VAR__); - stan::math::fill(mu_old,DUMMY_VAR__); - local_scalar_t__ mu_new; - (void) mu_new; // dummy to suppress unused var warning - - stan::math::initialize(mu_new, DUMMY_VAR__); - stan::math::fill(mu_new,DUMMY_VAR__); - local_scalar_t__ k_old; - (void) k_old; // dummy to suppress unused var warning - - stan::math::initialize(k_old, DUMMY_VAR__); - stan::math::fill(k_old,DUMMY_VAR__); - local_scalar_t__ k_new; - (void) k_new; // dummy to suppress unused var warning - - stan::math::initialize(k_new, DUMMY_VAR__); - stan::math::fill(k_new,DUMMY_VAR__); - local_scalar_t__ sig2_old; - (void) sig2_old; // dummy to suppress unused var warning - - stan::math::initialize(sig2_old, DUMMY_VAR__); - stan::math::fill(sig2_old,DUMMY_VAR__); - local_scalar_t__ sig2_new; - (void) sig2_new; // dummy to suppress unused var warning - - stan::math::initialize(sig2_new, DUMMY_VAR__); - stan::math::fill(sig2_new,DUMMY_VAR__); - local_scalar_t__ nu_old; - (void) nu_old; // dummy to suppress unused var warning - - stan::math::initialize(nu_old, DUMMY_VAR__); - stan::math::fill(nu_old,DUMMY_VAR__); - local_scalar_t__ nu_new; - (void) nu_new; // dummy to suppress unused var warning - - stan::math::initialize(nu_new, DUMMY_VAR__); - stan::math::fill(nu_new,DUMMY_VAR__); - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - - - stan::math::assign(mu_old, mu0); - stan::math::assign(k_old, k0); - stan::math::assign(sig2_old, sig20); - 
stan::math::assign(nu_old, nu0); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::math::assign(k_new, (k_old + 1)); - stan::math::assign(nu_new, (nu_old + 1)); - stan::math::assign(mu_new, (((k_old / k_new) * mu_old) + ((1 / k_new) * get_base1(get_base1(offer,i,"offer",1),t,"offer",2)))); - stan::math::assign(sig2_new, (((nu_old / nu_new) * sig2_old) + (((1 / nu_new) * (k_old / k_new)) * pow((get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - mu_old),2)))); - stan::math::assign(PE, (get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - mu_old)); - stan::math::assign(util, ((get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - (get_base1(alpha,i,"alpha",1) * stan::math::fmax((mu_new - get_base1(get_base1(offer,i,"offer",1),t,"offer",2)),0.0))) - (get_base1(Beta,i,"Beta",1) * stan::math::fmax((get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - mu_new),0.0)))); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_logit_log(get_base1(get_base1(accept,i,"accept",1),t,"accept",2),(util * get_base1(tau,i,"tau",1))))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(inv_logit((util * get_base1(tau,i,"tau",1))), base_rng__), - "assigning variable y_pred"); - stan::math::assign(mu_old, mu_new); - stan::math::assign(sig2_old, sig2_new); - stan::math::assign(k_old, k_new); - stan::math::assign(nu_old, nu_new); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_alpha",mu_alpha,0); - check_less_or_equal(function__,"mu_alpha",mu_alpha,20); - 
check_greater_or_equal(function__,"mu_Beta",mu_Beta,0); - check_less_or_equal(function__,"mu_Beta",mu_Beta,10); - check_greater_or_equal(function__,"mu_tau",mu_tau,0); - check_less_or_equal(function__,"mu_tau",mu_tau,10); - - // write generated quantities - vars__.push_back(mu_alpha); - vars__.push_back(mu_Beta); - vars__.push_back(mu_tau); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_ug_bayes"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Beta" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_ug_delta_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_ug_delta"); - reader.add_event(128, 126, "end", "model_ug_delta"); - return reader; -} - -class model_ug_delta : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > offer; - vector > accept; -public: - model_ug_delta(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_ug_delta(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_ug_delta_namespace::model_ug_delta"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ 
= 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("offer", "N", N); - validate_non_negative_index("offer", "T", T); - context__.validate_dims("data initialization", "offer", "double", context__.to_vec(N,T)); - validate_non_negative_index("offer", "N", N); - validate_non_negative_index("offer", "T", T); - offer = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("offer"); - pos__ = 0; - size_t offer_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < offer_limit_1__; ++i_1__) { - size_t offer_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < offer_limit_0__; ++i_0__) { - offer[i_0__][i_1__] = vals_r__[pos__++]; - } - } - validate_non_negative_index("accept", "N", N); - validate_non_negative_index("accept", "T", T); - context__.validate_dims("data initialization", "accept", "int", context__.to_vec(N,T)); - validate_non_negative_index("accept", "N", N); - validate_non_negative_index("accept", "T", T); - accept = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("accept"); - pos__ = 0; - size_t accept_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < accept_limit_1__; ++i_1__) { - size_t accept_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < accept_limit_0__; ++i_0__) { - accept[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; 
k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"accept[k0__][k1__]",accept[k0__][k1__],-(1)); - check_less_or_equal(function__,"accept[k0__][k1__]",accept[k0__][k1__],1); - } - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("ep_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("tau_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_ug_delta() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw 
std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("ep_pr"))) - throw std::runtime_error("variable ep_pr missing"); - vals_r__ = context__.vals_r("ep_pr"); - pos__ = 0U; - validate_non_negative_index("ep_pr", "N", N); - context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); - vector_d ep_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ep_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ep_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("tau_pr"))) - throw std::runtime_error("variable tau_pr missing"); - vals_r__ = context__.vals_r("tau_pr"); - pos__ = 0U; - 
validate_non_negative_index("tau_pr", "N", N); - context__.validate_dims("initialization", "tau_pr", "vector_d", context__.to_vec(N)); - vector_d tau_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - tau_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(tau_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix ep_pr; - (void) ep_pr; // dummy to suppress unused var warning - if (jacobian__) - ep_pr = in__.vector_constrain(N,lp__); - else - ep_pr = in__.vector_constrain(N); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = 
in__.vector_constrain(N); - - Eigen::Matrix tau_pr; - (void) tau_pr; // dummy to suppress unused var warning - if (jacobian__) - tau_pr = in__.vector_constrain(N,lp__); - else - tau_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("ep", "N", N); - vector ep(N); - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - validate_non_negative_index("alpha", "N", N); - vector alpha(N); - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("tau", "N", N); - vector tau(N); - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - stan::model::assign(tau, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * 10), - "assigning variable tau"); - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 20), - "assigning variable alpha"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ep[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ep" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw 
std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(tau[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"ep[k0__]",ep[k0__],0); - check_less_or_equal(function__,"ep[k0__]",ep[k0__],1); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"alpha[k0__]",alpha[k0__],0); - check_less_or_equal(function__,"alpha[k0__]",alpha[k0__],20); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"tau[k0__]",tau[k0__],0); - check_less_or_equal(function__,"tau[k0__]",tau[k0__],10); - } - - // model body - - lp_accum__.add(normal_log(mu_p, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(ep_pr, 0, 1.0)); - lp_accum__.add(normal_log(alpha_pr, 0, 1.0)); - lp_accum__.add(normal_log(tau_pr, 0, 1.0)); - for (int i = 1; i <= N; ++i) { - { - local_scalar_t__ f; - (void) f; // dummy to suppress unused var warning - - stan::math::initialize(f, DUMMY_VAR__); - stan::math::fill(f,DUMMY_VAR__); - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - local_scalar_t__ util; - (void) util; // dummy to suppress unused var warning - - stan::math::initialize(util, DUMMY_VAR__); - stan::math::fill(util,DUMMY_VAR__); - - - stan::math::assign(f, 10.0); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::math::assign(PE, (get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - f)); - stan::math::assign(util, (get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - (get_base1(alpha,i,"alpha",1) * stan::math::fmax((f - 
get_base1(get_base1(offer,i,"offer",1),t,"offer",2)),0.0)))); - lp_accum__.add(bernoulli_logit_log(get_base1(get_base1(accept,i,"accept",1),t,"accept",2), (util * get_base1(tau,i,"tau",1)))); - stan::math::assign(f, stan::model::deep_copy((f + (get_base1(ep,i,"ep",1) * PE)))); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("ep_pr"); - names__.push_back("alpha_pr"); - names__.push_back("tau_pr"); - names__.push_back("ep"); - names__.push_back("alpha"); - names__.push_back("tau"); - names__.push_back("mu_ep"); - names__.push_back("mu_tau"); - names__.push_back("mu_alpha"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - 
dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_ug_delta_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d ep_pr = in__.vector_constrain(N); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d tau_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("ep", "N", N); - vector ep(N); - stan::math::initialize(ep, DUMMY_VAR__); - 
stan::math::fill(ep,DUMMY_VAR__); - validate_non_negative_index("alpha", "N", N); - vector alpha(N); - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - validate_non_negative_index("tau", "N", N); - vector tau(N); - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - stan::model::assign(tau, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * 10), - "assigning variable tau"); - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 20), - "assigning variable alpha"); - } - - // validate transformed parameters - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"ep[k0__]",ep[k0__],0); - check_less_or_equal(function__,"ep[k0__]",ep[k0__],1); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"alpha[k0__]",alpha[k0__],0); - check_less_or_equal(function__,"alpha[k0__]",alpha[k0__],20); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"tau[k0__]",tau[k0__],0); - check_less_or_equal(function__,"tau[k0__]",tau[k0__],10); - } - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - } - if 
(!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_ep; - (void) mu_ep; // dummy to suppress unused var warning - - stan::math::initialize(mu_ep, DUMMY_VAR__); - stan::math::fill(mu_ep,DUMMY_VAR__); - local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to suppress unused var warning - - stan::math::initialize(mu_tau, DUMMY_VAR__); - stan::math::fill(mu_tau,DUMMY_VAR__); - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - stan::math::assign(mu_ep, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - stan::math::assign(mu_tau, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 10)); - stan::math::assign(mu_alpha, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 20)); - - for (int i = 1; i <= N; ++i) { - { - local_scalar_t__ f; - (void) f; // dummy to suppress unused var warning - - stan::math::initialize(f, DUMMY_VAR__); - stan::math::fill(f,DUMMY_VAR__); - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - local_scalar_t__ util; - (void) util; // dummy to suppress unused var warning - - stan::math::initialize(util, DUMMY_VAR__); - stan::math::fill(util,DUMMY_VAR__); 
- - - stan::math::assign(f, 10.0); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0.0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::math::assign(PE, (get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - f)); - stan::math::assign(util, (get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - (get_base1(alpha,i,"alpha",1) * stan::math::fmax((f - get_base1(get_base1(offer,i,"offer",1),t,"offer",2)),0.0)))); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_logit_log(get_base1(get_base1(accept,i,"accept",1),t,"accept",2),(util * get_base1(tau,i,"tau",1))))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(inv_logit((util * get_base1(tau,i,"tau",1))), base_rng__), - "assigning variable y_pred"); - stan::math::assign(f, stan::model::deep_copy((f + (get_base1(ep,i,"ep",1) * PE)))); - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_ep",mu_ep,0); - check_less_or_equal(function__,"mu_ep",mu_ep,1); - check_greater_or_equal(function__,"mu_tau",mu_tau,0); - check_less_or_equal(function__,"mu_tau",mu_tau,10); - check_greater_or_equal(function__,"mu_alpha",mu_alpha,0); - check_less_or_equal(function__,"mu_alpha",mu_alpha,20); - - // write generated quantities - vars__.push_back(mu_ep); - vars__.push_back(mu_tau); - vars__.push_back(mu_alpha); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - 
stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_ug_delta"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_wcs_sql_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_wcs_sql"); - reader.add_event(168, 166, "end", "model_wcs_sql"); - return reader; -} - -class model_wcs_sql : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > > choice; - vector > outcome; - vector > choice_match_att; - vector deck_match_rule; - matrix_d initAtt; - matrix_d unit; -public: - model_wcs_sql(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_wcs_sql(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* 
pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_wcs_sql_namespace::model_wcs_sql"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "4", 4); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,4,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "4", 4); - validate_non_negative_index("choice", "T", T); - choice = std::vector > >(N,std::vector 
>(4,std::vector(T,int(0)))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_2__ = T; - for (size_t i_2__ = 0; i_2__ < choice_limit_2__; ++i_2__) { - size_t choice_limit_1__ = 4; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__][i_2__] = vals_i__[pos__++]; - } - } - } - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "int", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_i__[pos__++]; - } - } - validate_non_negative_index("choice_match_att", "N", N); - validate_non_negative_index("choice_match_att", "T", T); - validate_non_negative_index("choice_match_att", "1", 1); - validate_non_negative_index("choice_match_att", "3", 3); - context__.validate_dims("data initialization", "choice_match_att", "matrix_d", context__.to_vec(N,T,1,3)); - validate_non_negative_index("choice_match_att", "N", N); - validate_non_negative_index("choice_match_att", "T", T); - validate_non_negative_index("choice_match_att", "1", 1); - validate_non_negative_index("choice_match_att", "3", 3); - choice_match_att = std::vector >(N,std::vector(T,matrix_d(static_cast(1),static_cast(3)))); - vals_r__ = context__.vals_r("choice_match_att"); - pos__ = 0; - size_t choice_match_att_m_mat_lim__ = 1; - size_t choice_match_att_n_mat_lim__ = 3; - for (size_t n_mat__ = 0; n_mat__ < choice_match_att_n_mat_lim__; ++n_mat__) { - for (size_t m_mat__ = 0; 
m_mat__ < choice_match_att_m_mat_lim__; ++m_mat__) { - size_t choice_match_att_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_match_att_limit_1__; ++i_1__) { - size_t choice_match_att_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_match_att_limit_0__; ++i_0__) { - choice_match_att[i_0__][i_1__](m_mat__,n_mat__) = vals_r__[pos__++]; - } - } - } - } - validate_non_negative_index("deck_match_rule", "T", T); - validate_non_negative_index("deck_match_rule", "3", 3); - validate_non_negative_index("deck_match_rule", "4", 4); - context__.validate_dims("data initialization", "deck_match_rule", "matrix_d", context__.to_vec(T,3,4)); - validate_non_negative_index("deck_match_rule", "T", T); - validate_non_negative_index("deck_match_rule", "3", 3); - validate_non_negative_index("deck_match_rule", "4", 4); - deck_match_rule = std::vector(T,matrix_d(static_cast(3),static_cast(4))); - vals_r__ = context__.vals_r("deck_match_rule"); - pos__ = 0; - size_t deck_match_rule_m_mat_lim__ = 3; - size_t deck_match_rule_n_mat_lim__ = 4; - for (size_t n_mat__ = 0; n_mat__ < deck_match_rule_n_mat_lim__; ++n_mat__) { - for (size_t m_mat__ = 0; m_mat__ < deck_match_rule_m_mat_lim__; ++m_mat__) { - size_t deck_match_rule_limit_0__ = T; - for (size_t i_0__ = 0; i_0__ < deck_match_rule_limit_0__; ++i_0__) { - deck_match_rule[i_0__](m_mat__,n_mat__) = vals_r__[pos__++]; - } - } - } - - // validate, data variables - check_greater_or_equal(function__,"N",N,1); - check_greater_or_equal(function__,"T",T,1); - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],40); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < 4; ++k1__) { - for (int k2__ = 0; k2__ < T; ++k2__) { - check_greater_or_equal(function__,"choice[k0__][k1__][k2__]",choice[k0__][k1__][k2__],0); - check_less_or_equal(function__,"choice[k0__][k1__][k2__]",choice[k0__][k1__][k2__],4); - } - } - 
} - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"outcome[k0__][k1__]",outcome[k0__][k1__],-(1)); - check_less_or_equal(function__,"outcome[k0__][k1__]",outcome[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice_match_att[k0__][k1__]",choice_match_att[k0__][k1__],0); - check_less_or_equal(function__,"choice_match_att[k0__][k1__]",choice_match_att[k0__][k1__],1); - } - } - for (int k0__ = 0; k0__ < T; ++k0__) { - check_greater_or_equal(function__,"deck_match_rule[k0__]",deck_match_rule[k0__],0); - check_less_or_equal(function__,"deck_match_rule[k0__]",deck_match_rule[k0__],1); - } - // initialize data variables - validate_non_negative_index("initAtt", "1", 1); - validate_non_negative_index("initAtt", "3", 3); - initAtt = matrix_d(static_cast(1),static_cast(3)); - stan::math::fill(initAtt,DUMMY_VAR__); - validate_non_negative_index("unit", "1", 1); - validate_non_negative_index("unit", "3", 3); - unit = matrix_d(static_cast(1),static_cast(3)); - stan::math::fill(unit,DUMMY_VAR__); - - stan::math::assign(initAtt, rep_matrix((1.0 / 3.0),1,3)); - stan::math::assign(unit, rep_matrix(1.0,1,3)); - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - validate_non_negative_index("mu_pr", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - validate_non_negative_index("r_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("p_pr", "N", N); - num_params_r__ += N; - validate_non_negative_index("d_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - 
} - } - - ~model_wcs_sql() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_pr"))) - throw std::runtime_error("variable mu_pr missing"); - vals_r__ = context__.vals_r("mu_pr"); - pos__ = 0U; - validate_non_negative_index("mu_pr", "3", 3); - context__.validate_dims("initialization", "mu_pr", "vector_d", context__.to_vec(3)); - vector_d mu_pr(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_pr: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("r_pr"))) - throw std::runtime_error("variable r_pr missing"); - vals_r__ = context__.vals_r("r_pr"); - pos__ = 0U; - validate_non_negative_index("r_pr", "N", N); - context__.validate_dims("initialization", "r_pr", "vector_d", context__.to_vec(N)); - vector_d r_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - r_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(r_pr); - } catch (const std::exception& e) { - throw 
std::runtime_error(std::string("Error transforming variable r_pr: ") + e.what()); - } - - if (!(context__.contains_r("p_pr"))) - throw std::runtime_error("variable p_pr missing"); - vals_r__ = context__.vals_r("p_pr"); - pos__ = 0U; - validate_non_negative_index("p_pr", "N", N); - context__.validate_dims("initialization", "p_pr", "vector_d", context__.to_vec(N)); - vector_d p_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - p_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(p_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable p_pr: ") + e.what()); - } - - if (!(context__.contains_r("d_pr"))) - throw std::runtime_error("variable d_pr missing"); - vals_r__ = context__.vals_r("d_pr"); - pos__ = 0U; - validate_non_negative_index("d_pr", "N", N); - context__.validate_dims("initialization", "d_pr", "vector_d", context__.to_vec(N)); - vector_d d_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - d_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(d_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable d_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // 
model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_pr; - (void) mu_pr; // dummy to suppress unused var warning - if (jacobian__) - mu_pr = in__.vector_constrain(3,lp__); - else - mu_pr = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix r_pr; - (void) r_pr; // dummy to suppress unused var warning - if (jacobian__) - r_pr = in__.vector_constrain(N,lp__); - else - r_pr = in__.vector_constrain(N); - - Eigen::Matrix p_pr; - (void) p_pr; // dummy to suppress unused var warning - if (jacobian__) - p_pr = in__.vector_constrain(N,lp__); - else - p_pr = in__.vector_constrain(N); - - Eigen::Matrix d_pr; - (void) d_pr; // dummy to suppress unused var warning - if (jacobian__) - d_pr = in__.vector_constrain(N,lp__); - else - d_pr = in__.vector_constrain(N); - - - // transformed parameters - validate_non_negative_index("r", "N", N); - Eigen::Matrix r(static_cast(N)); - (void) r; // dummy to suppress unused var warning - - stan::math::initialize(r, DUMMY_VAR__); - stan::math::fill(r,DUMMY_VAR__); - validate_non_negative_index("p", "N", N); - Eigen::Matrix p(static_cast(N)); - (void) p; // dummy to suppress unused var warning - - stan::math::initialize(p, DUMMY_VAR__); - stan::math::fill(p,DUMMY_VAR__); - validate_non_negative_index("d", "N", N); - Eigen::Matrix d(static_cast(N)); - (void) d; // dummy to suppress unused var warning - - stan::math::initialize(d, DUMMY_VAR__); - stan::math::fill(d,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(r, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_pr,1,"mu_pr",1) + (get_base1(sigma,1,"sigma",1) * get_base1(r_pr,i,"r_pr",1)))), - "assigning variable r"); - stan::model::assign(p, - stan::model::cons_list(stan::model::index_uni(i), 
stan::model::nil_index_list()), - Phi_approx((get_base1(mu_pr,2,"mu_pr",1) + (get_base1(sigma,2,"sigma",1) * get_base1(p_pr,i,"p_pr",1)))), - "assigning variable p"); - stan::model::assign(d, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_pr,3,"mu_pr",1) + (get_base1(sigma,3,"sigma",1) * get_base1(d_pr,i,"d_pr",1)))) * 5), - "assigning variable d"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(r(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: r" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(p(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: p" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(d(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: d" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - check_greater_or_equal(function__,"r",r,0); - check_less_or_equal(function__,"r",r,1); - check_greater_or_equal(function__,"p",p,0); - check_less_or_equal(function__,"p",p,1); - check_greater_or_equal(function__,"d",d,0); - - // model body - - lp_accum__.add(normal_log(mu_pr, 0, 1)); - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - lp_accum__.add(normal_log(r_pr, 0, 1)); - lp_accum__.add(normal_log(p_pr, 0, 1)); - lp_accum__.add(normal_log(d_pr, 0, 1)); - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("pred_prob_mat", "4", 4); - Eigen::Matrix pred_prob_mat(static_cast(4)); - (void) pred_prob_mat; // dummy to suppress unused var warning - - 
stan::math::initialize(pred_prob_mat, DUMMY_VAR__); - stan::math::fill(pred_prob_mat,DUMMY_VAR__); - validate_non_negative_index("subj_att", "1", 1); - validate_non_negative_index("subj_att", "3", 3); - Eigen::Matrix subj_att(static_cast(1),static_cast(3)); - (void) subj_att; // dummy to suppress unused var warning - - stan::math::initialize(subj_att, DUMMY_VAR__); - stan::math::fill(subj_att,DUMMY_VAR__); - validate_non_negative_index("att_signal", "1", 1); - validate_non_negative_index("att_signal", "3", 3); - Eigen::Matrix att_signal(static_cast(1),static_cast(3)); - (void) att_signal; // dummy to suppress unused var warning - - stan::math::initialize(att_signal, DUMMY_VAR__); - stan::math::fill(att_signal,DUMMY_VAR__); - validate_non_negative_index("tmpatt", "1", 1); - validate_non_negative_index("tmpatt", "3", 3); - Eigen::Matrix tmpatt(static_cast(1),static_cast(3)); - (void) tmpatt; // dummy to suppress unused var warning - - stan::math::initialize(tmpatt, DUMMY_VAR__); - stan::math::fill(tmpatt,DUMMY_VAR__); - validate_non_negative_index("tmpp", "4", 4); - Eigen::Matrix tmpp(static_cast(4)); - (void) tmpp; // dummy to suppress unused var warning - - stan::math::initialize(tmpp, DUMMY_VAR__); - stan::math::fill(tmpp,DUMMY_VAR__); - - - stan::math::assign(subj_att, initAtt); - stan::math::assign(pred_prob_mat, to_vector(multiply(subj_att,stan::model::rvalue(deck_match_rule, stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list()))), "deck_match_rule")))); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - lp_accum__.add(multinomial_log(stan::model::rvalue(choice, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), "choice"), pred_prob_mat)); - if 
(as_bool(logical_eq(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),1))) { - - stan::math::assign(att_signal, elt_multiply(subj_att,get_base1(get_base1(choice_match_att,i,"choice_match_att",1),t,"choice_match_att",2))); - stan::math::assign(att_signal, stan::model::deep_copy(divide(att_signal,sum(att_signal)))); - stan::math::assign(tmpatt, add(multiply((1.0 - get_base1(r,i,"r",1)),subj_att),multiply(get_base1(r,i,"r",1),att_signal))); - } else { - - stan::math::assign(att_signal, elt_multiply(subj_att,subtract(unit,get_base1(get_base1(choice_match_att,i,"choice_match_att",1),t,"choice_match_att",2)))); - stan::math::assign(att_signal, stan::model::deep_copy(divide(att_signal,sum(att_signal)))); - stan::math::assign(tmpatt, add(multiply((1.0 - get_base1(p,i,"p",1)),subj_att),multiply(get_base1(p,i,"p",1),att_signal))); - } - stan::math::assign(subj_att, add(multiply(divide(tmpatt,sum(tmpatt)),0.99980000000000002),0.0001)); - stan::model::assign(tmpatt, - stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list())), - pow(get_base1(subj_att,1,1,"subj_att",1),get_base1(d,i,"d",1)), - "assigning variable tmpatt"); - stan::model::assign(tmpatt, - stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list())), - pow(get_base1(subj_att,1,2,"subj_att",1),get_base1(d,i,"d",1)), - "assigning variable tmpatt"); - stan::model::assign(tmpatt, - stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_uni(3), stan::model::nil_index_list())), - pow(get_base1(subj_att,1,3,"subj_att",1),get_base1(d,i,"d",1)), - "assigning variable tmpatt"); - if (as_bool(logical_lt(t,get_base1(Tsubj,i,"Tsubj",1)))) { - - stan::math::assign(tmpp, add(multiply(to_vector(multiply(tmpatt,stan::model::rvalue(deck_match_rule, stan::model::cons_list(stan::model::index_uni((t + 1)), 
stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list()))), "deck_match_rule"))),0.99980000000000002),0.0001)); - stan::math::assign(pred_prob_mat, divide(tmpp,sum(tmpp))); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_pr"); - names__.push_back("sigma"); - names__.push_back("r_pr"); - names__.push_back("p_pr"); - names__.push_back("d_pr"); - names__.push_back("r"); - names__.push_back("p"); - names__.push_back("d"); - names__.push_back("mu_r"); - names__.push_back("mu_p"); - names__.push_back("mu_d"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - 
dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(4); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_wcs_sql_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_pr = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d r_pr = in__.vector_constrain(N); - vector_d p_pr = in__.vector_constrain(N); - vector_d d_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(r_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(p_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(d_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - validate_non_negative_index("r", "N", N); - Eigen::Matrix r(static_cast(N)); - (void) r; // dummy to suppress unused var warning - - stan::math::initialize(r, DUMMY_VAR__); 
- stan::math::fill(r,DUMMY_VAR__); - validate_non_negative_index("p", "N", N); - Eigen::Matrix p(static_cast(N)); - (void) p; // dummy to suppress unused var warning - - stan::math::initialize(p, DUMMY_VAR__); - stan::math::fill(p,DUMMY_VAR__); - validate_non_negative_index("d", "N", N); - Eigen::Matrix d(static_cast(N)); - (void) d; // dummy to suppress unused var warning - - stan::math::initialize(d, DUMMY_VAR__); - stan::math::fill(d,DUMMY_VAR__); - - - for (int i = 1; i <= N; ++i) { - - stan::model::assign(r, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_pr,1,"mu_pr",1) + (get_base1(sigma,1,"sigma",1) * get_base1(r_pr,i,"r_pr",1)))), - "assigning variable r"); - stan::model::assign(p, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_pr,2,"mu_pr",1) + (get_base1(sigma,2,"sigma",1) * get_base1(p_pr,i,"p_pr",1)))), - "assigning variable p"); - stan::model::assign(d, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_pr,3,"mu_pr",1) + (get_base1(sigma,3,"sigma",1) * get_base1(d_pr,i,"d_pr",1)))) * 5), - "assigning variable d"); - } - - // validate transformed parameters - check_greater_or_equal(function__,"r",r,0); - check_less_or_equal(function__,"r",r,1); - check_greater_or_equal(function__,"p",p,0); - check_less_or_equal(function__,"p",p,1); - check_greater_or_equal(function__,"d",d,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(r[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(d[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - local_scalar_t__ mu_r; - (void) mu_r; // dummy to suppress unused var warning - - stan::math::initialize(mu_r, 
DUMMY_VAR__); - stan::math::fill(mu_r,DUMMY_VAR__); - local_scalar_t__ mu_p; - (void) mu_p; // dummy to suppress unused var warning - - stan::math::initialize(mu_p, DUMMY_VAR__); - stan::math::fill(mu_p,DUMMY_VAR__); - local_scalar_t__ mu_d; - (void) mu_d; // dummy to suppress unused var warning - - stan::math::initialize(mu_d, DUMMY_VAR__); - stan::math::fill(mu_d,DUMMY_VAR__); - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "4", 4); - validate_non_negative_index("y_pred", "T", T); - vector > > y_pred(N, (vector >(4, (vector(T, 0))))); - stan::math::fill(y_pred, std::numeric_limits::min()); - - - for (int i = 1; i <= N; ++i) { - - for (int t = 1; t <= T; ++t) { - - for (int deck = 1; deck <= 4; ++deck) { - - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(deck), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - -(1), - "assigning variable y_pred"); - } - } - } - stan::math::assign(mu_r, Phi_approx(get_base1(mu_pr,1,"mu_pr",1))); - stan::math::assign(mu_p, Phi_approx(get_base1(mu_pr,2,"mu_pr",1))); - stan::math::assign(mu_d, (Phi_approx(get_base1(mu_pr,3,"mu_pr",1)) * 5)); - - for (int i = 1; i <= N; ++i) { - { - validate_non_negative_index("subj_att", "1", 1); - validate_non_negative_index("subj_att", "3", 3); - Eigen::Matrix subj_att(static_cast(1),static_cast(3)); - (void) subj_att; // dummy to suppress unused var warning - - stan::math::initialize(subj_att, DUMMY_VAR__); - stan::math::fill(subj_att,DUMMY_VAR__); - validate_non_negative_index("att_signal", "1", 1); - validate_non_negative_index("att_signal", "3", 3); - Eigen::Matrix att_signal(static_cast(1),static_cast(3)); - (void) att_signal; // dummy to suppress unused var warning - - 
stan::math::initialize(att_signal, DUMMY_VAR__); - stan::math::fill(att_signal,DUMMY_VAR__); - validate_non_negative_index("pred_prob_mat", "4", 4); - Eigen::Matrix pred_prob_mat(static_cast(4)); - (void) pred_prob_mat; // dummy to suppress unused var warning - - stan::math::initialize(pred_prob_mat, DUMMY_VAR__); - stan::math::fill(pred_prob_mat,DUMMY_VAR__); - validate_non_negative_index("tmpatt", "1", 1); - validate_non_negative_index("tmpatt", "3", 3); - Eigen::Matrix tmpatt(static_cast(1),static_cast(3)); - (void) tmpatt; // dummy to suppress unused var warning - - stan::math::initialize(tmpatt, DUMMY_VAR__); - stan::math::fill(tmpatt,DUMMY_VAR__); - validate_non_negative_index("tmpp", "4", 4); - Eigen::Matrix tmpp(static_cast(4)); - (void) tmpp; // dummy to suppress unused var warning - - stan::math::initialize(tmpp, DUMMY_VAR__); - stan::math::fill(tmpp,DUMMY_VAR__); - - - stan::math::assign(subj_att, initAtt); - stan::math::assign(pred_prob_mat, to_vector(multiply(subj_att,stan::model::rvalue(deck_match_rule, stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list()))), "deck_match_rule")))); - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + multinomial_log(stan::model::rvalue(choice, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), "choice"),pred_prob_mat))), - "assigning variable log_lik"); - stan::model::assign(y_pred, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - multinomial_rng(pred_prob_mat,1, base_rng__), - "assigning variable y_pred"); - if (as_bool(logical_eq(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),1))) { - - stan::math::assign(att_signal, elt_multiply(subj_att,get_base1(get_base1(choice_match_att,i,"choice_match_att",1),t,"choice_match_att",2))); - stan::math::assign(att_signal, stan::model::deep_copy(divide(att_signal,sum(att_signal)))); - stan::math::assign(tmpatt, add(multiply((1.0 - get_base1(r,i,"r",1)),subj_att),multiply(get_base1(r,i,"r",1),att_signal))); - } else { - - stan::math::assign(att_signal, elt_multiply(subj_att,subtract(unit,get_base1(get_base1(choice_match_att,i,"choice_match_att",1),t,"choice_match_att",2)))); - stan::math::assign(att_signal, stan::model::deep_copy(divide(att_signal,sum(att_signal)))); - stan::math::assign(tmpatt, add(multiply((1.0 - get_base1(p,i,"p",1)),subj_att),multiply(get_base1(p,i,"p",1),att_signal))); - } - stan::math::assign(subj_att, add(multiply(divide(tmpatt,sum(tmpatt)),0.99980000000000002),0.0001)); - stan::model::assign(tmpatt, - stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list())), - pow(get_base1(subj_att,1,1,"subj_att",1),get_base1(d,i,"d",1)), - "assigning variable tmpatt"); - stan::model::assign(tmpatt, - stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list())), - pow(get_base1(subj_att,1,2,"subj_att",1),get_base1(d,i,"d",1)), - "assigning variable tmpatt"); - stan::model::assign(tmpatt, - stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_uni(3), stan::model::nil_index_list())), - pow(get_base1(subj_att,1,3,"subj_att",1),get_base1(d,i,"d",1)), - "assigning variable 
tmpatt"); - if (as_bool(logical_lt(t,get_base1(Tsubj,i,"Tsubj",1)))) { - - stan::math::assign(tmpp, add(multiply(to_vector(multiply(tmpatt,stan::model::rvalue(deck_match_rule, stan::model::cons_list(stan::model::index_uni((t + 1)), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list()))), "deck_match_rule"))),0.99980000000000002),0.0001)); - stan::math::assign(pred_prob_mat, divide(tmpp,sum(tmpp))); - } - } - } - } - - // validate generated quantities - check_greater_or_equal(function__,"mu_r",mu_r,0); - check_less_or_equal(function__,"mu_r",mu_r,1); - check_greater_or_equal(function__,"mu_p",mu_p,0); - check_less_or_equal(function__,"mu_p",mu_p,5); - check_greater_or_equal(function__,"mu_d",mu_d,0); - check_less_or_equal(function__,"mu_d",mu_d,5); - - // write generated quantities - vars__.push_back(mu_r); - vars__.push_back(mu_p); - vars__.push_back(mu_d); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < 4; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__][k_2__]); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < 
vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_wcs_sql"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "p_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "d_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "d" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_r"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_d"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= 4; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "p_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "d_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "d" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_r"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_d"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= 4; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - -}; // model - -} - - - - -#endif diff --git a/src/include/models.hpp.bak b/src/include/models.hpp.bak deleted file mode 100755 index a509d56c..00000000 --- a/src/include/models.hpp.bak +++ /dev/null @@ -1,55681 +0,0 @@ -/* - hBayesDM is distributed under the terms of the GNU General Public - License but without any warranty. See the GNU General Public - License for more details. -*/ -#ifndef MODELS_HPP -#define MODELS_HPP -#define STAN__SERVICES__COMMAND_HPP -#include -// Code generated by Stan version 2.18.0 - -#include - -namespace model_bandit2arm_delta_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_bandit2arm_delta"); - reader.add_event(108, 106, "end", "model_bandit2arm_delta"); - return reader; -} - -class model_bandit2arm_delta : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > outcome; - vector_d initV; -public: - model_bandit2arm_delta(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_bandit2arm_delta(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = 
"model_bandit2arm_delta_namespace::model_bandit2arm_delta"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("outcome", "N", N); - 
validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],2); - } - } - current_statement_begin__ = 6; - // initialize data variables - current_statement_begin__ = 9; - validate_non_negative_index("initV", "2", 2); - initV = vector_d(static_cast(2)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 10; - stan::math::assign(initV, rep_vector(0.0,2)); - - // validate transformed data - current_statement_begin__ = 9; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 15; - validate_non_negative_index("mu_p", "2", 2); - num_params_r__ += 2; - current_statement_begin__ = 16; - validate_non_negative_index("sigma", "2", 2); - num_params_r__ += 2; - current_statement_begin__ = 19; - 
validate_non_negative_index("A_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 20; - validate_non_negative_index("tau_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_bandit2arm_delta() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "2", 2); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(2)); - vector_d mu_p(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "2", 2); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(2)); - vector_d sigma(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("A_pr"))) - throw 
std::runtime_error("variable A_pr missing"); - vals_r__ = context__.vals_r("A_pr"); - pos__ = 0U; - validate_non_negative_index("A_pr", "N", N); - context__.validate_dims("initialization", "A_pr", "vector_d", context__.to_vec(N)); - vector_d A_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - A_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(A_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable A_pr: ") + e.what()); - } - - if (!(context__.contains_r("tau_pr"))) - throw std::runtime_error("variable tau_pr missing"); - vals_r__ = context__.vals_r("tau_pr"); - pos__ = 0U; - validate_non_negative_index("tau_pr", "N", N); - context__.validate_dims("initialization", "tau_pr", "vector_d", context__.to_vec(N)); - vector_d tau_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - tau_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(tau_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to 
suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(2,lp__); - else - mu_p = in__.vector_constrain(2); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,2,lp__); - else - sigma = in__.vector_lb_constrain(0,2); - - Eigen::Matrix A_pr; - (void) A_pr; // dummy to suppress unused var warning - if (jacobian__) - A_pr = in__.vector_constrain(N,lp__); - else - A_pr = in__.vector_constrain(N); - - Eigen::Matrix tau_pr; - (void) tau_pr; // dummy to suppress unused var warning - if (jacobian__) - tau_pr = in__.vector_constrain(N,lp__); - else - tau_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 24; - validate_non_negative_index("A", "N", N); - Eigen::Matrix A(static_cast(N)); - (void) A; // dummy to suppress unused var warning - - stan::math::initialize(A, DUMMY_VAR__); - stan::math::fill(A,DUMMY_VAR__); - current_statement_begin__ = 25; - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - current_statement_begin__ = 27; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 28; - stan::model::assign(A, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(A_pr,i,"A_pr",1)))), - "assigning variable A"); - current_statement_begin__ = 29; - stan::model::assign(tau, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * 5), - "assigning variable tau"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(A(i0__))) { - 
std::stringstream msg__; - msg__ << "Undefined transformed parameter: A" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(tau(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 24; - check_greater_or_equal(function__,"A",A,0); - check_less_or_equal(function__,"A",A,1); - current_statement_begin__ = 25; - check_greater_or_equal(function__,"tau",tau,0); - check_less_or_equal(function__,"tau",tau,5); - - // model body - - current_statement_begin__ = 34; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 35; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 38; - lp_accum__.add(normal_log(A_pr, 0, 1)); - current_statement_begin__ = 39; - lp_accum__.add(normal_log(tau_pr, 0, 1)); - current_statement_begin__ = 42; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 43; - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 44; - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - - - current_statement_begin__ = 46; - stan::math::assign(ev, initV); - current_statement_begin__ = 48; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 50; - lp_accum__.add(categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), multiply(get_base1(tau,i,"tau",1),ev))); - current_statement_begin__ = 53; - 
stan::math::assign(PE, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - current_statement_begin__ = 56; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + (get_base1(A,i,"A",1) * PE))), - "assigning variable ev"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("A_pr"); - names__.push_back("tau_pr"); - names__.push_back("A"); - names__.push_back("tau"); - names__.push_back("mu_A"); - names__.push_back("mu_tau"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - 
dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_bandit2arm_delta_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(2); - vector_d sigma = in__.vector_lb_constrain(0,2); - vector_d A_pr = in__.vector_constrain(N); - vector_d tau_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(A_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 24; - validate_non_negative_index("A", "N", N); - Eigen::Matrix A(static_cast(N)); - (void) A; // dummy to suppress unused var warning - - stan::math::initialize(A, DUMMY_VAR__); - stan::math::fill(A,DUMMY_VAR__); - current_statement_begin__ = 25; - 
validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - current_statement_begin__ = 27; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 28; - stan::model::assign(A, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(A_pr,i,"A_pr",1)))), - "assigning variable A"); - current_statement_begin__ = 29; - stan::model::assign(tau, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * 5), - "assigning variable tau"); - } - - // validate transformed parameters - current_statement_begin__ = 24; - check_greater_or_equal(function__,"A",A,0); - check_less_or_equal(function__,"A",A,1); - current_statement_begin__ = 25; - check_greater_or_equal(function__,"tau",tau,0); - check_less_or_equal(function__,"tau",tau,5); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(A[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 62; - local_scalar_t__ mu_A; - (void) mu_A; // dummy to suppress unused var warning - - stan::math::initialize(mu_A, DUMMY_VAR__); - stan::math::fill(mu_A,DUMMY_VAR__); - current_statement_begin__ = 63; - local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to suppress unused var warning - - stan::math::initialize(mu_tau, DUMMY_VAR__); - stan::math::fill(mu_tau,DUMMY_VAR__); - current_statement_begin__ = 66; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, 
DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 69; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 72; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 73; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 74; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 78; - stan::math::assign(mu_A, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 79; - stan::math::assign(mu_tau, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 5)); - - current_statement_begin__ = 82; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 83; - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 84; - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - - - current_statement_begin__ = 87; - stan::math::assign(ev, initV); - current_statement_begin__ = 89; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 91; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 93; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + 
categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),multiply(get_base1(tau,i,"tau",1),ev)))), - "assigning variable log_lik"); - current_statement_begin__ = 96; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(softmax(multiply(get_base1(tau,i,"tau",1),ev)), base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 99; - stan::math::assign(PE, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - current_statement_begin__ = 102; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + (get_base1(A,i,"A",1) * PE))), - "assigning variable ev"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 62; - check_greater_or_equal(function__,"mu_A",mu_A,0); - check_less_or_equal(function__,"mu_A",mu_A,1); - current_statement_begin__ = 63; - check_greater_or_equal(function__,"mu_tau",mu_tau,0); - check_less_or_equal(function__,"mu_tau",mu_tau,5); - current_statement_begin__ = 66; - current_statement_begin__ = 69; - - // write generated quantities - vars__.push_back(mu_A); - vars__.push_back(mu_tau); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template 
- void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_bandit2arm_delta"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_bandit4arm_4par_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_bandit4arm_4par"); - reader.add_event(175, 173, "end", "model_bandit4arm_4par"); - return reader; -} - -class model_bandit4arm_4par : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > rew; - vector > los; - vector > choice; - vector_d initV; -public: - model_bandit4arm_4par(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_bandit4arm_4par(stan::io::var_context& context__, - 
unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_bandit4arm_4par_namespace::model_bandit4arm_4par"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 4; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 5; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 6; - validate_non_negative_index("rew", "N", N); - validate_non_negative_index("rew", "T", T); - context__.validate_dims("data initialization", "rew", "double", context__.to_vec(N,T)); - validate_non_negative_index("rew", "N", N); - 
validate_non_negative_index("rew", "T", T); - rew = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("rew"); - pos__ = 0; - size_t rew_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < rew_limit_1__; ++i_1__) { - size_t rew_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < rew_limit_0__; ++i_0__) { - rew[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("los", "N", N); - validate_non_negative_index("los", "T", T); - context__.validate_dims("data initialization", "los", "double", context__.to_vec(N,T)); - validate_non_negative_index("los", "N", N); - validate_non_negative_index("los", "T", T); - los = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("los"); - pos__ = 0; - size_t los_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < los_limit_1__; ++i_1__) { - size_t los_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < los_limit_0__; ++i_0__) { - los[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 8; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 3; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 4; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - 
check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 6; - current_statement_begin__ = 7; - current_statement_begin__ = 8; - // initialize data variables - current_statement_begin__ = 12; - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 13; - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - current_statement_begin__ = 12; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 19; - validate_non_negative_index("mu_p", "4", 4); - num_params_r__ += 4; - current_statement_begin__ = 20; - validate_non_negative_index("sigma", "4", 4); - num_params_r__ += 4; - current_statement_begin__ = 23; - validate_non_negative_index("Arew_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 24; - validate_non_negative_index("Apun_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 25; - validate_non_negative_index("R_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 26; - validate_non_negative_index("P_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_bandit4arm_4par() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - 
validate_non_negative_index("mu_p", "4", 4); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(4)); - vector_d mu_p(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "4", 4); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(4)); - vector_d sigma(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("Arew_pr"))) - throw std::runtime_error("variable Arew_pr missing"); - vals_r__ = context__.vals_r("Arew_pr"); - pos__ = 0U; - validate_non_negative_index("Arew_pr", "N", N); - context__.validate_dims("initialization", "Arew_pr", "vector_d", context__.to_vec(N)); - vector_d Arew_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Arew_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Arew_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Arew_pr: ") + e.what()); - } - - if (!(context__.contains_r("Apun_pr"))) - throw std::runtime_error("variable Apun_pr missing"); - vals_r__ = context__.vals_r("Apun_pr"); - pos__ = 0U; - validate_non_negative_index("Apun_pr", "N", N); - context__.validate_dims("initialization", "Apun_pr", "vector_d", context__.to_vec(N)); - vector_d Apun_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Apun_pr(j1__) = vals_r__[pos__++]; - try { 
- writer__.vector_unconstrain(Apun_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Apun_pr: ") + e.what()); - } - - if (!(context__.contains_r("R_pr"))) - throw std::runtime_error("variable R_pr missing"); - vals_r__ = context__.vals_r("R_pr"); - pos__ = 0U; - validate_non_negative_index("R_pr", "N", N); - context__.validate_dims("initialization", "R_pr", "vector_d", context__.to_vec(N)); - vector_d R_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - R_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(R_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable R_pr: ") + e.what()); - } - - if (!(context__.contains_r("P_pr"))) - throw std::runtime_error("variable P_pr missing"); - vals_r__ = context__.vals_r("P_pr"); - pos__ = 0U; - validate_non_negative_index("P_pr", "N", N); - context__.validate_dims("initialization", "P_pr", "vector_d", context__.to_vec(N)); - vector_d P_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - P_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(P_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable P_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused 
var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(4,lp__); - else - mu_p = in__.vector_constrain(4); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,4,lp__); - else - sigma = in__.vector_lb_constrain(0,4); - - Eigen::Matrix Arew_pr; - (void) Arew_pr; // dummy to suppress unused var warning - if (jacobian__) - Arew_pr = in__.vector_constrain(N,lp__); - else - Arew_pr = in__.vector_constrain(N); - - Eigen::Matrix Apun_pr; - (void) Apun_pr; // dummy to suppress unused var warning - if (jacobian__) - Apun_pr = in__.vector_constrain(N,lp__); - else - Apun_pr = in__.vector_constrain(N); - - Eigen::Matrix R_pr; - (void) R_pr; // dummy to suppress unused var warning - if (jacobian__) - R_pr = in__.vector_constrain(N,lp__); - else - R_pr = in__.vector_constrain(N); - - Eigen::Matrix P_pr; - (void) P_pr; // dummy to suppress unused var warning - if (jacobian__) - P_pr = in__.vector_constrain(N,lp__); - else - P_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 31; - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - current_statement_begin__ = 32; - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - current_statement_begin__ = 33; - validate_non_negative_index("R", "N", N); - Eigen::Matrix R(static_cast(N)); - (void) R; // dummy to suppress unused var warning - - 
stan::math::initialize(R, DUMMY_VAR__); - stan::math::fill(R,DUMMY_VAR__); - current_statement_begin__ = 34; - validate_non_negative_index("P", "N", N); - Eigen::Matrix P(static_cast(N)); - (void) P; // dummy to suppress unused var warning - - stan::math::initialize(P, DUMMY_VAR__); - stan::math::fill(P,DUMMY_VAR__); - - - current_statement_begin__ = 36; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 37; - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - current_statement_begin__ = 38; - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - current_statement_begin__ = 39; - stan::model::assign(R, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(R_pr,i,"R_pr",1)))) * 30), - "assigning variable R"); - current_statement_begin__ = 40; - stan::model::assign(P, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(P_pr,i,"P_pr",1)))) * 30), - "assigning variable P"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Arew(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Arew" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Apun(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Apun" << '[' << 
i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(R(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: R" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(P(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: P" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 31; - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - current_statement_begin__ = 32; - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - current_statement_begin__ = 33; - check_greater_or_equal(function__,"R",R,0); - current_statement_begin__ = 34; - check_greater_or_equal(function__,"P",P,0); - - // model body - - current_statement_begin__ = 46; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 47; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 50; - lp_accum__.add(normal_log(Arew_pr, 0, 1.0)); - current_statement_begin__ = 51; - lp_accum__.add(normal_log(Apun_pr, 0, 1.0)); - current_statement_begin__ = 52; - lp_accum__.add(normal_log(R_pr, 0, 1.0)); - current_statement_begin__ = 53; - lp_accum__.add(normal_log(P_pr, 0, 1.0)); - current_statement_begin__ = 55; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 57; - validate_non_negative_index("Qr", "4", 4); - Eigen::Matrix Qr(static_cast(4)); - (void) Qr; // dummy to suppress unused var warning - - stan::math::initialize(Qr, DUMMY_VAR__); - stan::math::fill(Qr,DUMMY_VAR__); - current_statement_begin__ = 58; - validate_non_negative_index("Qp", "4", 4); - 
Eigen::Matrix Qp(static_cast(4)); - (void) Qp; // dummy to suppress unused var warning - - stan::math::initialize(Qp, DUMMY_VAR__); - stan::math::fill(Qp,DUMMY_VAR__); - current_statement_begin__ = 59; - validate_non_negative_index("PEr_fic", "4", 4); - Eigen::Matrix PEr_fic(static_cast(4)); - (void) PEr_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEr_fic, DUMMY_VAR__); - stan::math::fill(PEr_fic,DUMMY_VAR__); - current_statement_begin__ = 60; - validate_non_negative_index("PEp_fic", "4", 4); - Eigen::Matrix PEp_fic(static_cast(4)); - (void) PEp_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEp_fic, DUMMY_VAR__); - stan::math::fill(PEp_fic,DUMMY_VAR__); - current_statement_begin__ = 61; - validate_non_negative_index("Qsum", "4", 4); - Eigen::Matrix Qsum(static_cast(4)); - (void) Qsum; // dummy to suppress unused var warning - - stan::math::initialize(Qsum, DUMMY_VAR__); - stan::math::fill(Qsum,DUMMY_VAR__); - current_statement_begin__ = 63; - local_scalar_t__ Qr_chosen; - (void) Qr_chosen; // dummy to suppress unused var warning - - stan::math::initialize(Qr_chosen, DUMMY_VAR__); - stan::math::fill(Qr_chosen,DUMMY_VAR__); - current_statement_begin__ = 64; - local_scalar_t__ Qp_chosen; - (void) Qp_chosen; // dummy to suppress unused var warning - - stan::math::initialize(Qp_chosen, DUMMY_VAR__); - stan::math::fill(Qp_chosen,DUMMY_VAR__); - current_statement_begin__ = 65; - local_scalar_t__ PEr; - (void) PEr; // dummy to suppress unused var warning - - stan::math::initialize(PEr, DUMMY_VAR__); - stan::math::fill(PEr,DUMMY_VAR__); - current_statement_begin__ = 66; - local_scalar_t__ PEp; - (void) PEp; // dummy to suppress unused var warning - - stan::math::initialize(PEp, DUMMY_VAR__); - stan::math::fill(PEp,DUMMY_VAR__); - - - current_statement_begin__ = 69; - stan::math::assign(Qr, initV); - current_statement_begin__ = 70; - stan::math::assign(Qp, initV); - current_statement_begin__ = 71; - 
stan::math::assign(Qsum, initV); - current_statement_begin__ = 73; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 75; - lp_accum__.add(categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), Qsum)); - current_statement_begin__ = 78; - stan::math::assign(PEr, ((get_base1(R,i,"R",1) * get_base1(get_base1(rew,i,"rew",1),t,"rew",2)) - get_base1(Qr,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qr",1))); - current_statement_begin__ = 79; - stan::math::assign(PEp, ((get_base1(P,i,"P",1) * get_base1(get_base1(los,i,"los",1),t,"los",2)) - get_base1(Qp,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qp",1))); - current_statement_begin__ = 80; - stan::math::assign(PEr_fic, minus(Qr)); - current_statement_begin__ = 81; - stan::math::assign(PEp_fic, minus(Qp)); - current_statement_begin__ = 84; - stan::math::assign(Qr_chosen, get_base1(Qr,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qr",1)); - current_statement_begin__ = 85; - stan::math::assign(Qp_chosen, get_base1(Qp,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qp",1)); - current_statement_begin__ = 88; - stan::math::assign(Qr, stan::model::deep_copy(add(Qr,multiply(get_base1(Arew,i,"Arew",1),PEr_fic)))); - current_statement_begin__ = 89; - stan::math::assign(Qp, stan::model::deep_copy(add(Qp,multiply(get_base1(Apun,i,"Apun",1),PEp_fic)))); - current_statement_begin__ = 91; - stan::model::assign(Qr, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (Qr_chosen + (get_base1(Arew,i,"Arew",1) * PEr)), - "assigning variable Qr"); - current_statement_begin__ = 92; - stan::model::assign(Qp, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (Qp_chosen + (get_base1(Apun,i,"Apun",1) * PEp)), - "assigning variable Qp"); - current_statement_begin__ = 95; - 
stan::math::assign(Qsum, add(Qr,Qp)); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("Arew_pr"); - names__.push_back("Apun_pr"); - names__.push_back("R_pr"); - names__.push_back("P_pr"); - names__.push_back("Arew"); - names__.push_back("Apun"); - names__.push_back("R"); - names__.push_back("P"); - names__.push_back("mu_Arew"); - names__.push_back("mu_Apun"); - names__.push_back("mu_R"); - names__.push_back("mu_P"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - 
dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_bandit4arm_4par_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(4); - vector_d sigma = in__.vector_lb_constrain(0,4); - vector_d Arew_pr = in__.vector_constrain(N); - vector_d Apun_pr = in__.vector_constrain(N); - vector_d R_pr = in__.vector_constrain(N); - vector_d P_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Arew_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(R_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(P_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) 
DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 31; - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - current_statement_begin__ = 32; - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - current_statement_begin__ = 33; - validate_non_negative_index("R", "N", N); - Eigen::Matrix R(static_cast(N)); - (void) R; // dummy to suppress unused var warning - - stan::math::initialize(R, DUMMY_VAR__); - stan::math::fill(R,DUMMY_VAR__); - current_statement_begin__ = 34; - validate_non_negative_index("P", "N", N); - Eigen::Matrix P(static_cast(N)); - (void) P; // dummy to suppress unused var warning - - stan::math::initialize(P, DUMMY_VAR__); - stan::math::fill(P,DUMMY_VAR__); - - - current_statement_begin__ = 36; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 37; - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - current_statement_begin__ = 38; - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - current_statement_begin__ = 39; - stan::model::assign(R, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(R_pr,i,"R_pr",1)))) * 30), - "assigning variable R"); - 
current_statement_begin__ = 40; - stan::model::assign(P, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(P_pr,i,"P_pr",1)))) * 30), - "assigning variable P"); - } - - // validate transformed parameters - current_statement_begin__ = 31; - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - current_statement_begin__ = 32; - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - current_statement_begin__ = 33; - check_greater_or_equal(function__,"R",R,0); - current_statement_begin__ = 34; - check_greater_or_equal(function__,"P",P,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Arew[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(R[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(P[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 101; - local_scalar_t__ mu_Arew; - (void) mu_Arew; // dummy to suppress unused var warning - - stan::math::initialize(mu_Arew, DUMMY_VAR__); - stan::math::fill(mu_Arew,DUMMY_VAR__); - current_statement_begin__ = 102; - local_scalar_t__ mu_Apun; - (void) mu_Apun; // dummy to suppress unused var warning - - stan::math::initialize(mu_Apun, DUMMY_VAR__); - stan::math::fill(mu_Apun,DUMMY_VAR__); - current_statement_begin__ = 103; - local_scalar_t__ mu_R; - (void) mu_R; // dummy to suppress unused var warning - - stan::math::initialize(mu_R, DUMMY_VAR__); - stan::math::fill(mu_R,DUMMY_VAR__); - current_statement_begin__ = 104; - local_scalar_t__ mu_P; - (void) mu_P; // dummy to suppress unused var warning - - stan::math::initialize(mu_P, DUMMY_VAR__); 
- stan::math::fill(mu_P,DUMMY_VAR__); - current_statement_begin__ = 107; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 110; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 113; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 114; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 115; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 119; - stan::math::assign(mu_Arew, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 120; - stan::math::assign(mu_Apun, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 121; - stan::math::assign(mu_R, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 30)); - current_statement_begin__ = 122; - stan::math::assign(mu_P, (Phi_approx(get_base1(mu_p,4,"mu_p",1)) * 30)); - - current_statement_begin__ = 125; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 127; - validate_non_negative_index("Qr", "4", 4); - Eigen::Matrix Qr(static_cast(4)); - (void) Qr; // dummy to suppress unused var warning - - stan::math::initialize(Qr, DUMMY_VAR__); - stan::math::fill(Qr,DUMMY_VAR__); - current_statement_begin__ = 128; - validate_non_negative_index("Qp", "4", 4); - Eigen::Matrix Qp(static_cast(4)); - (void) Qp; // dummy to suppress unused var warning - - stan::math::initialize(Qp, DUMMY_VAR__); - stan::math::fill(Qp,DUMMY_VAR__); - current_statement_begin__ = 129; - validate_non_negative_index("PEr_fic", "4", 4); - Eigen::Matrix 
PEr_fic(static_cast(4)); - (void) PEr_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEr_fic, DUMMY_VAR__); - stan::math::fill(PEr_fic,DUMMY_VAR__); - current_statement_begin__ = 130; - validate_non_negative_index("PEp_fic", "4", 4); - Eigen::Matrix PEp_fic(static_cast(4)); - (void) PEp_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEp_fic, DUMMY_VAR__); - stan::math::fill(PEp_fic,DUMMY_VAR__); - current_statement_begin__ = 131; - validate_non_negative_index("Qsum", "4", 4); - Eigen::Matrix Qsum(static_cast(4)); - (void) Qsum; // dummy to suppress unused var warning - - stan::math::initialize(Qsum, DUMMY_VAR__); - stan::math::fill(Qsum,DUMMY_VAR__); - current_statement_begin__ = 133; - local_scalar_t__ Qr_chosen; - (void) Qr_chosen; // dummy to suppress unused var warning - - stan::math::initialize(Qr_chosen, DUMMY_VAR__); - stan::math::fill(Qr_chosen,DUMMY_VAR__); - current_statement_begin__ = 134; - local_scalar_t__ Qp_chosen; - (void) Qp_chosen; // dummy to suppress unused var warning - - stan::math::initialize(Qp_chosen, DUMMY_VAR__); - stan::math::fill(Qp_chosen,DUMMY_VAR__); - current_statement_begin__ = 135; - local_scalar_t__ PEr; - (void) PEr; // dummy to suppress unused var warning - - stan::math::initialize(PEr, DUMMY_VAR__); - stan::math::fill(PEr,DUMMY_VAR__); - current_statement_begin__ = 136; - local_scalar_t__ PEp; - (void) PEp; // dummy to suppress unused var warning - - stan::math::initialize(PEp, DUMMY_VAR__); - stan::math::fill(PEp,DUMMY_VAR__); - - - current_statement_begin__ = 139; - stan::math::assign(Qr, initV); - current_statement_begin__ = 140; - stan::math::assign(Qp, initV); - current_statement_begin__ = 141; - stan::math::assign(Qsum, initV); - current_statement_begin__ = 142; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0.0, - "assigning variable log_lik"); - current_statement_begin__ = 144; - for (int t = 1; t <= 
get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 146; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),Qsum))), - "assigning variable log_lik"); - current_statement_begin__ = 149; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(softmax(Qsum), base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 152; - stan::math::assign(PEr, ((get_base1(R,i,"R",1) * get_base1(get_base1(rew,i,"rew",1),t,"rew",2)) - get_base1(Qr,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qr",1))); - current_statement_begin__ = 153; - stan::math::assign(PEp, ((get_base1(P,i,"P",1) * get_base1(get_base1(los,i,"los",1),t,"los",2)) - get_base1(Qp,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qp",1))); - current_statement_begin__ = 154; - stan::math::assign(PEr_fic, minus(Qr)); - current_statement_begin__ = 155; - stan::math::assign(PEp_fic, minus(Qp)); - current_statement_begin__ = 158; - stan::math::assign(Qr_chosen, get_base1(Qr,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qr",1)); - current_statement_begin__ = 159; - stan::math::assign(Qp_chosen, get_base1(Qp,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qp",1)); - current_statement_begin__ = 162; - stan::math::assign(Qr, stan::model::deep_copy(add(Qr,multiply(get_base1(Arew,i,"Arew",1),PEr_fic)))); - current_statement_begin__ = 163; - stan::math::assign(Qp, stan::model::deep_copy(add(Qp,multiply(get_base1(Apun,i,"Apun",1),PEp_fic)))); - current_statement_begin__ = 165; - stan::model::assign(Qr, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - 
(Qr_chosen + (get_base1(Arew,i,"Arew",1) * PEr)), - "assigning variable Qr"); - current_statement_begin__ = 166; - stan::model::assign(Qp, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (Qp_chosen + (get_base1(Apun,i,"Apun",1) * PEp)), - "assigning variable Qp"); - current_statement_begin__ = 169; - stan::math::assign(Qsum, add(Qr,Qp)); - } - } - } - - // validate generated quantities - current_statement_begin__ = 101; - check_greater_or_equal(function__,"mu_Arew",mu_Arew,0); - check_less_or_equal(function__,"mu_Arew",mu_Arew,1); - current_statement_begin__ = 102; - check_greater_or_equal(function__,"mu_Apun",mu_Apun,0); - check_less_or_equal(function__,"mu_Apun",mu_Apun,1); - current_statement_begin__ = 103; - check_greater_or_equal(function__,"mu_R",mu_R,0); - current_statement_begin__ = 104; - check_greater_or_equal(function__,"mu_P",mu_P,0); - current_statement_begin__ = 107; - current_statement_begin__ = 110; - - // write generated quantities - vars__.push_back(mu_Arew); - vars__.push_back(mu_Apun); - vars__.push_back(mu_R); - vars__.push_back(mu_P); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector 
params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_bandit4arm_4par"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "R_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "P_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "R" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "P" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_R"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_P"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "R_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "P_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "R" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "P" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_R"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_P"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_bandit4arm_lapse_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_bandit4arm_lapse"); - reader.add_event(181, 179, "end", "model_bandit4arm_lapse"); - return reader; -} - -class model_bandit4arm_lapse : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > rew; - vector > los; - vector > choice; - vector_d initV; -public: - model_bandit4arm_lapse(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_bandit4arm_lapse(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_bandit4arm_lapse_namespace::model_bandit4arm_lapse"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 3; - 
context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 4; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 5; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 6; - validate_non_negative_index("rew", "N", N); - validate_non_negative_index("rew", "T", T); - context__.validate_dims("data initialization", "rew", "double", context__.to_vec(N,T)); - validate_non_negative_index("rew", "N", N); - validate_non_negative_index("rew", "T", T); - rew = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("rew"); - pos__ = 0; - size_t rew_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < rew_limit_1__; ++i_1__) { - size_t rew_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < rew_limit_0__; ++i_0__) { - rew[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("los", "N", N); - validate_non_negative_index("los", "T", T); - context__.validate_dims("data initialization", "los", "double", context__.to_vec(N,T)); - validate_non_negative_index("los", "N", N); - validate_non_negative_index("los", "T", T); - los = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("los"); - pos__ = 0; - size_t los_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < los_limit_1__; ++i_1__) { - size_t los_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < 
los_limit_0__; ++i_0__) { - los[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 8; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 3; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 4; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 6; - current_statement_begin__ = 7; - current_statement_begin__ = 8; - // initialize data variables - current_statement_begin__ = 12; - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 13; - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - current_statement_begin__ = 12; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 19; - validate_non_negative_index("mu_p", "5", 5); - num_params_r__ += 5; - current_statement_begin__ = 20; - validate_non_negative_index("sigma", "5", 5); - num_params_r__ += 5; - current_statement_begin__ = 23; - validate_non_negative_index("Arew_pr", "N", N); - num_params_r__ += N; - 
current_statement_begin__ = 24; - validate_non_negative_index("Apun_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 25; - validate_non_negative_index("R_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 26; - validate_non_negative_index("P_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 27; - validate_non_negative_index("xi_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_bandit4arm_lapse() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "5", 5); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(5)); - vector_d mu_p(static_cast(5)); - for (int j1__ = 0U; j1__ < 5; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "5", 5); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(5)); - vector_d sigma(static_cast(5)); - for (int j1__ = 0U; j1__ < 5; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - 
writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("Arew_pr"))) - throw std::runtime_error("variable Arew_pr missing"); - vals_r__ = context__.vals_r("Arew_pr"); - pos__ = 0U; - validate_non_negative_index("Arew_pr", "N", N); - context__.validate_dims("initialization", "Arew_pr", "vector_d", context__.to_vec(N)); - vector_d Arew_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Arew_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Arew_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Arew_pr: ") + e.what()); - } - - if (!(context__.contains_r("Apun_pr"))) - throw std::runtime_error("variable Apun_pr missing"); - vals_r__ = context__.vals_r("Apun_pr"); - pos__ = 0U; - validate_non_negative_index("Apun_pr", "N", N); - context__.validate_dims("initialization", "Apun_pr", "vector_d", context__.to_vec(N)); - vector_d Apun_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Apun_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Apun_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Apun_pr: ") + e.what()); - } - - if (!(context__.contains_r("R_pr"))) - throw std::runtime_error("variable R_pr missing"); - vals_r__ = context__.vals_r("R_pr"); - pos__ = 0U; - validate_non_negative_index("R_pr", "N", N); - context__.validate_dims("initialization", "R_pr", "vector_d", context__.to_vec(N)); - vector_d R_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - R_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(R_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable R_pr: ") + e.what()); - } - - if (!(context__.contains_r("P_pr"))) - throw std::runtime_error("variable 
P_pr missing"); - vals_r__ = context__.vals_r("P_pr"); - pos__ = 0U; - validate_non_negative_index("P_pr", "N", N); - context__.validate_dims("initialization", "P_pr", "vector_d", context__.to_vec(N)); - vector_d P_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - P_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(P_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable P_pr: ") + e.what()); - } - - if (!(context__.contains_r("xi_pr"))) - throw std::runtime_error("variable xi_pr missing"); - vals_r__ = context__.vals_r("xi_pr"); - pos__ = 0U; - validate_non_negative_index("xi_pr", "N", N); - context__.validate_dims("initialization", "xi_pr", "vector_d", context__.to_vec(N)); - vector_d xi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - xi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(xi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable xi_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if 
(jacobian__) - mu_p = in__.vector_constrain(5,lp__); - else - mu_p = in__.vector_constrain(5); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,5,lp__); - else - sigma = in__.vector_lb_constrain(0,5); - - Eigen::Matrix Arew_pr; - (void) Arew_pr; // dummy to suppress unused var warning - if (jacobian__) - Arew_pr = in__.vector_constrain(N,lp__); - else - Arew_pr = in__.vector_constrain(N); - - Eigen::Matrix Apun_pr; - (void) Apun_pr; // dummy to suppress unused var warning - if (jacobian__) - Apun_pr = in__.vector_constrain(N,lp__); - else - Apun_pr = in__.vector_constrain(N); - - Eigen::Matrix R_pr; - (void) R_pr; // dummy to suppress unused var warning - if (jacobian__) - R_pr = in__.vector_constrain(N,lp__); - else - R_pr = in__.vector_constrain(N); - - Eigen::Matrix P_pr; - (void) P_pr; // dummy to suppress unused var warning - if (jacobian__) - P_pr = in__.vector_constrain(N,lp__); - else - P_pr = in__.vector_constrain(N); - - Eigen::Matrix xi_pr; - (void) xi_pr; // dummy to suppress unused var warning - if (jacobian__) - xi_pr = in__.vector_constrain(N,lp__); - else - xi_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 32; - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - current_statement_begin__ = 33; - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - current_statement_begin__ = 34; - validate_non_negative_index("R", "N", N); - Eigen::Matrix R(static_cast(N)); - (void) R; // dummy to suppress unused var warning - - stan::math::initialize(R, DUMMY_VAR__); - 
stan::math::fill(R,DUMMY_VAR__); - current_statement_begin__ = 35; - validate_non_negative_index("P", "N", N); - Eigen::Matrix P(static_cast(N)); - (void) P; // dummy to suppress unused var warning - - stan::math::initialize(P, DUMMY_VAR__); - stan::math::fill(P,DUMMY_VAR__); - current_statement_begin__ = 36; - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - - - current_statement_begin__ = 38; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 39; - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - current_statement_begin__ = 40; - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - current_statement_begin__ = 41; - stan::model::assign(R, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(R_pr,i,"R_pr",1)))) * 30), - "assigning variable R"); - current_statement_begin__ = 42; - stan::model::assign(P, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(P_pr,i,"P_pr",1)))) * 30), - "assigning variable P"); - current_statement_begin__ = 43; - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,5,"mu_p",1) + (get_base1(sigma,5,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - 
"assigning variable xi"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Arew(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Arew" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Apun(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Apun" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(R(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: R" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(P(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: P" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(xi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: xi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 32; - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - current_statement_begin__ = 33; - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - current_statement_begin__ = 34; - check_greater_or_equal(function__,"R",R,0); - current_statement_begin__ = 35; - check_greater_or_equal(function__,"P",P,0); - current_statement_begin__ = 36; - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - - // model body - - current_statement_begin__ = 49; - 
lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 50; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 53; - lp_accum__.add(normal_log(Arew_pr, 0, 1.0)); - current_statement_begin__ = 54; - lp_accum__.add(normal_log(Apun_pr, 0, 1.0)); - current_statement_begin__ = 55; - lp_accum__.add(normal_log(R_pr, 0, 1.0)); - current_statement_begin__ = 56; - lp_accum__.add(normal_log(P_pr, 0, 1.0)); - current_statement_begin__ = 57; - lp_accum__.add(normal_log(xi_pr, 0, 1.0)); - current_statement_begin__ = 59; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 61; - validate_non_negative_index("Qr", "4", 4); - Eigen::Matrix Qr(static_cast(4)); - (void) Qr; // dummy to suppress unused var warning - - stan::math::initialize(Qr, DUMMY_VAR__); - stan::math::fill(Qr,DUMMY_VAR__); - current_statement_begin__ = 62; - validate_non_negative_index("Qp", "4", 4); - Eigen::Matrix Qp(static_cast(4)); - (void) Qp; // dummy to suppress unused var warning - - stan::math::initialize(Qp, DUMMY_VAR__); - stan::math::fill(Qp,DUMMY_VAR__); - current_statement_begin__ = 63; - validate_non_negative_index("PEr_fic", "4", 4); - Eigen::Matrix PEr_fic(static_cast(4)); - (void) PEr_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEr_fic, DUMMY_VAR__); - stan::math::fill(PEr_fic,DUMMY_VAR__); - current_statement_begin__ = 64; - validate_non_negative_index("PEp_fic", "4", 4); - Eigen::Matrix PEp_fic(static_cast(4)); - (void) PEp_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEp_fic, DUMMY_VAR__); - stan::math::fill(PEp_fic,DUMMY_VAR__); - current_statement_begin__ = 65; - validate_non_negative_index("Qsum", "4", 4); - Eigen::Matrix Qsum(static_cast(4)); - (void) Qsum; // dummy to suppress unused var warning - - stan::math::initialize(Qsum, DUMMY_VAR__); - stan::math::fill(Qsum,DUMMY_VAR__); - current_statement_begin__ = 67; - local_scalar_t__ Qr_chosen; - (void) Qr_chosen; // 
dummy to suppress unused var warning - - stan::math::initialize(Qr_chosen, DUMMY_VAR__); - stan::math::fill(Qr_chosen,DUMMY_VAR__); - current_statement_begin__ = 68; - local_scalar_t__ Qp_chosen; - (void) Qp_chosen; // dummy to suppress unused var warning - - stan::math::initialize(Qp_chosen, DUMMY_VAR__); - stan::math::fill(Qp_chosen,DUMMY_VAR__); - current_statement_begin__ = 69; - local_scalar_t__ PEr; - (void) PEr; // dummy to suppress unused var warning - - stan::math::initialize(PEr, DUMMY_VAR__); - stan::math::fill(PEr,DUMMY_VAR__); - current_statement_begin__ = 70; - local_scalar_t__ PEp; - (void) PEp; // dummy to suppress unused var warning - - stan::math::initialize(PEp, DUMMY_VAR__); - stan::math::fill(PEp,DUMMY_VAR__); - - - current_statement_begin__ = 73; - stan::math::assign(Qr, initV); - current_statement_begin__ = 74; - stan::math::assign(Qp, initV); - current_statement_begin__ = 75; - stan::math::assign(Qsum, initV); - current_statement_begin__ = 77; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 79; - lp_accum__.add(categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), add(multiply(softmax(Qsum),(1 - get_base1(xi,i,"xi",1))),(get_base1(xi,i,"xi",1) / 4)))); - current_statement_begin__ = 82; - stan::math::assign(PEr, ((get_base1(R,i,"R",1) * get_base1(get_base1(rew,i,"rew",1),t,"rew",2)) - get_base1(Qr,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qr",1))); - current_statement_begin__ = 83; - stan::math::assign(PEp, ((get_base1(P,i,"P",1) * get_base1(get_base1(los,i,"los",1),t,"los",2)) - get_base1(Qp,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qp",1))); - current_statement_begin__ = 84; - stan::math::assign(PEr_fic, minus(Qr)); - current_statement_begin__ = 85; - stan::math::assign(PEp_fic, minus(Qp)); - current_statement_begin__ = 88; - stan::math::assign(Qr_chosen, get_base1(Qr,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qr",1)); - 
current_statement_begin__ = 89; - stan::math::assign(Qp_chosen, get_base1(Qp,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qp",1)); - current_statement_begin__ = 92; - stan::math::assign(Qr, stan::model::deep_copy(add(Qr,multiply(get_base1(Arew,i,"Arew",1),PEr_fic)))); - current_statement_begin__ = 93; - stan::math::assign(Qp, stan::model::deep_copy(add(Qp,multiply(get_base1(Apun,i,"Apun",1),PEp_fic)))); - current_statement_begin__ = 95; - stan::model::assign(Qr, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (Qr_chosen + (get_base1(Arew,i,"Arew",1) * PEr)), - "assigning variable Qr"); - current_statement_begin__ = 96; - stan::model::assign(Qp, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (Qp_chosen + (get_base1(Apun,i,"Apun",1) * PEp)), - "assigning variable Qp"); - current_statement_begin__ = 99; - stan::math::assign(Qsum, add(Qr,Qp)); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("Arew_pr"); - names__.push_back("Apun_pr"); - names__.push_back("R_pr"); - names__.push_back("P_pr"); - 
names__.push_back("xi_pr"); - names__.push_back("Arew"); - names__.push_back("Apun"); - names__.push_back("R"); - names__.push_back("P"); - names__.push_back("xi"); - names__.push_back("mu_Arew"); - names__.push_back("mu_Apun"); - names__.push_back("mu_R"); - names__.push_back("mu_P"); - names__.push_back("mu_xi"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(5); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(5); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef 
double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_bandit4arm_lapse_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(5); - vector_d sigma = in__.vector_lb_constrain(0,5); - vector_d Arew_pr = in__.vector_constrain(N); - vector_d Apun_pr = in__.vector_constrain(N); - vector_d R_pr = in__.vector_constrain(N); - vector_d P_pr = in__.vector_constrain(N); - vector_d xi_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 5; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 5; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Arew_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(R_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(P_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 32; - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - current_statement_begin__ = 33; - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - 
current_statement_begin__ = 34; - validate_non_negative_index("R", "N", N); - Eigen::Matrix R(static_cast(N)); - (void) R; // dummy to suppress unused var warning - - stan::math::initialize(R, DUMMY_VAR__); - stan::math::fill(R,DUMMY_VAR__); - current_statement_begin__ = 35; - validate_non_negative_index("P", "N", N); - Eigen::Matrix P(static_cast(N)); - (void) P; // dummy to suppress unused var warning - - stan::math::initialize(P, DUMMY_VAR__); - stan::math::fill(P,DUMMY_VAR__); - current_statement_begin__ = 36; - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - - - current_statement_begin__ = 38; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 39; - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - current_statement_begin__ = 40; - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - current_statement_begin__ = 41; - stan::model::assign(R, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(R_pr,i,"R_pr",1)))) * 30), - "assigning variable R"); - current_statement_begin__ = 42; - stan::model::assign(P, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(P_pr,i,"P_pr",1)))) * 30), - "assigning variable P"); - current_statement_begin__ = 43; - 
stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,5,"mu_p",1) + (get_base1(sigma,5,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - } - - // validate transformed parameters - current_statement_begin__ = 32; - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - current_statement_begin__ = 33; - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - current_statement_begin__ = 34; - check_greater_or_equal(function__,"R",R,0); - current_statement_begin__ = 35; - check_greater_or_equal(function__,"P",P,0); - current_statement_begin__ = 36; - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Arew[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(R[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(P[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 105; - local_scalar_t__ mu_Arew; - (void) mu_Arew; // dummy to suppress unused var warning - - stan::math::initialize(mu_Arew, DUMMY_VAR__); - stan::math::fill(mu_Arew,DUMMY_VAR__); - current_statement_begin__ = 106; - local_scalar_t__ mu_Apun; - (void) mu_Apun; // dummy to suppress unused var warning - - stan::math::initialize(mu_Apun, DUMMY_VAR__); - stan::math::fill(mu_Apun,DUMMY_VAR__); - current_statement_begin__ = 107; - local_scalar_t__ mu_R; - (void) mu_R; // dummy to suppress unused var warning - - stan::math::initialize(mu_R, DUMMY_VAR__); - 
stan::math::fill(mu_R,DUMMY_VAR__); - current_statement_begin__ = 108; - local_scalar_t__ mu_P; - (void) mu_P; // dummy to suppress unused var warning - - stan::math::initialize(mu_P, DUMMY_VAR__); - stan::math::fill(mu_P,DUMMY_VAR__); - current_statement_begin__ = 109; - local_scalar_t__ mu_xi; - (void) mu_xi; // dummy to suppress unused var warning - - stan::math::initialize(mu_xi, DUMMY_VAR__); - stan::math::fill(mu_xi,DUMMY_VAR__); - current_statement_begin__ = 112; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 115; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 118; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 119; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 120; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 124; - stan::math::assign(mu_Arew, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 125; - stan::math::assign(mu_Apun, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 126; - stan::math::assign(mu_R, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 30)); - current_statement_begin__ = 127; - stan::math::assign(mu_P, (Phi_approx(get_base1(mu_p,4,"mu_p",1)) * 30)); - current_statement_begin__ = 128; - stan::math::assign(mu_xi, Phi_approx(get_base1(mu_p,5,"mu_p",1))); - - current_statement_begin__ = 131; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 133; - validate_non_negative_index("Qr", "4", 4); - Eigen::Matrix 
Qr(static_cast(4)); - (void) Qr; // dummy to suppress unused var warning - - stan::math::initialize(Qr, DUMMY_VAR__); - stan::math::fill(Qr,DUMMY_VAR__); - current_statement_begin__ = 134; - validate_non_negative_index("Qp", "4", 4); - Eigen::Matrix Qp(static_cast(4)); - (void) Qp; // dummy to suppress unused var warning - - stan::math::initialize(Qp, DUMMY_VAR__); - stan::math::fill(Qp,DUMMY_VAR__); - current_statement_begin__ = 135; - validate_non_negative_index("PEr_fic", "4", 4); - Eigen::Matrix PEr_fic(static_cast(4)); - (void) PEr_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEr_fic, DUMMY_VAR__); - stan::math::fill(PEr_fic,DUMMY_VAR__); - current_statement_begin__ = 136; - validate_non_negative_index("PEp_fic", "4", 4); - Eigen::Matrix PEp_fic(static_cast(4)); - (void) PEp_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEp_fic, DUMMY_VAR__); - stan::math::fill(PEp_fic,DUMMY_VAR__); - current_statement_begin__ = 137; - validate_non_negative_index("Qsum", "4", 4); - Eigen::Matrix Qsum(static_cast(4)); - (void) Qsum; // dummy to suppress unused var warning - - stan::math::initialize(Qsum, DUMMY_VAR__); - stan::math::fill(Qsum,DUMMY_VAR__); - current_statement_begin__ = 139; - local_scalar_t__ Qr_chosen; - (void) Qr_chosen; // dummy to suppress unused var warning - - stan::math::initialize(Qr_chosen, DUMMY_VAR__); - stan::math::fill(Qr_chosen,DUMMY_VAR__); - current_statement_begin__ = 140; - local_scalar_t__ Qp_chosen; - (void) Qp_chosen; // dummy to suppress unused var warning - - stan::math::initialize(Qp_chosen, DUMMY_VAR__); - stan::math::fill(Qp_chosen,DUMMY_VAR__); - current_statement_begin__ = 141; - local_scalar_t__ PEr; - (void) PEr; // dummy to suppress unused var warning - - stan::math::initialize(PEr, DUMMY_VAR__); - stan::math::fill(PEr,DUMMY_VAR__); - current_statement_begin__ = 142; - local_scalar_t__ PEp; - (void) PEp; // dummy to suppress unused var warning - - stan::math::initialize(PEp, 
DUMMY_VAR__); - stan::math::fill(PEp,DUMMY_VAR__); - - - current_statement_begin__ = 145; - stan::math::assign(Qr, initV); - current_statement_begin__ = 146; - stan::math::assign(Qp, initV); - current_statement_begin__ = 147; - stan::math::assign(Qsum, initV); - current_statement_begin__ = 148; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0.0, - "assigning variable log_lik"); - current_statement_begin__ = 150; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 152; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),add(multiply(softmax(Qsum),(1 - get_base1(xi,i,"xi",1))),(get_base1(xi,i,"xi",1) / 4))))), - "assigning variable log_lik"); - current_statement_begin__ = 155; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(add(multiply(softmax(Qsum),(1 - get_base1(xi,i,"xi",1))),(get_base1(xi,i,"xi",1) / 4)), base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 158; - stan::math::assign(PEr, ((get_base1(R,i,"R",1) * get_base1(get_base1(rew,i,"rew",1),t,"rew",2)) - get_base1(Qr,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qr",1))); - current_statement_begin__ = 159; - stan::math::assign(PEp, ((get_base1(P,i,"P",1) * get_base1(get_base1(los,i,"los",1),t,"los",2)) - get_base1(Qp,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qp",1))); - current_statement_begin__ = 160; - stan::math::assign(PEr_fic, minus(Qr)); - current_statement_begin__ = 161; - stan::math::assign(PEp_fic, minus(Qp)); - current_statement_begin__ = 164; - stan::math::assign(Qr_chosen, 
get_base1(Qr,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qr",1)); - current_statement_begin__ = 165; - stan::math::assign(Qp_chosen, get_base1(Qp,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"Qp",1)); - current_statement_begin__ = 168; - stan::math::assign(Qr, stan::model::deep_copy(add(Qr,multiply(get_base1(Arew,i,"Arew",1),PEr_fic)))); - current_statement_begin__ = 169; - stan::math::assign(Qp, stan::model::deep_copy(add(Qp,multiply(get_base1(Apun,i,"Apun",1),PEp_fic)))); - current_statement_begin__ = 171; - stan::model::assign(Qr, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (Qr_chosen + (get_base1(Arew,i,"Arew",1) * PEr)), - "assigning variable Qr"); - current_statement_begin__ = 172; - stan::model::assign(Qp, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (Qp_chosen + (get_base1(Apun,i,"Apun",1) * PEp)), - "assigning variable Qp"); - current_statement_begin__ = 175; - stan::math::assign(Qsum, add(Qr,Qp)); - } - } - } - - // validate generated quantities - current_statement_begin__ = 105; - check_greater_or_equal(function__,"mu_Arew",mu_Arew,0); - check_less_or_equal(function__,"mu_Arew",mu_Arew,1); - current_statement_begin__ = 106; - check_greater_or_equal(function__,"mu_Apun",mu_Apun,0); - check_less_or_equal(function__,"mu_Apun",mu_Apun,1); - current_statement_begin__ = 107; - check_greater_or_equal(function__,"mu_R",mu_R,0); - current_statement_begin__ = 108; - check_greater_or_equal(function__,"mu_P",mu_P,0); - current_statement_begin__ = 109; - check_greater_or_equal(function__,"mu_xi",mu_xi,0); - check_less_or_equal(function__,"mu_xi",mu_xi,1); - current_statement_begin__ = 112; - current_statement_begin__ = 115; - - // write generated quantities - vars__.push_back(mu_Arew); - vars__.push_back(mu_Apun); - vars__.push_back(mu_R); - 
vars__.push_back(mu_P); - vars__.push_back(mu_xi); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_bandit4arm_lapse"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "R_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "P_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "R" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "P" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_R"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_P"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "R_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "P_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "R" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "P" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_R"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_P"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_bart_par4_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_bart_par4"); - reader.add_event(128, 126, "end", "model_bart_par4"); - return reader; -} - -class model_bart_par4 : public prob_grad { -private: - int N; - int T; - vector Tsubj; - int P; - vector > pumps; - vector > explosion; - vector > > d; -public: - model_bart_par4(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_bart_par4(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_bart_par4_namespace::model_bart_par4"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", 
context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - context__.validate_dims("data initialization", "P", "int", context__.to_vec()); - P = int(0); - vals_i__ = context__.vals_i("P"); - pos__ = 0; - P = vals_i__[pos__++]; - current_statement_begin__ = 6; - validate_non_negative_index("pumps", "N", N); - validate_non_negative_index("pumps", "T", T); - context__.validate_dims("data initialization", "pumps", "int", context__.to_vec(N,T)); - validate_non_negative_index("pumps", "N", N); - validate_non_negative_index("pumps", "T", T); - pumps = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("pumps"); - pos__ = 0; - size_t pumps_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < pumps_limit_1__; ++i_1__) { - size_t pumps_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < pumps_limit_0__; ++i_0__) { - pumps[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("explosion", "N", N); - validate_non_negative_index("explosion", "T", T); - context__.validate_dims("data initialization", "explosion", "int", context__.to_vec(N,T)); - validate_non_negative_index("explosion", "N", N); - validate_non_negative_index("explosion", "T", T); - explosion = std::vector >(N,std::vector(T,int(0))); - vals_i__ = 
context__.vals_i("explosion"); - pos__ = 0; - size_t explosion_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < explosion_limit_1__; ++i_1__) { - size_t explosion_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < explosion_limit_0__; ++i_0__) { - explosion[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],0); - } - current_statement_begin__ = 5; - check_greater_or_equal(function__,"P",P,2); - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"pumps[k0__][k1__]",pumps[k0__][k1__],0); - } - } - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"explosion[k0__][k1__]",explosion[k0__][k1__],0); - check_less_or_equal(function__,"explosion[k0__][k1__]",explosion[k0__][k1__],1); - } - } - // initialize data variables - current_statement_begin__ = 12; - validate_non_negative_index("d", "N", N); - validate_non_negative_index("d", "T", T); - validate_non_negative_index("d", "P", P); - d = std::vector > >(N,std::vector >(T,std::vector(P,int(0)))); - stan::math::fill(d, std::numeric_limits::min()); - - current_statement_begin__ = 14; - for (int j = 1; j <= N; ++j) { - - current_statement_begin__ = 15; - for (int k = 1; k <= get_base1(Tsubj,j,"Tsubj",1); ++k) { - - current_statement_begin__ = 16; - for (int l = 1; l <= P; ++l) { - - current_statement_begin__ = 17; - if (as_bool(logical_lte(l,get_base1(get_base1(pumps,j,"pumps",1),k,"pumps",2)))) { - current_statement_begin__ = 18; - stan::model::assign(d, - stan::model::cons_list(stan::model::index_uni(j), 
stan::model::cons_list(stan::model::index_uni(k), stan::model::cons_list(stan::model::index_uni(l), stan::model::nil_index_list()))), - 1, - "assigning variable d"); - } else { - current_statement_begin__ = 20; - stan::model::assign(d, - stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_uni(k), stan::model::cons_list(stan::model::index_uni(l), stan::model::nil_index_list()))), - 0, - "assigning variable d"); - } - } - } - } - - // validate transformed data - current_statement_begin__ = 12; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 28; - validate_non_negative_index("mu_p", "4", 4); - num_params_r__ += 4; - current_statement_begin__ = 29; - validate_non_negative_index("sigma", "4", 4); - num_params_r__ += 4; - current_statement_begin__ = 32; - validate_non_negative_index("phi_p", "N", N); - num_params_r__ += N; - current_statement_begin__ = 33; - validate_non_negative_index("eta_p", "N", N); - num_params_r__ += N; - current_statement_begin__ = 34; - validate_non_negative_index("gam_p", "N", N); - num_params_r__ += N; - current_statement_begin__ = 35; - validate_non_negative_index("tau_p", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_bart_par4() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - 
pos__ = 0U; - validate_non_negative_index("mu_p", "4", 4); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(4)); - vector_d mu_p(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "4", 4); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(4)); - vector_d sigma(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("phi_p"))) - throw std::runtime_error("variable phi_p missing"); - vals_r__ = context__.vals_r("phi_p"); - pos__ = 0U; - validate_non_negative_index("phi_p", "N", N); - context__.validate_dims("initialization", "phi_p", "vector_d", context__.to_vec(N)); - vector_d phi_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - phi_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(phi_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable phi_p: ") + e.what()); - } - - if (!(context__.contains_r("eta_p"))) - throw std::runtime_error("variable eta_p missing"); - vals_r__ = context__.vals_r("eta_p"); - pos__ = 0U; - validate_non_negative_index("eta_p", "N", N); - context__.validate_dims("initialization", "eta_p", "vector_d", context__.to_vec(N)); - vector_d eta_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - eta_p(j1__) = vals_r__[pos__++]; - try { - 
writer__.vector_unconstrain(eta_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable eta_p: ") + e.what()); - } - - if (!(context__.contains_r("gam_p"))) - throw std::runtime_error("variable gam_p missing"); - vals_r__ = context__.vals_r("gam_p"); - pos__ = 0U; - validate_non_negative_index("gam_p", "N", N); - context__.validate_dims("initialization", "gam_p", "vector_d", context__.to_vec(N)); - vector_d gam_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - gam_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(gam_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable gam_p: ") + e.what()); - } - - if (!(context__.contains_r("tau_p"))) - throw std::runtime_error("variable tau_p missing"); - vals_r__ = context__.vals_r("tau_p"); - pos__ = 0U; - validate_non_negative_index("tau_p", "N", N); - context__.validate_dims("initialization", "tau_p", "vector_d", context__.to_vec(N)); - vector_d tau_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - tau_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(tau_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau_p: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // 
suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(4,lp__); - else - mu_p = in__.vector_constrain(4); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,4,lp__); - else - sigma = in__.vector_lb_constrain(0,4); - - Eigen::Matrix phi_p; - (void) phi_p; // dummy to suppress unused var warning - if (jacobian__) - phi_p = in__.vector_constrain(N,lp__); - else - phi_p = in__.vector_constrain(N); - - Eigen::Matrix eta_p; - (void) eta_p; // dummy to suppress unused var warning - if (jacobian__) - eta_p = in__.vector_constrain(N,lp__); - else - eta_p = in__.vector_constrain(N); - - Eigen::Matrix gam_p; - (void) gam_p; // dummy to suppress unused var warning - if (jacobian__) - gam_p = in__.vector_constrain(N,lp__); - else - gam_p = in__.vector_constrain(N); - - Eigen::Matrix tau_p; - (void) tau_p; // dummy to suppress unused var warning - if (jacobian__) - tau_p = in__.vector_constrain(N,lp__); - else - tau_p = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 40; - validate_non_negative_index("phi", "N", N); - Eigen::Matrix phi(static_cast(N)); - (void) phi; // dummy to suppress unused var warning - - stan::math::initialize(phi, DUMMY_VAR__); - stan::math::fill(phi,DUMMY_VAR__); - current_statement_begin__ = 41; - validate_non_negative_index("eta", "N", N); - Eigen::Matrix eta(static_cast(N)); - (void) eta; // dummy to suppress unused var warning - - stan::math::initialize(eta, DUMMY_VAR__); - stan::math::fill(eta,DUMMY_VAR__); - current_statement_begin__ = 42; - validate_non_negative_index("gam", "N", N); - Eigen::Matrix gam(static_cast(N)); - (void) gam; // dummy to suppress unused var warning - - 
stan::math::initialize(gam, DUMMY_VAR__); - stan::math::fill(gam,DUMMY_VAR__); - current_statement_begin__ = 43; - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - current_statement_begin__ = 45; - stan::math::assign(phi, Phi_approx(add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),phi_p)))); - current_statement_begin__ = 46; - stan::math::assign(eta, stan::math::exp(add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),eta_p)))); - current_statement_begin__ = 47; - stan::math::assign(gam, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),gam_p)))); - current_statement_begin__ = 48; - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),tau_p)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(phi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: phi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(eta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: eta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(gam(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: gam" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(tau(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate 
transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 40; - check_greater_or_equal(function__,"phi",phi,0); - check_less_or_equal(function__,"phi",phi,1); - current_statement_begin__ = 41; - check_greater_or_equal(function__,"eta",eta,0); - current_statement_begin__ = 42; - check_greater_or_equal(function__,"gam",gam,0); - current_statement_begin__ = 43; - check_greater_or_equal(function__,"tau",tau,0); - - // model body - - current_statement_begin__ = 53; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 54; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 56; - lp_accum__.add(normal_log(phi_p, 0, 1)); - current_statement_begin__ = 57; - lp_accum__.add(normal_log(eta_p, 0, 1)); - current_statement_begin__ = 58; - lp_accum__.add(normal_log(gam_p, 0, 1)); - current_statement_begin__ = 59; - lp_accum__.add(normal_log(tau_p, 0, 1)); - current_statement_begin__ = 62; - for (int j = 1; j <= N; ++j) { - { - current_statement_begin__ = 64; - int n_succ(0); - (void) n_succ; // dummy to suppress unused var warning - - stan::math::fill(n_succ, std::numeric_limits::min()); - stan::math::assign(n_succ,0); - current_statement_begin__ = 65; - int n_pump(0); - (void) n_pump; // dummy to suppress unused var warning - - stan::math::fill(n_pump, std::numeric_limits::min()); - stan::math::assign(n_pump,0); - - - current_statement_begin__ = 67; - for (int k = 1; k <= get_base1(Tsubj,j,"Tsubj",1); ++k) { - { - current_statement_begin__ = 68; - local_scalar_t__ p_burst; - (void) p_burst; // dummy to suppress unused var warning - - stan::math::initialize(p_burst, DUMMY_VAR__); - stan::math::fill(p_burst,DUMMY_VAR__); - current_statement_begin__ = 69; - local_scalar_t__ omega; - (void) omega; // dummy to suppress unused var warning - - stan::math::initialize(omega, DUMMY_VAR__); - stan::math::fill(omega,DUMMY_VAR__); - - - current_statement_begin__ = 71; - 
stan::math::assign(p_burst, (1 - ((get_base1(phi,j,"phi",1) + (get_base1(eta,j,"eta",1) * n_succ)) / (1 + (get_base1(eta,j,"eta",1) * n_pump))))); - current_statement_begin__ = 72; - stan::math::assign(omega, (-(get_base1(gam,j,"gam",1)) / log1m(p_burst))); - current_statement_begin__ = 75; - for (int l = 1; l <= ((get_base1(get_base1(pumps,j,"pumps",1),k,"pumps",2) + 1) - get_base1(get_base1(explosion,j,"explosion",1),k,"explosion",2)); ++l) { - current_statement_begin__ = 76; - lp_accum__.add(bernoulli_logit_log(get_base1(get_base1(get_base1(d,j,"d",1),k,"d",2),l,"d",3), (get_base1(tau,j,"tau",1) * (omega - l)))); - } - current_statement_begin__ = 79; - stan::math::assign(n_succ, (n_succ + (get_base1(get_base1(pumps,j,"pumps",1),k,"pumps",2) - get_base1(get_base1(explosion,j,"explosion",1),k,"explosion",2)))); - current_statement_begin__ = 80; - stan::math::assign(n_pump, (n_pump + get_base1(get_base1(pumps,j,"pumps",1),k,"pumps",2))); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("phi_p"); - names__.push_back("eta_p"); - names__.push_back("gam_p"); - names__.push_back("tau_p"); - names__.push_back("phi"); - names__.push_back("eta"); - names__.push_back("gam"); - names__.push_back("tau"); - 
names__.push_back("mu_phi"); - names__.push_back("mu_eta"); - names__.push_back("mu_gam"); - names__.push_back("mu_tau"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dims__.push_back(P); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_bart_par4_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(4); - vector_d sigma = in__.vector_lb_constrain(0,4); - vector_d phi_p = 
in__.vector_constrain(N); - vector_d eta_p = in__.vector_constrain(N); - vector_d gam_p = in__.vector_constrain(N); - vector_d tau_p = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(phi_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(gam_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau_p[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 40; - validate_non_negative_index("phi", "N", N); - Eigen::Matrix phi(static_cast(N)); - (void) phi; // dummy to suppress unused var warning - - stan::math::initialize(phi, DUMMY_VAR__); - stan::math::fill(phi,DUMMY_VAR__); - current_statement_begin__ = 41; - validate_non_negative_index("eta", "N", N); - Eigen::Matrix eta(static_cast(N)); - (void) eta; // dummy to suppress unused var warning - - stan::math::initialize(eta, DUMMY_VAR__); - stan::math::fill(eta,DUMMY_VAR__); - current_statement_begin__ = 42; - validate_non_negative_index("gam", "N", N); - Eigen::Matrix gam(static_cast(N)); - (void) gam; // dummy to suppress unused var warning - - stan::math::initialize(gam, DUMMY_VAR__); - stan::math::fill(gam,DUMMY_VAR__); - current_statement_begin__ = 43; - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - current_statement_begin__ = 45; 
- stan::math::assign(phi, Phi_approx(add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),phi_p)))); - current_statement_begin__ = 46; - stan::math::assign(eta, stan::math::exp(add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),eta_p)))); - current_statement_begin__ = 47; - stan::math::assign(gam, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),gam_p)))); - current_statement_begin__ = 48; - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),tau_p)))); - - // validate transformed parameters - current_statement_begin__ = 40; - check_greater_or_equal(function__,"phi",phi,0); - check_less_or_equal(function__,"phi",phi,1); - current_statement_begin__ = 41; - check_greater_or_equal(function__,"eta",eta,0); - current_statement_begin__ = 42; - check_greater_or_equal(function__,"gam",gam,0); - current_statement_begin__ = 43; - check_greater_or_equal(function__,"tau",tau,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(phi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(gam[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 87; - local_scalar_t__ mu_phi; - (void) mu_phi; // dummy to suppress unused var warning - - stan::math::initialize(mu_phi, DUMMY_VAR__); - stan::math::fill(mu_phi,DUMMY_VAR__); - stan::math::assign(mu_phi,Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 88; - local_scalar_t__ mu_eta; - (void) mu_eta; // dummy to suppress unused var warning - - stan::math::initialize(mu_eta, DUMMY_VAR__); - stan::math::fill(mu_eta,DUMMY_VAR__); - 
stan::math::assign(mu_eta,stan::math::exp(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 89; - local_scalar_t__ mu_gam; - (void) mu_gam; // dummy to suppress unused var warning - - stan::math::initialize(mu_gam, DUMMY_VAR__); - stan::math::fill(mu_gam,DUMMY_VAR__); - stan::math::assign(mu_gam,stan::math::exp(get_base1(mu_p,3,"mu_p",1))); - current_statement_begin__ = 90; - local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to suppress unused var warning - - stan::math::initialize(mu_tau, DUMMY_VAR__); - stan::math::fill(mu_tau,DUMMY_VAR__); - stan::math::assign(mu_tau,stan::math::exp(get_base1(mu_p,4,"mu_p",1))); - current_statement_begin__ = 93; - local_scalar_t__ log_lik; - (void) log_lik; // dummy to suppress unused var warning - - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - stan::math::assign(log_lik,0); - current_statement_begin__ = 96; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - validate_non_negative_index("y_pred", "P", P); - vector > > y_pred(N, (vector >(T, (vector(P))))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 99; - for (int j = 1; j <= N; ++j) { - current_statement_begin__ = 100; - for (int k = 1; k <= T; ++k) { - current_statement_begin__ = 101; - for (int l = 1; l <= P; ++l) { - current_statement_begin__ = 102; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_uni(k), stan::model::cons_list(stan::model::index_uni(l), stan::model::nil_index_list()))), - -(1), - "assigning variable y_pred"); - } - } - } - - current_statement_begin__ = 105; - for (int j = 1; j <= N; ++j) { - { - current_statement_begin__ = 106; - int n_succ(0); - (void) n_succ; // dummy to suppress unused var warning - - stan::math::fill(n_succ, std::numeric_limits::min()); - stan::math::assign(n_succ,0); - 
current_statement_begin__ = 107; - int n_pump(0); - (void) n_pump; // dummy to suppress unused var warning - - stan::math::fill(n_pump, std::numeric_limits::min()); - stan::math::assign(n_pump,0); - - - current_statement_begin__ = 109; - for (int k = 1; k <= get_base1(Tsubj,j,"Tsubj",1); ++k) { - { - current_statement_begin__ = 110; - local_scalar_t__ p_burst; - (void) p_burst; // dummy to suppress unused var warning - - stan::math::initialize(p_burst, DUMMY_VAR__); - stan::math::fill(p_burst,DUMMY_VAR__); - current_statement_begin__ = 111; - local_scalar_t__ omega; - (void) omega; // dummy to suppress unused var warning - - stan::math::initialize(omega, DUMMY_VAR__); - stan::math::fill(omega,DUMMY_VAR__); - - - current_statement_begin__ = 113; - stan::math::assign(p_burst, (1 - ((get_base1(phi,j,"phi",1) + (get_base1(eta,j,"eta",1) * n_succ)) / (1 + (get_base1(eta,j,"eta",1) * n_pump))))); - current_statement_begin__ = 114; - stan::math::assign(omega, (-(get_base1(gam,j,"gam",1)) / log1m(p_burst))); - current_statement_begin__ = 116; - for (int l = 1; l <= ((get_base1(get_base1(pumps,j,"pumps",1),k,"pumps",2) + 1) - get_base1(get_base1(explosion,j,"explosion",1),k,"explosion",2)); ++l) { - - current_statement_begin__ = 117; - stan::math::assign(log_lik, (log_lik + bernoulli_logit_log(get_base1(get_base1(get_base1(d,j,"d",1),k,"d",2),l,"d",3),(get_base1(tau,j,"tau",1) * (omega - l))))); - current_statement_begin__ = 118; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_uni(k), stan::model::cons_list(stan::model::index_uni(l), stan::model::nil_index_list()))), - bernoulli_logit_rng((get_base1(tau,j,"tau",1) * (omega - l)), base_rng__), - "assigning variable y_pred"); - } - current_statement_begin__ = 121; - stan::math::assign(n_succ, (n_succ + (get_base1(get_base1(pumps,j,"pumps",1),k,"pumps",2) - get_base1(get_base1(explosion,j,"explosion",1),k,"explosion",2)))); - current_statement_begin__ 
= 122; - stan::math::assign(n_pump, (n_pump + get_base1(get_base1(pumps,j,"pumps",1),k,"pumps",2))); - } - } - } - } - - // validate generated quantities - current_statement_begin__ = 87; - check_greater_or_equal(function__,"mu_phi",mu_phi,0); - current_statement_begin__ = 88; - check_greater_or_equal(function__,"mu_eta",mu_eta,0); - current_statement_begin__ = 89; - check_greater_or_equal(function__,"mu_gam",mu_gam,0); - current_statement_begin__ = 90; - check_greater_or_equal(function__,"mu_tau",mu_tau,0); - current_statement_begin__ = 93; - current_statement_begin__ = 96; - - // write generated quantities - vars__.push_back(mu_phi); - vars__.push_back(mu_eta); - vars__.push_back(mu_gam); - vars__.push_back(mu_tau); - vars__.push_back(log_lik); - for (int k_2__ = 0; k_2__ < P; ++k_2__) { - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__][k_2__]); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_bart_par4"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream 
param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "phi_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gam_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "phi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gam" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_phi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_gam"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - for (int k_2__ = 1; k_2__ <= P; ++k_2__) { - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "phi_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gam_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "phi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gam" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_phi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_gam"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - for (int k_2__ = 1; k_2__ <= P; ++k_2__) { - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_choiceRT_ddm_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_choiceRT_ddm"); - reader.add_event(97, 95, "end", "model_choiceRT_ddm"); - return reader; -} - -class model_choiceRT_ddm : public prob_grad { -private: - int N; - int Nu_max; - int Nl_max; - vector Nu; - vector Nl; - vector > RTu; - vector > RTl; - vector minRT; - double RTbound; -public: - model_choiceRT_ddm(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_choiceRT_ddm(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_choiceRT_ddm_namespace::model_choiceRT_ddm"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 3; - 
context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 4; - context__.validate_dims("data initialization", "Nu_max", "int", context__.to_vec()); - Nu_max = int(0); - vals_i__ = context__.vals_i("Nu_max"); - pos__ = 0; - Nu_max = vals_i__[pos__++]; - current_statement_begin__ = 5; - context__.validate_dims("data initialization", "Nl_max", "int", context__.to_vec()); - Nl_max = int(0); - vals_i__ = context__.vals_i("Nl_max"); - pos__ = 0; - Nl_max = vals_i__[pos__++]; - current_statement_begin__ = 6; - validate_non_negative_index("Nu", "N", N); - context__.validate_dims("data initialization", "Nu", "int", context__.to_vec(N)); - validate_non_negative_index("Nu", "N", N); - Nu = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Nu"); - pos__ = 0; - size_t Nu_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Nu_limit_0__; ++i_0__) { - Nu[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 7; - validate_non_negative_index("Nl", "N", N); - context__.validate_dims("data initialization", "Nl", "int", context__.to_vec(N)); - validate_non_negative_index("Nl", "N", N); - Nl = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Nl"); - pos__ = 0; - size_t Nl_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Nl_limit_0__; ++i_0__) { - Nl[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 8; - validate_non_negative_index("RTu", "N", N); - validate_non_negative_index("RTu", "Nu_max", Nu_max); - context__.validate_dims("data initialization", "RTu", "double", context__.to_vec(N,Nu_max)); - validate_non_negative_index("RTu", "N", N); - validate_non_negative_index("RTu", "Nu_max", Nu_max); - RTu = std::vector >(N,std::vector(Nu_max,double(0))); - vals_r__ = context__.vals_r("RTu"); - pos__ = 0; - size_t RTu_limit_1__ = Nu_max; - for (size_t i_1__ = 0; i_1__ < RTu_limit_1__; ++i_1__) { - size_t RTu_limit_0__ = N; - for 
(size_t i_0__ = 0; i_0__ < RTu_limit_0__; ++i_0__) { - RTu[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 9; - validate_non_negative_index("RTl", "N", N); - validate_non_negative_index("RTl", "Nl_max", Nl_max); - context__.validate_dims("data initialization", "RTl", "double", context__.to_vec(N,Nl_max)); - validate_non_negative_index("RTl", "N", N); - validate_non_negative_index("RTl", "Nl_max", Nl_max); - RTl = std::vector >(N,std::vector(Nl_max,double(0))); - vals_r__ = context__.vals_r("RTl"); - pos__ = 0; - size_t RTl_limit_1__ = Nl_max; - for (size_t i_1__ = 0; i_1__ < RTl_limit_1__; ++i_1__) { - size_t RTl_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < RTl_limit_0__; ++i_0__) { - RTl[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 10; - validate_non_negative_index("minRT", "N", N); - context__.validate_dims("data initialization", "minRT", "double", context__.to_vec(N)); - validate_non_negative_index("minRT", "N", N); - minRT = std::vector(N,double(0)); - vals_r__ = context__.vals_r("minRT"); - pos__ = 0; - size_t minRT_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < minRT_limit_0__; ++i_0__) { - minRT[i_0__] = vals_r__[pos__++]; - } - current_statement_begin__ = 11; - context__.validate_dims("data initialization", "RTbound", "double", context__.to_vec()); - RTbound = double(0); - vals_r__ = context__.vals_r("RTbound"); - pos__ = 0; - RTbound = vals_r__[pos__++]; - - // validate, data variables - current_statement_begin__ = 3; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 4; - check_greater_or_equal(function__,"Nu_max",Nu_max,0); - current_statement_begin__ = 5; - check_greater_or_equal(function__,"Nl_max",Nl_max,0); - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Nu[k0__]",Nu[k0__],0); - } - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < N; ++k0__) { - 
check_greater_or_equal(function__,"Nl[k0__]",Nl[k0__],0); - } - current_statement_begin__ = 8; - current_statement_begin__ = 9; - current_statement_begin__ = 10; - current_statement_begin__ = 11; - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 27; - validate_non_negative_index("mu_p", "4", 4); - num_params_r__ += 4; - current_statement_begin__ = 28; - validate_non_negative_index("sigma", "4", 4); - num_params_r__ += 4; - current_statement_begin__ = 31; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 32; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 33; - validate_non_negative_index("delta_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 34; - validate_non_negative_index("tau_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_choiceRT_ddm() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "4", 4); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(4)); - vector_d mu_p(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - 
writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "4", 4); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(4)); - vector_d sigma(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - if (!(context__.contains_r("delta_pr"))) - throw 
std::runtime_error("variable delta_pr missing"); - vals_r__ = context__.vals_r("delta_pr"); - pos__ = 0U; - validate_non_negative_index("delta_pr", "N", N); - context__.validate_dims("initialization", "delta_pr", "vector_d", context__.to_vec(N)); - vector_d delta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - delta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(delta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable delta_pr: ") + e.what()); - } - - if (!(context__.contains_r("tau_pr"))) - throw std::runtime_error("variable tau_pr missing"); - vals_r__ = context__.vals_r("tau_pr"); - pos__ = 0U; - validate_non_negative_index("tau_pr", "N", N); - context__.validate_dims("initialization", "tau_pr", "vector_d", context__.to_vec(N)); - vector_d tau_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - tau_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(tau_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; 
- (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(4,lp__); - else - mu_p = in__.vector_constrain(4); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,4,lp__); - else - sigma = in__.vector_lb_constrain(0,4); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - Eigen::Matrix delta_pr; - (void) delta_pr; // dummy to suppress unused var warning - if (jacobian__) - delta_pr = in__.vector_constrain(N,lp__); - else - delta_pr = in__.vector_constrain(N); - - Eigen::Matrix tau_pr; - (void) tau_pr; // dummy to suppress unused var warning - if (jacobian__) - tau_pr = in__.vector_constrain(N,lp__); - else - tau_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 39; - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 40; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - current_statement_begin__ = 41; - validate_non_negative_index("delta", "N", N); - Eigen::Matrix delta(static_cast(N)); - (void) delta; // dummy to suppress unused var warning - - stan::math::initialize(delta, DUMMY_VAR__); - stan::math::fill(delta,DUMMY_VAR__); - current_statement_begin__ = 42; - 
validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - current_statement_begin__ = 44; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 45; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))), - "assigning variable beta"); - current_statement_begin__ = 46; - stan::model::assign(tau, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - ((Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * (get_base1(minRT,i,"minRT",1) - RTbound)) + RTbound), - "assigning variable tau"); - } - current_statement_begin__ = 48; - stan::math::assign(alpha, stan::math::exp(add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),alpha_pr)))); - current_statement_begin__ = 49; - stan::math::assign(delta, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),delta_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(delta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: delta" << '[' << i0__ << ']'; - throw 
std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(tau(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 39; - check_greater_or_equal(function__,"alpha",alpha,0); - current_statement_begin__ = 40; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,1); - current_statement_begin__ = 41; - check_greater_or_equal(function__,"delta",delta,0); - current_statement_begin__ = 42; - check_greater_or_equal(function__,"tau",tau,RTbound); - check_less_or_equal(function__,"tau",tau,max(minRT)); - - // model body - - current_statement_begin__ = 54; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 55; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 58; - lp_accum__.add(normal_log(alpha_pr, 0, 1)); - current_statement_begin__ = 59; - lp_accum__.add(normal_log(beta_pr, 0, 1)); - current_statement_begin__ = 60; - lp_accum__.add(normal_log(delta_pr, 0, 1)); - current_statement_begin__ = 61; - lp_accum__.add(normal_log(tau_pr, 0, 1)); - current_statement_begin__ = 64; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 66; - lp_accum__.add(wiener_log(stan::model::rvalue(RTu, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_max(get_base1(Nu,i,"Nu",1)), stan::model::nil_index_list())), "RTu"), get_base1(alpha,i,"alpha",1), get_base1(tau,i,"tau",1), get_base1(beta,i,"beta",1), get_base1(delta,i,"delta",1))); - current_statement_begin__ = 67; - lp_accum__.add(wiener_log(stan::model::rvalue(RTl, stan::model::cons_list(stan::model::index_uni(i), 
stan::model::cons_list(stan::model::index_max(get_base1(Nl,i,"Nl",1)), stan::model::nil_index_list())), "RTl"), get_base1(alpha,i,"alpha",1), get_base1(tau,i,"tau",1), (1 - get_base1(beta,i,"beta",1)), -(get_base1(delta,i,"delta",1)))); - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("alpha_pr"); - names__.push_back("beta_pr"); - names__.push_back("delta_pr"); - names__.push_back("tau_pr"); - names__.push_back("alpha"); - names__.push_back("beta"); - names__.push_back("delta"); - names__.push_back("tau"); - names__.push_back("mu_alpha"); - names__.push_back("mu_beta"); - names__.push_back("mu_delta"); - names__.push_back("mu_tau"); - names__.push_back("log_lik"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - 
dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_choiceRT_ddm_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(4); - vector_d sigma = in__.vector_lb_constrain(0,4); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - vector_d delta_pr = in__.vector_constrain(N); - vector_d tau_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(delta_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - 
local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 39; - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 40; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - current_statement_begin__ = 41; - validate_non_negative_index("delta", "N", N); - Eigen::Matrix delta(static_cast(N)); - (void) delta; // dummy to suppress unused var warning - - stan::math::initialize(delta, DUMMY_VAR__); - stan::math::fill(delta,DUMMY_VAR__); - current_statement_begin__ = 42; - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - current_statement_begin__ = 44; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 45; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))), - "assigning variable beta"); - current_statement_begin__ = 46; - stan::model::assign(tau, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - ((Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * (get_base1(minRT,i,"minRT",1) - RTbound)) + RTbound), - "assigning variable tau"); - } - current_statement_begin__ = 48; - stan::math::assign(alpha, 
stan::math::exp(add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),alpha_pr)))); - current_statement_begin__ = 49; - stan::math::assign(delta, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),delta_pr)))); - - // validate transformed parameters - current_statement_begin__ = 39; - check_greater_or_equal(function__,"alpha",alpha,0); - current_statement_begin__ = 40; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,1); - current_statement_begin__ = 41; - check_greater_or_equal(function__,"delta",delta,0); - current_statement_begin__ = 42; - check_greater_or_equal(function__,"tau",tau,RTbound); - check_less_or_equal(function__,"tau",tau,max(minRT)); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(delta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 74; - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - current_statement_begin__ = 75; - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - current_statement_begin__ = 76; - local_scalar_t__ mu_delta; - (void) mu_delta; // dummy to suppress unused var warning - - stan::math::initialize(mu_delta, DUMMY_VAR__); - stan::math::fill(mu_delta,DUMMY_VAR__); - current_statement_begin__ = 77; - local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to suppress unused var warning - - 
stan::math::initialize(mu_tau, DUMMY_VAR__); - stan::math::fill(mu_tau,DUMMY_VAR__); - current_statement_begin__ = 80; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - - - current_statement_begin__ = 83; - stan::math::assign(mu_alpha, stan::math::exp(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 84; - stan::math::assign(mu_beta, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 85; - stan::math::assign(mu_delta, stan::math::exp(get_base1(mu_p,3,"mu_p",1))); - current_statement_begin__ = 86; - stan::math::assign(mu_tau, ((Phi_approx(get_base1(mu_p,4,"mu_p",1)) * (mean(minRT) - RTbound)) + RTbound)); - - current_statement_begin__ = 90; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 91; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - wiener_log(stan::model::rvalue(RTu, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_max(get_base1(Nu,i,"Nu",1)), stan::model::nil_index_list())), "RTu"),get_base1(alpha,i,"alpha",1),get_base1(tau,i,"tau",1),get_base1(beta,i,"beta",1),get_base1(delta,i,"delta",1)), - "assigning variable log_lik"); - current_statement_begin__ = 92; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + wiener_log(stan::model::rvalue(RTl, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_max(get_base1(Nl,i,"Nl",1)), stan::model::nil_index_list())), "RTl"),get_base1(alpha,i,"alpha",1),get_base1(tau,i,"tau",1),(1 - get_base1(beta,i,"beta",1)),-(get_base1(delta,i,"delta",1))))), - "assigning variable log_lik"); - } - - // validate generated quantities - current_statement_begin__ = 74; - 
check_greater_or_equal(function__,"mu_alpha",mu_alpha,0); - current_statement_begin__ = 75; - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,1); - current_statement_begin__ = 76; - check_greater_or_equal(function__,"mu_delta",mu_delta,0); - current_statement_begin__ = 77; - check_greater_or_equal(function__,"mu_tau",mu_tau,RTbound); - check_less_or_equal(function__,"mu_tau",mu_tau,max(minRT)); - current_statement_begin__ = 80; - - // write generated quantities - vars__.push_back(mu_alpha); - vars__.push_back(mu_beta); - vars__.push_back(mu_delta); - vars__.push_back(mu_tau); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_choiceRT_ddm"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "delta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "delta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_delta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "delta_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "delta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_delta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_choiceRT_ddm_single_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_choiceRT_ddm_single"); - reader.add_event(57, 55, "end", "model_choiceRT_ddm_single"); - return reader; -} - -class model_choiceRT_ddm_single : public prob_grad { -private: - int Nu; - int Nl; - vector RTu; - vector RTl; - double minRT; - double RTbound; -public: - model_choiceRT_ddm_single(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_choiceRT_ddm_single(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_choiceRT_ddm_single_namespace::model_choiceRT_ddm_single"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 3; - 
context__.validate_dims("data initialization", "Nu", "int", context__.to_vec()); - Nu = int(0); - vals_i__ = context__.vals_i("Nu"); - pos__ = 0; - Nu = vals_i__[pos__++]; - current_statement_begin__ = 4; - context__.validate_dims("data initialization", "Nl", "int", context__.to_vec()); - Nl = int(0); - vals_i__ = context__.vals_i("Nl"); - pos__ = 0; - Nl = vals_i__[pos__++]; - current_statement_begin__ = 5; - validate_non_negative_index("RTu", "Nu", Nu); - context__.validate_dims("data initialization", "RTu", "double", context__.to_vec(Nu)); - validate_non_negative_index("RTu", "Nu", Nu); - RTu = std::vector(Nu,double(0)); - vals_r__ = context__.vals_r("RTu"); - pos__ = 0; - size_t RTu_limit_0__ = Nu; - for (size_t i_0__ = 0; i_0__ < RTu_limit_0__; ++i_0__) { - RTu[i_0__] = vals_r__[pos__++]; - } - current_statement_begin__ = 6; - validate_non_negative_index("RTl", "Nl", Nl); - context__.validate_dims("data initialization", "RTl", "double", context__.to_vec(Nl)); - validate_non_negative_index("RTl", "Nl", Nl); - RTl = std::vector(Nl,double(0)); - vals_r__ = context__.vals_r("RTl"); - pos__ = 0; - size_t RTl_limit_0__ = Nl; - for (size_t i_0__ = 0; i_0__ < RTl_limit_0__; ++i_0__) { - RTl[i_0__] = vals_r__[pos__++]; - } - current_statement_begin__ = 7; - context__.validate_dims("data initialization", "minRT", "double", context__.to_vec()); - minRT = double(0); - vals_r__ = context__.vals_r("minRT"); - pos__ = 0; - minRT = vals_r__[pos__++]; - current_statement_begin__ = 8; - context__.validate_dims("data initialization", "RTbound", "double", context__.to_vec()); - RTbound = double(0); - vals_r__ = context__.vals_r("RTbound"); - pos__ = 0; - RTbound = vals_r__[pos__++]; - - // validate, data variables - current_statement_begin__ = 3; - check_greater_or_equal(function__,"Nu",Nu,0); - current_statement_begin__ = 4; - check_greater_or_equal(function__,"Nl",Nl,0); - current_statement_begin__ = 5; - current_statement_begin__ = 6; - current_statement_begin__ = 7; - 
current_statement_begin__ = 8; - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 22; - ++num_params_r__; - current_statement_begin__ = 23; - ++num_params_r__; - current_statement_begin__ = 24; - ++num_params_r__; - current_statement_begin__ = 25; - ++num_params_r__; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_choiceRT_ddm_single() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("alpha"))) - throw std::runtime_error("variable alpha missing"); - vals_r__ = context__.vals_r("alpha"); - pos__ = 0U; - context__.validate_dims("initialization", "alpha", "double", context__.to_vec()); - double alpha(0); - alpha = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,alpha); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha: ") + e.what()); - } - - if (!(context__.contains_r("beta"))) - throw std::runtime_error("variable beta missing"); - vals_r__ = context__.vals_r("beta"); - pos__ = 0U; - context__.validate_dims("initialization", "beta", "double", context__.to_vec()); - double beta(0); - beta = vals_r__[pos__++]; - try { - writer__.scalar_lub_unconstrain(0,1,beta); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta: ") + e.what()); - } - - if (!(context__.contains_r("delta"))) - throw 
std::runtime_error("variable delta missing"); - vals_r__ = context__.vals_r("delta"); - pos__ = 0U; - context__.validate_dims("initialization", "delta", "double", context__.to_vec()); - double delta(0); - delta = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,delta); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable delta: ") + e.what()); - } - - if (!(context__.contains_r("tau"))) - throw std::runtime_error("variable tau missing"); - vals_r__ = context__.vals_r("tau"); - pos__ = 0U; - context__.validate_dims("initialization", "tau", "double", context__.to_vec()); - double tau(0); - tau = vals_r__[pos__++]; - try { - writer__.scalar_lub_unconstrain(RTbound,minRT,tau); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - local_scalar_t__ alpha; - (void) alpha; // dummy to suppress unused var warning - if (jacobian__) - alpha = in__.scalar_lb_constrain(0,lp__); - else - alpha = in__.scalar_lb_constrain(0); - - local_scalar_t__ beta; - (void) beta; // dummy to suppress unused var warning 
- if (jacobian__) - beta = in__.scalar_lub_constrain(0,1,lp__); - else - beta = in__.scalar_lub_constrain(0,1); - - local_scalar_t__ delta; - (void) delta; // dummy to suppress unused var warning - if (jacobian__) - delta = in__.scalar_lb_constrain(0,lp__); - else - delta = in__.scalar_lb_constrain(0); - - local_scalar_t__ tau; - (void) tau; // dummy to suppress unused var warning - if (jacobian__) - tau = in__.scalar_lub_constrain(RTbound,minRT,lp__); - else - tau = in__.scalar_lub_constrain(RTbound,minRT); - - - // transformed parameters - - - - // validate transformed parameters - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - - // model body - - current_statement_begin__ = 29; - lp_accum__.add(uniform_log(alpha, 0, 5)); - current_statement_begin__ = 30; - lp_accum__.add(uniform_log(beta, 0, 1)); - current_statement_begin__ = 31; - lp_accum__.add(normal_log(delta, 0, 2)); - current_statement_begin__ = 32; - lp_accum__.add(uniform_log(tau, 0.10000000000000001, minRT)); - current_statement_begin__ = 34; - lp_accum__.add(wiener_log(RTu, alpha, tau, beta, delta)); - current_statement_begin__ = 35; - lp_accum__.add(wiener_log(RTl, alpha, tau, (1 - beta), -(delta))); - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - 
names__.push_back("alpha"); - names__.push_back("beta"); - names__.push_back("delta"); - names__.push_back("tau"); - names__.push_back("log_lik"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_choiceRT_ddm_single_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - double alpha = in__.scalar_lb_constrain(0); - double beta = in__.scalar_lub_constrain(0,1); - double delta = in__.scalar_lb_constrain(0); - double tau = in__.scalar_lub_constrain(RTbound,minRT); - vars__.push_back(alpha); - vars__.push_back(beta); - vars__.push_back(delta); - vars__.push_back(tau); - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - - - - // validate transformed parameters - - // write transformed parameters - if (include_tparams__) { - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 41; - local_scalar_t__ log_lik; - (void) log_lik; // dummy to suppress unused var warning - - stan::math::initialize(log_lik, DUMMY_VAR__); - 
stan::math::fill(log_lik,DUMMY_VAR__); - - - - current_statement_begin__ = 48; - stan::math::assign(log_lik, wiener_log(RTu,alpha,tau,beta,delta)); - current_statement_begin__ = 49; - stan::math::assign(log_lik, stan::model::deep_copy((log_lik + wiener_log(RTl,alpha,tau,(1 - beta),-(delta))))); - - // validate generated quantities - current_statement_begin__ = 41; - - // write generated quantities - vars__.push_back(log_lik); - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_choiceRT_ddm_single"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "delta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "tau"; - 
param_names__.push_back(param_name_stream__.str()); - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "delta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "tau"; - param_names__.push_back(param_name_stream__.str()); - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_choiceRT_lba_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_choiceRT_lba"); - reader.add_event(273, 271, "end", "model_choiceRT_lba"); - return reader; -} - -template -typename boost::math::tools::promote_args::type>::type -lba_pdf(const T0__& t, - const T1__& b, - const T2__& A, - const T3__& v_pdf, - const 
T4__& s, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - (void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - { - current_statement_begin__ = 6; - local_scalar_t__ b_A_tv_ts; - (void) b_A_tv_ts; // dummy to suppress unused var warning - - stan::math::initialize(b_A_tv_ts, DUMMY_VAR__); - stan::math::fill(b_A_tv_ts,DUMMY_VAR__); - current_statement_begin__ = 7; - local_scalar_t__ b_tv_ts; - (void) b_tv_ts; // dummy to suppress unused var warning - - stan::math::initialize(b_tv_ts, DUMMY_VAR__); - stan::math::fill(b_tv_ts,DUMMY_VAR__); - current_statement_begin__ = 8; - local_scalar_t__ term_1b; - (void) term_1b; // dummy to suppress unused var warning - - stan::math::initialize(term_1b, DUMMY_VAR__); - stan::math::fill(term_1b,DUMMY_VAR__); - current_statement_begin__ = 9; - local_scalar_t__ term_2b; - (void) term_2b; // dummy to suppress unused var warning - - stan::math::initialize(term_2b, DUMMY_VAR__); - stan::math::fill(term_2b,DUMMY_VAR__); - current_statement_begin__ = 10; - local_scalar_t__ term_3b; - (void) term_3b; // dummy to suppress unused var warning - - stan::math::initialize(term_3b, DUMMY_VAR__); - stan::math::fill(term_3b,DUMMY_VAR__); - current_statement_begin__ = 11; - local_scalar_t__ term_4b; - (void) term_4b; // dummy to suppress unused var warning - - stan::math::initialize(term_4b, DUMMY_VAR__); - stan::math::fill(term_4b,DUMMY_VAR__); - current_statement_begin__ = 12; - local_scalar_t__ pdf; - (void) pdf; // dummy to suppress unused var warning - - stan::math::initialize(pdf, DUMMY_VAR__); - stan::math::fill(pdf,DUMMY_VAR__); - - - current_statement_begin__ = 14; - stan::math::assign(b_A_tv_ts, (((b - A) - (t * v_pdf)) / (t * s))); - current_statement_begin__ = 
15; - stan::math::assign(b_tv_ts, ((b - (t * v_pdf)) / (t * s))); - current_statement_begin__ = 17; - stan::math::assign(term_1b, (v_pdf * Phi(b_A_tv_ts))); - current_statement_begin__ = 18; - stan::math::assign(term_2b, (s * stan::math::exp(normal_log(stan::math::fabs(b_A_tv_ts),0,1)))); - current_statement_begin__ = 19; - stan::math::assign(term_3b, (v_pdf * Phi(b_tv_ts))); - current_statement_begin__ = 20; - stan::math::assign(term_4b, (s * stan::math::exp(normal_log(stan::math::fabs(b_tv_ts),0,1)))); - current_statement_begin__ = 22; - stan::math::assign(pdf, ((1 / A) * (((-(term_1b) + term_2b) + term_3b) - term_4b))); - current_statement_begin__ = 24; - return stan::math::promote_scalar(pdf); - } - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} - - -struct lba_pdf_functor__ { - template - typename boost::math::tools::promote_args::type>::type - operator()(const T0__& t, - const T1__& b, - const T2__& A, - const T3__& v_pdf, - const T4__& s, std::ostream* pstream__) const { - return lba_pdf(t, b, A, v_pdf, s, pstream__); - } -}; - -template -typename boost::math::tools::promote_args::type>::type -lba_cdf(const T0__& t, - const T1__& b, - const T2__& A, - const T3__& v_cdf, - const T4__& s, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - (void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - { - current_statement_begin__ = 29; - local_scalar_t__ b_A_tv; - (void) b_A_tv; // dummy to suppress unused var warning - - stan::math::initialize(b_A_tv, DUMMY_VAR__); - 
stan::math::fill(b_A_tv,DUMMY_VAR__); - current_statement_begin__ = 30; - local_scalar_t__ b_tv; - (void) b_tv; // dummy to suppress unused var warning - - stan::math::initialize(b_tv, DUMMY_VAR__); - stan::math::fill(b_tv,DUMMY_VAR__); - current_statement_begin__ = 31; - local_scalar_t__ ts; - (void) ts; // dummy to suppress unused var warning - - stan::math::initialize(ts, DUMMY_VAR__); - stan::math::fill(ts,DUMMY_VAR__); - current_statement_begin__ = 32; - local_scalar_t__ term_1a; - (void) term_1a; // dummy to suppress unused var warning - - stan::math::initialize(term_1a, DUMMY_VAR__); - stan::math::fill(term_1a,DUMMY_VAR__); - current_statement_begin__ = 33; - local_scalar_t__ term_2a; - (void) term_2a; // dummy to suppress unused var warning - - stan::math::initialize(term_2a, DUMMY_VAR__); - stan::math::fill(term_2a,DUMMY_VAR__); - current_statement_begin__ = 34; - local_scalar_t__ term_3a; - (void) term_3a; // dummy to suppress unused var warning - - stan::math::initialize(term_3a, DUMMY_VAR__); - stan::math::fill(term_3a,DUMMY_VAR__); - current_statement_begin__ = 35; - local_scalar_t__ term_4a; - (void) term_4a; // dummy to suppress unused var warning - - stan::math::initialize(term_4a, DUMMY_VAR__); - stan::math::fill(term_4a,DUMMY_VAR__); - current_statement_begin__ = 36; - local_scalar_t__ cdf; - (void) cdf; // dummy to suppress unused var warning - - stan::math::initialize(cdf, DUMMY_VAR__); - stan::math::fill(cdf,DUMMY_VAR__); - - - current_statement_begin__ = 38; - stan::math::assign(b_A_tv, ((b - A) - (t * v_cdf))); - current_statement_begin__ = 39; - stan::math::assign(b_tv, (b - (t * v_cdf))); - current_statement_begin__ = 40; - stan::math::assign(ts, (t * s)); - current_statement_begin__ = 42; - stan::math::assign(term_1a, ((b_A_tv / A) * Phi((b_A_tv / ts)))); - current_statement_begin__ = 43; - stan::math::assign(term_2a, ((b_tv / A) * Phi((b_tv / ts)))); - current_statement_begin__ = 44; - stan::math::assign(term_3a, ((ts / A) * 
stan::math::exp(normal_log(stan::math::fabs((b_A_tv / ts)),0,1)))); - current_statement_begin__ = 45; - stan::math::assign(term_4a, ((ts / A) * stan::math::exp(normal_log(stan::math::fabs((b_tv / ts)),0,1)))); - current_statement_begin__ = 47; - stan::math::assign(cdf, ((((1 + term_1a) - term_2a) + term_3a) - term_4a)); - current_statement_begin__ = 49; - return stan::math::promote_scalar(cdf); - } - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} - - -struct lba_cdf_functor__ { - template - typename boost::math::tools::promote_args::type>::type - operator()(const T0__& t, - const T1__& b, - const T2__& A, - const T3__& v_cdf, - const T4__& s, std::ostream* pstream__) const { - return lba_cdf(t, b, A, v_cdf, s, pstream__); - } -}; - -template -typename boost::math::tools::promote_args::type>::type -lba_lpdf(const Eigen::Matrix& RT, - const T1__& d, - const T2__& A, - const Eigen::Matrix& v, - const T4__& s, - const T5__& tau, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - (void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - { - current_statement_begin__ = 54; - local_scalar_t__ t; - (void) t; // dummy to suppress unused var warning - - stan::math::initialize(t, DUMMY_VAR__); - stan::math::fill(t,DUMMY_VAR__); - current_statement_begin__ = 55; - local_scalar_t__ b; - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - current_statement_begin__ = 56; - local_scalar_t__ cdf; - (void) cdf; // dummy to suppress 
unused var warning - - stan::math::initialize(cdf, DUMMY_VAR__); - stan::math::fill(cdf,DUMMY_VAR__); - current_statement_begin__ = 57; - local_scalar_t__ pdf; - (void) pdf; // dummy to suppress unused var warning - - stan::math::initialize(pdf, DUMMY_VAR__); - stan::math::fill(pdf,DUMMY_VAR__); - current_statement_begin__ = 58; - validate_non_negative_index("prob", "cols(RT)", cols(RT)); - Eigen::Matrix prob(static_cast(cols(RT))); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - current_statement_begin__ = 59; - local_scalar_t__ out; - (void) out; // dummy to suppress unused var warning - - stan::math::initialize(out, DUMMY_VAR__); - stan::math::fill(out,DUMMY_VAR__); - current_statement_begin__ = 60; - local_scalar_t__ prob_neg; - (void) prob_neg; // dummy to suppress unused var warning - - stan::math::initialize(prob_neg, DUMMY_VAR__); - stan::math::fill(prob_neg,DUMMY_VAR__); - - - current_statement_begin__ = 62; - stan::math::assign(b, (A + d)); - current_statement_begin__ = 63; - for (int i = 1; i <= cols(RT); ++i) { - - current_statement_begin__ = 64; - stan::math::assign(t, (get_base1(RT,1,i,"RT",1) - tau)); - current_statement_begin__ = 65; - if (as_bool(logical_gt(t,0))) { - - current_statement_begin__ = 66; - stan::math::assign(cdf, 1); - current_statement_begin__ = 67; - for (int j = 1; j <= num_elements(v); ++j) { - - current_statement_begin__ = 68; - if (as_bool(logical_eq(get_base1(RT,2,i,"RT",1),j))) { - - current_statement_begin__ = 69; - stan::math::assign(pdf, lba_pdf(t,b,A,get_base1(v,j,"v",1),s, pstream__)); - } else { - - current_statement_begin__ = 71; - stan::math::assign(cdf, stan::model::deep_copy((lba_cdf(t,b,A,get_base1(v,j,"v",1),s, pstream__) * cdf))); - } - } - current_statement_begin__ = 74; - stan::math::assign(prob_neg, 1); - current_statement_begin__ = 75; - for (int j = 1; j <= num_elements(v); ++j) { - - current_statement_begin__ = 76; 
- stan::math::assign(prob_neg, stan::model::deep_copy((Phi((-(get_base1(v,j,"v",1)) / s)) * prob_neg))); - } - current_statement_begin__ = 78; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (pdf * (1 - cdf)), - "assigning variable prob"); - current_statement_begin__ = 79; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(prob,i,"prob",1) / (1 - prob_neg))), - "assigning variable prob"); - current_statement_begin__ = 80; - if (as_bool(logical_lt(get_base1(prob,i,"prob",1),1e-10))) { - - current_statement_begin__ = 81; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 1e-10, - "assigning variable prob"); - } - } else { - - current_statement_begin__ = 85; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 1e-10, - "assigning variable prob"); - } - } - current_statement_begin__ = 88; - stan::math::assign(out, sum(stan::math::log(prob))); - current_statement_begin__ = 89; - return stan::math::promote_scalar(out); - } - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} -template -typename boost::math::tools::promote_args::type>::type -lba_lpdf(const Eigen::Matrix& RT, - const T1__& d, - const T2__& A, - const Eigen::Matrix& v, - const T4__& s, - const T5__& tau, std::ostream* pstream__) { - return lba_lpdf(RT,d,A,v,s,tau, pstream__); -} - - -struct lba_lpdf_functor__ { - template - typename boost::math::tools::promote_args::type>::type - operator()(const Eigen::Matrix& RT, - const T1__& d, - const T2__& A, - const Eigen::Matrix& v, - const T4__& s, - const T5__& tau, std::ostream* 
pstream__) const { - return lba_lpdf(RT, d, A, v, s, tau, pstream__); - } -}; - -template -Eigen::Matrix::type>::type, Eigen::Dynamic,1> -lba_rng(const T0__& d, - const T1__& A, - const Eigen::Matrix& v, - const T3__& s, - const T4__& tau, RNG& base_rng__, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - (void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - { - current_statement_begin__ = 94; - int get_pos_drift(0); - (void) get_pos_drift; // dummy to suppress unused var warning - - stan::math::fill(get_pos_drift, std::numeric_limits::min()); - current_statement_begin__ = 95; - int no_pos_drift(0); - (void) no_pos_drift; // dummy to suppress unused var warning - - stan::math::fill(no_pos_drift, std::numeric_limits::min()); - current_statement_begin__ = 96; - int get_first_pos(0); - (void) get_first_pos; // dummy to suppress unused var warning - - stan::math::fill(get_first_pos, std::numeric_limits::min()); - current_statement_begin__ = 97; - validate_non_negative_index("drift", "num_elements(v)", num_elements(v)); - Eigen::Matrix drift(static_cast(num_elements(v))); - (void) drift; // dummy to suppress unused var warning - - stan::math::initialize(drift, DUMMY_VAR__); - stan::math::fill(drift,DUMMY_VAR__); - current_statement_begin__ = 98; - int max_iter(0); - (void) max_iter; // dummy to suppress unused var warning - - stan::math::fill(max_iter, std::numeric_limits::min()); - current_statement_begin__ = 99; - int iter(0); - (void) iter; // dummy to suppress unused var warning - - stan::math::fill(iter, std::numeric_limits::min()); - current_statement_begin__ = 100; - validate_non_negative_index("start", "num_elements(v)", num_elements(v)); - vector start(num_elements(v)); - 
stan::math::initialize(start, DUMMY_VAR__); - stan::math::fill(start,DUMMY_VAR__); - current_statement_begin__ = 101; - validate_non_negative_index("ttf", "num_elements(v)", num_elements(v)); - vector ttf(num_elements(v)); - stan::math::initialize(ttf, DUMMY_VAR__); - stan::math::fill(ttf,DUMMY_VAR__); - current_statement_begin__ = 102; - validate_non_negative_index("resp", "num_elements(v)", num_elements(v)); - vector resp(num_elements(v), 0); - stan::math::fill(resp, std::numeric_limits::min()); - current_statement_begin__ = 103; - local_scalar_t__ rt; - (void) rt; // dummy to suppress unused var warning - - stan::math::initialize(rt, DUMMY_VAR__); - stan::math::fill(rt,DUMMY_VAR__); - current_statement_begin__ = 104; - validate_non_negative_index("pred", "2", 2); - Eigen::Matrix pred(static_cast(2)); - (void) pred; // dummy to suppress unused var warning - - stan::math::initialize(pred, DUMMY_VAR__); - stan::math::fill(pred,DUMMY_VAR__); - current_statement_begin__ = 105; - local_scalar_t__ b; - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - - - current_statement_begin__ = 108; - stan::math::assign(get_pos_drift, 1); - current_statement_begin__ = 109; - stan::math::assign(no_pos_drift, 0); - current_statement_begin__ = 110; - stan::math::assign(max_iter, 1000); - current_statement_begin__ = 111; - stan::math::assign(iter, 0); - current_statement_begin__ = 112; - while (as_bool(get_pos_drift)) { - - current_statement_begin__ = 113; - for (int j = 1; j <= num_elements(v); ++j) { - - current_statement_begin__ = 114; - stan::model::assign(drift, - stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), - normal_rng(get_base1(v,j,"v",1),s, base_rng__), - "assigning variable drift"); - current_statement_begin__ = 115; - if (as_bool(logical_gt(get_base1(drift,j,"drift",1),0))) { - - current_statement_begin__ = 116; - stan::math::assign(get_pos_drift, 0); - } - 
} - current_statement_begin__ = 119; - stan::math::assign(iter, stan::model::deep_copy((iter + 1))); - current_statement_begin__ = 120; - if (as_bool(logical_gt(iter,max_iter))) { - - current_statement_begin__ = 121; - stan::math::assign(get_pos_drift, 0); - current_statement_begin__ = 122; - stan::math::assign(no_pos_drift, 1); - } - } - current_statement_begin__ = 127; - if (as_bool(no_pos_drift)) { - - current_statement_begin__ = 128; - stan::model::assign(pred, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - -(1), - "assigning variable pred"); - current_statement_begin__ = 129; - stan::model::assign(pred, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - -(1), - "assigning variable pred"); - } else { - - current_statement_begin__ = 131; - stan::math::assign(b, (A + d)); - current_statement_begin__ = 132; - for (int i = 1; i <= num_elements(v); ++i) { - - current_statement_begin__ = 134; - stan::model::assign(start, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - uniform_rng(0,A, base_rng__), - "assigning variable start"); - current_statement_begin__ = 136; - stan::model::assign(ttf, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - ((b - get_base1(start,i,"start",1)) / get_base1(drift,i,"drift",1)), - "assigning variable ttf"); - } - current_statement_begin__ = 140; - stan::math::assign(resp, sort_indices_asc(ttf)); - current_statement_begin__ = 141; - stan::math::assign(ttf, stan::model::deep_copy(sort_asc(ttf))); - current_statement_begin__ = 142; - stan::math::assign(get_first_pos, 1); - current_statement_begin__ = 143; - stan::math::assign(iter, 1); - current_statement_begin__ = 144; - while (as_bool(get_first_pos)) { - - current_statement_begin__ = 145; - if (as_bool(logical_gt(get_base1(ttf,iter,"ttf",1),0))) { - - current_statement_begin__ = 146; - stan::model::assign(pred, - 
stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - get_base1(ttf,iter,"ttf",1), - "assigning variable pred"); - current_statement_begin__ = 147; - stan::model::assign(pred, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - get_base1(resp,iter,"resp",1), - "assigning variable pred"); - current_statement_begin__ = 148; - stan::math::assign(get_first_pos, 0); - } - current_statement_begin__ = 150; - stan::math::assign(iter, stan::model::deep_copy((iter + 1))); - } - } - current_statement_begin__ = 153; - return stan::math::promote_scalar(pred); - } - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} - - -struct lba_rng_functor__ { - template - Eigen::Matrix::type>::type, Eigen::Dynamic,1> - operator()(const T0__& d, - const T1__& A, - const Eigen::Matrix& v, - const T3__& s, - const T4__& tau, RNG& base_rng__, std::ostream* pstream__) const { - return lba_rng(d, A, v, s, tau, base_rng__, pstream__); - } -}; - -class model_choiceRT_lba : public prob_grad { -private: - int N; - int Max_tr; - int N_choices; - int N_cond; - vector > N_tr_cond; - vector > RT; -public: - model_choiceRT_lba(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_choiceRT_lba(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - 
current_statement_begin__ = -1; - - static const char* function__ = "model_choiceRT_lba_namespace::model_choiceRT_lba"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 157; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 158; - context__.validate_dims("data initialization", "Max_tr", "int", context__.to_vec()); - Max_tr = int(0); - vals_i__ = context__.vals_i("Max_tr"); - pos__ = 0; - Max_tr = vals_i__[pos__++]; - current_statement_begin__ = 159; - context__.validate_dims("data initialization", "N_choices", "int", context__.to_vec()); - N_choices = int(0); - vals_i__ = context__.vals_i("N_choices"); - pos__ = 0; - N_choices = vals_i__[pos__++]; - current_statement_begin__ = 160; - context__.validate_dims("data initialization", "N_cond", "int", context__.to_vec()); - N_cond = int(0); - vals_i__ = context__.vals_i("N_cond"); - pos__ = 0; - N_cond = vals_i__[pos__++]; - current_statement_begin__ = 161; - validate_non_negative_index("N_tr_cond", "N", N); - validate_non_negative_index("N_tr_cond", "N_cond", N_cond); - context__.validate_dims("data initialization", "N_tr_cond", "int", context__.to_vec(N,N_cond)); - validate_non_negative_index("N_tr_cond", "N", N); - validate_non_negative_index("N_tr_cond", "N_cond", N_cond); - N_tr_cond = std::vector >(N,std::vector(N_cond,int(0))); - vals_i__ = context__.vals_i("N_tr_cond"); - pos__ = 0; - size_t N_tr_cond_limit_1__ = N_cond; - for (size_t i_1__ = 0; i_1__ < N_tr_cond_limit_1__; ++i_1__) { - size_t N_tr_cond_limit_0__ = N; - for (size_t i_0__ = 0; 
i_0__ < N_tr_cond_limit_0__; ++i_0__) { - N_tr_cond[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 162; - validate_non_negative_index("RT", "N", N); - validate_non_negative_index("RT", "N_cond", N_cond); - validate_non_negative_index("RT", "2", 2); - validate_non_negative_index("RT", "Max_tr", Max_tr); - context__.validate_dims("data initialization", "RT", "matrix_d", context__.to_vec(N,N_cond,2,Max_tr)); - validate_non_negative_index("RT", "N", N); - validate_non_negative_index("RT", "N_cond", N_cond); - validate_non_negative_index("RT", "2", 2); - validate_non_negative_index("RT", "Max_tr", Max_tr); - RT = std::vector >(N,std::vector(N_cond,matrix_d(static_cast(2),static_cast(Max_tr)))); - vals_r__ = context__.vals_r("RT"); - pos__ = 0; - size_t RT_m_mat_lim__ = 2; - size_t RT_n_mat_lim__ = Max_tr; - for (size_t n_mat__ = 0; n_mat__ < RT_n_mat_lim__; ++n_mat__) { - for (size_t m_mat__ = 0; m_mat__ < RT_m_mat_lim__; ++m_mat__) { - size_t RT_limit_1__ = N_cond; - for (size_t i_1__ = 0; i_1__ < RT_limit_1__; ++i_1__) { - size_t RT_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < RT_limit_0__; ++i_0__) { - RT[i_0__][i_1__](m_mat__,n_mat__) = vals_r__[pos__++]; - } - } - } - } - - // validate, data variables - current_statement_begin__ = 157; - current_statement_begin__ = 158; - current_statement_begin__ = 159; - current_statement_begin__ = 160; - current_statement_begin__ = 161; - current_statement_begin__ = 162; - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 168; - ++num_params_r__; - current_statement_begin__ = 169; - ++num_params_r__; - current_statement_begin__ = 170; - ++num_params_r__; - current_statement_begin__ = 171; - validate_non_negative_index("mu_v", "N_choices", N_choices); - validate_non_negative_index("mu_v", "N_cond", N_cond); - num_params_r__ += N_choices * N_cond; - 
current_statement_begin__ = 174; - ++num_params_r__; - current_statement_begin__ = 175; - ++num_params_r__; - current_statement_begin__ = 176; - ++num_params_r__; - current_statement_begin__ = 177; - validate_non_negative_index("sigma_v", "N_choices", N_choices); - validate_non_negative_index("sigma_v", "N_cond", N_cond); - num_params_r__ += N_choices * N_cond; - current_statement_begin__ = 180; - validate_non_negative_index("d", "N", N); - num_params_r__ += N; - current_statement_begin__ = 181; - validate_non_negative_index("A", "N", N); - num_params_r__ += N; - current_statement_begin__ = 182; - validate_non_negative_index("tau", "N", N); - num_params_r__ += N; - current_statement_begin__ = 183; - validate_non_negative_index("v", "N_choices", N_choices); - validate_non_negative_index("v", "N", N); - validate_non_negative_index("v", "N_cond", N_cond); - num_params_r__ += N_choices * N * N_cond; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_choiceRT_lba() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_d"))) - throw std::runtime_error("variable mu_d missing"); - vals_r__ = context__.vals_r("mu_d"); - pos__ = 0U; - context__.validate_dims("initialization", "mu_d", "double", context__.to_vec()); - double mu_d(0); - mu_d = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,mu_d); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_d: ") + e.what()); - } - - if 
(!(context__.contains_r("mu_A"))) - throw std::runtime_error("variable mu_A missing"); - vals_r__ = context__.vals_r("mu_A"); - pos__ = 0U; - context__.validate_dims("initialization", "mu_A", "double", context__.to_vec()); - double mu_A(0); - mu_A = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,mu_A); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_A: ") + e.what()); - } - - if (!(context__.contains_r("mu_tau"))) - throw std::runtime_error("variable mu_tau missing"); - vals_r__ = context__.vals_r("mu_tau"); - pos__ = 0U; - context__.validate_dims("initialization", "mu_tau", "double", context__.to_vec()); - double mu_tau(0); - mu_tau = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,mu_tau); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_tau: ") + e.what()); - } - - if (!(context__.contains_r("mu_v"))) - throw std::runtime_error("variable mu_v missing"); - vals_r__ = context__.vals_r("mu_v"); - pos__ = 0U; - validate_non_negative_index("mu_v", "N_cond", N_cond); - validate_non_negative_index("mu_v", "N_choices", N_choices); - context__.validate_dims("initialization", "mu_v", "vector_d", context__.to_vec(N_cond,N_choices)); - std::vector mu_v(N_cond,vector_d(static_cast(N_choices))); - for (int j1__ = 0U; j1__ < N_choices; ++j1__) - for (int i0__ = 0U; i0__ < N_cond; ++i0__) - mu_v[i0__](j1__) = vals_r__[pos__++]; - for (int i0__ = 0U; i0__ < N_cond; ++i0__) - try { - writer__.vector_lb_unconstrain(0,mu_v[i0__]); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_v: ") + e.what()); - } - - if (!(context__.contains_r("sigma_d"))) - throw std::runtime_error("variable sigma_d missing"); - vals_r__ = context__.vals_r("sigma_d"); - pos__ = 0U; - context__.validate_dims("initialization", "sigma_d", "double", context__.to_vec()); - double sigma_d(0); - sigma_d = 
vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,sigma_d); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma_d: ") + e.what()); - } - - if (!(context__.contains_r("sigma_A"))) - throw std::runtime_error("variable sigma_A missing"); - vals_r__ = context__.vals_r("sigma_A"); - pos__ = 0U; - context__.validate_dims("initialization", "sigma_A", "double", context__.to_vec()); - double sigma_A(0); - sigma_A = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,sigma_A); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma_A: ") + e.what()); - } - - if (!(context__.contains_r("sigma_tau"))) - throw std::runtime_error("variable sigma_tau missing"); - vals_r__ = context__.vals_r("sigma_tau"); - pos__ = 0U; - context__.validate_dims("initialization", "sigma_tau", "double", context__.to_vec()); - double sigma_tau(0); - sigma_tau = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,sigma_tau); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma_tau: ") + e.what()); - } - - if (!(context__.contains_r("sigma_v"))) - throw std::runtime_error("variable sigma_v missing"); - vals_r__ = context__.vals_r("sigma_v"); - pos__ = 0U; - validate_non_negative_index("sigma_v", "N_cond", N_cond); - validate_non_negative_index("sigma_v", "N_choices", N_choices); - context__.validate_dims("initialization", "sigma_v", "vector_d", context__.to_vec(N_cond,N_choices)); - std::vector sigma_v(N_cond,vector_d(static_cast(N_choices))); - for (int j1__ = 0U; j1__ < N_choices; ++j1__) - for (int i0__ = 0U; i0__ < N_cond; ++i0__) - sigma_v[i0__](j1__) = vals_r__[pos__++]; - for (int i0__ = 0U; i0__ < N_cond; ++i0__) - try { - writer__.vector_lb_unconstrain(0,sigma_v[i0__]); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma_v: ") + 
e.what()); - } - - if (!(context__.contains_r("d"))) - throw std::runtime_error("variable d missing"); - vals_r__ = context__.vals_r("d"); - pos__ = 0U; - validate_non_negative_index("d", "N", N); - context__.validate_dims("initialization", "d", "double", context__.to_vec(N)); - std::vector d(N,double(0)); - for (int i0__ = 0U; i0__ < N; ++i0__) - d[i0__] = vals_r__[pos__++]; - for (int i0__ = 0U; i0__ < N; ++i0__) - try { - writer__.scalar_lb_unconstrain(0,d[i0__]); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable d: ") + e.what()); - } - - if (!(context__.contains_r("A"))) - throw std::runtime_error("variable A missing"); - vals_r__ = context__.vals_r("A"); - pos__ = 0U; - validate_non_negative_index("A", "N", N); - context__.validate_dims("initialization", "A", "double", context__.to_vec(N)); - std::vector A(N,double(0)); - for (int i0__ = 0U; i0__ < N; ++i0__) - A[i0__] = vals_r__[pos__++]; - for (int i0__ = 0U; i0__ < N; ++i0__) - try { - writer__.scalar_lb_unconstrain(0,A[i0__]); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable A: ") + e.what()); - } - - if (!(context__.contains_r("tau"))) - throw std::runtime_error("variable tau missing"); - vals_r__ = context__.vals_r("tau"); - pos__ = 0U; - validate_non_negative_index("tau", "N", N); - context__.validate_dims("initialization", "tau", "double", context__.to_vec(N)); - std::vector tau(N,double(0)); - for (int i0__ = 0U; i0__ < N; ++i0__) - tau[i0__] = vals_r__[pos__++]; - for (int i0__ = 0U; i0__ < N; ++i0__) - try { - writer__.scalar_lb_unconstrain(0,tau[i0__]); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau: ") + e.what()); - } - - if (!(context__.contains_r("v"))) - throw std::runtime_error("variable v missing"); - vals_r__ = context__.vals_r("v"); - pos__ = 0U; - validate_non_negative_index("v", "N", N); - 
validate_non_negative_index("v", "N_cond", N_cond); - validate_non_negative_index("v", "N_choices", N_choices); - context__.validate_dims("initialization", "v", "vector_d", context__.to_vec(N,N_cond,N_choices)); - std::vector > v(N,std::vector(N_cond,vector_d(static_cast(N_choices)))); - for (int j1__ = 0U; j1__ < N_choices; ++j1__) - for (int i1__ = 0U; i1__ < N_cond; ++i1__) - for (int i0__ = 0U; i0__ < N; ++i0__) - v[i0__][i1__](j1__) = vals_r__[pos__++]; - for (int i0__ = 0U; i0__ < N; ++i0__) - for (int i1__ = 0U; i1__ < N_cond; ++i1__) - try { - writer__.vector_lb_unconstrain(0,v[i0__][i1__]); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable v: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - local_scalar_t__ mu_d; - (void) mu_d; // dummy to suppress unused var warning - if (jacobian__) - mu_d = in__.scalar_lb_constrain(0,lp__); - else - mu_d = in__.scalar_lb_constrain(0); - - local_scalar_t__ mu_A; - (void) mu_A; // dummy to suppress unused var warning - if (jacobian__) - mu_A = in__.scalar_lb_constrain(0,lp__); - else - mu_A = in__.scalar_lb_constrain(0); - - local_scalar_t__ mu_tau; - (void) 
mu_tau; // dummy to suppress unused var warning - if (jacobian__) - mu_tau = in__.scalar_lb_constrain(0,lp__); - else - mu_tau = in__.scalar_lb_constrain(0); - - vector > mu_v; - size_t dim_mu_v_0__ = N_cond; - mu_v.reserve(dim_mu_v_0__); - for (size_t k_0__ = 0; k_0__ < dim_mu_v_0__; ++k_0__) { - if (jacobian__) - mu_v.push_back(in__.vector_lb_constrain(0,N_choices,lp__)); - else - mu_v.push_back(in__.vector_lb_constrain(0,N_choices)); - } - - local_scalar_t__ sigma_d; - (void) sigma_d; // dummy to suppress unused var warning - if (jacobian__) - sigma_d = in__.scalar_lb_constrain(0,lp__); - else - sigma_d = in__.scalar_lb_constrain(0); - - local_scalar_t__ sigma_A; - (void) sigma_A; // dummy to suppress unused var warning - if (jacobian__) - sigma_A = in__.scalar_lb_constrain(0,lp__); - else - sigma_A = in__.scalar_lb_constrain(0); - - local_scalar_t__ sigma_tau; - (void) sigma_tau; // dummy to suppress unused var warning - if (jacobian__) - sigma_tau = in__.scalar_lb_constrain(0,lp__); - else - sigma_tau = in__.scalar_lb_constrain(0); - - vector > sigma_v; - size_t dim_sigma_v_0__ = N_cond; - sigma_v.reserve(dim_sigma_v_0__); - for (size_t k_0__ = 0; k_0__ < dim_sigma_v_0__; ++k_0__) { - if (jacobian__) - sigma_v.push_back(in__.vector_lb_constrain(0,N_choices,lp__)); - else - sigma_v.push_back(in__.vector_lb_constrain(0,N_choices)); - } - - vector d; - size_t dim_d_0__ = N; - d.reserve(dim_d_0__); - for (size_t k_0__ = 0; k_0__ < dim_d_0__; ++k_0__) { - if (jacobian__) - d.push_back(in__.scalar_lb_constrain(0,lp__)); - else - d.push_back(in__.scalar_lb_constrain(0)); - } - - vector A; - size_t dim_A_0__ = N; - A.reserve(dim_A_0__); - for (size_t k_0__ = 0; k_0__ < dim_A_0__; ++k_0__) { - if (jacobian__) - A.push_back(in__.scalar_lb_constrain(0,lp__)); - else - A.push_back(in__.scalar_lb_constrain(0)); - } - - vector tau; - size_t dim_tau_0__ = N; - tau.reserve(dim_tau_0__); - for (size_t k_0__ = 0; k_0__ < dim_tau_0__; ++k_0__) { - if (jacobian__) - 
tau.push_back(in__.scalar_lb_constrain(0,lp__)); - else - tau.push_back(in__.scalar_lb_constrain(0)); - } - - vector > > v; - size_t dim_v_0__ = N; - v.resize(dim_v_0__); - for (size_t k_0__ = 0; k_0__ < dim_v_0__; ++k_0__) { - size_t dim_v_1__ = N_cond; - v[k_0__].reserve(dim_v_1__); - for (size_t k_1__ = 0; k_1__ < dim_v_1__; ++k_1__) { - if (jacobian__) - v[k_0__].push_back(in__.vector_lb_constrain(0,N_choices,lp__)); - else - v[k_0__].push_back(in__.vector_lb_constrain(0,N_choices)); - } - } - - - // transformed parameters - current_statement_begin__ = 187; - local_scalar_t__ s; - (void) s; // dummy to suppress unused var warning - - stan::math::initialize(s, DUMMY_VAR__); - stan::math::fill(s,DUMMY_VAR__); - - - current_statement_begin__ = 188; - stan::math::assign(s, 1); - - // validate transformed parameters - if (stan::math::is_uninitialized(s)) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: s"; - throw std::runtime_error(msg__.str()); - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 187; - - // model body - - current_statement_begin__ = 192; - lp_accum__.add(normal_log(mu_d, 0.5, 1)); - if (mu_d < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, 0.5, 1)); - current_statement_begin__ = 193; - lp_accum__.add(normal_log(mu_A, 0.5, 1)); - if (mu_A < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, 0.5, 1)); - current_statement_begin__ = 194; - lp_accum__.add(normal_log(mu_tau, 0.5, 0.5)); - if (mu_tau < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, 0.5, 0.5)); - current_statement_begin__ = 197; - lp_accum__.add(gamma_log(sigma_d, 1, 1)); - current_statement_begin__ = 198; - lp_accum__.add(gamma_log(sigma_A, 1, 1)); - current_statement_begin__ = 199; - lp_accum__.add(gamma_log(sigma_tau, 1, 
1)); - current_statement_begin__ = 202; - for (int j = 1; j <= N_cond; ++j) { - - current_statement_begin__ = 203; - for (int n = 1; n <= N_choices; ++n) { - - current_statement_begin__ = 204; - lp_accum__.add(normal_log(get_base1(get_base1(mu_v,j,"mu_v",1),n,"mu_v",2), 2, 1)); - if (get_base1(get_base1(mu_v,j,"mu_v",1),n,"mu_v",2) < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, 2, 1)); - current_statement_begin__ = 205; - lp_accum__.add(gamma_log(get_base1(get_base1(sigma_v,j,"sigma_v",1),n,"sigma_v",2), 1, 1)); - } - } - current_statement_begin__ = 209; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 211; - int n_trials(0); - (void) n_trials; // dummy to suppress unused var warning - - stan::math::fill(n_trials, std::numeric_limits::min()); - - - current_statement_begin__ = 214; - lp_accum__.add(normal_log(get_base1(d,i,"d",1), mu_d, sigma_d)); - if (get_base1(d,i,"d",1) < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, mu_d, sigma_d)); - current_statement_begin__ = 215; - lp_accum__.add(normal_log(get_base1(A,i,"A",1), mu_A, sigma_A)); - if (get_base1(A,i,"A",1) < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, mu_A, sigma_A)); - current_statement_begin__ = 216; - lp_accum__.add(normal_log(get_base1(tau,i,"tau",1), mu_tau, sigma_tau)); - if (get_base1(tau,i,"tau",1) < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, mu_tau, sigma_tau)); - current_statement_begin__ = 218; - for (int j = 1; j <= N_cond; ++j) { - - current_statement_begin__ = 220; - stan::math::assign(n_trials, get_base1(get_base1(N_tr_cond,i,"N_tr_cond",1),j,"N_tr_cond",2)); - current_statement_begin__ = 222; - for (int n = 1; n <= N_choices; ++n) { - - current_statement_begin__ = 224; - lp_accum__.add(normal_log(get_base1(get_base1(get_base1(v,i,"v",1),j,"v",2),n,"v",3), 
get_base1(get_base1(mu_v,j,"mu_v",1),n,"mu_v",2), get_base1(get_base1(sigma_v,j,"sigma_v",1),n,"sigma_v",2))); - if (get_base1(get_base1(get_base1(v,i,"v",1),j,"v",2),n,"v",3) < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, get_base1(get_base1(mu_v,j,"mu_v",1),n,"mu_v",2), get_base1(get_base1(sigma_v,j,"sigma_v",1),n,"sigma_v",2))); - } - current_statement_begin__ = 227; - lp_accum__.add(lba_lpdf(stan::model::rvalue(RT, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_min_max(1, n_trials), stan::model::nil_index_list())))), "RT"), get_base1(d,i,"d",1), get_base1(A,i,"A",1), stan::model::rvalue(v, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list()))), "v"), s, get_base1(tau,i,"tau",1), pstream__)); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_d"); - names__.push_back("mu_A"); - names__.push_back("mu_tau"); - names__.push_back("mu_v"); - names__.push_back("sigma_d"); - names__.push_back("sigma_A"); - names__.push_back("sigma_tau"); - 
names__.push_back("sigma_v"); - names__.push_back("d"); - names__.push_back("A"); - names__.push_back("tau"); - names__.push_back("v"); - names__.push_back("s"); - names__.push_back("n_trials"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N_cond); - dims__.push_back(N_choices); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N_cond); - dims__.push_back(N_choices); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(N_cond); - dims__.push_back(N_choices); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(N_cond); - dims__.push_back(2); - dims__.push_back(Max_tr); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_choiceRT_lba_namespace::write_array"; - (void) function__; // dummy to suppress unused 
var warning - // read-transform, write parameters - double mu_d = in__.scalar_lb_constrain(0); - double mu_A = in__.scalar_lb_constrain(0); - double mu_tau = in__.scalar_lb_constrain(0); - vector mu_v; - size_t dim_mu_v_0__ = N_cond; - for (size_t k_0__ = 0; k_0__ < dim_mu_v_0__; ++k_0__) { - mu_v.push_back(in__.vector_lb_constrain(0,N_choices)); - } - double sigma_d = in__.scalar_lb_constrain(0); - double sigma_A = in__.scalar_lb_constrain(0); - double sigma_tau = in__.scalar_lb_constrain(0); - vector sigma_v; - size_t dim_sigma_v_0__ = N_cond; - for (size_t k_0__ = 0; k_0__ < dim_sigma_v_0__; ++k_0__) { - sigma_v.push_back(in__.vector_lb_constrain(0,N_choices)); - } - vector d; - size_t dim_d_0__ = N; - for (size_t k_0__ = 0; k_0__ < dim_d_0__; ++k_0__) { - d.push_back(in__.scalar_lb_constrain(0)); - } - vector A; - size_t dim_A_0__ = N; - for (size_t k_0__ = 0; k_0__ < dim_A_0__; ++k_0__) { - A.push_back(in__.scalar_lb_constrain(0)); - } - vector tau; - size_t dim_tau_0__ = N; - for (size_t k_0__ = 0; k_0__ < dim_tau_0__; ++k_0__) { - tau.push_back(in__.scalar_lb_constrain(0)); - } - vector > v; - size_t dim_v_0__ = N; - v.resize(dim_v_0__); - for (size_t k_0__ = 0; k_0__ < dim_v_0__; ++k_0__) { - size_t dim_v_1__ = N_cond; - for (size_t k_1__ = 0; k_1__ < dim_v_1__; ++k_1__) { - v[k_0__].push_back(in__.vector_lb_constrain(0,N_choices)); - } - } - vars__.push_back(mu_d); - vars__.push_back(mu_A); - vars__.push_back(mu_tau); - for (int k_1__ = 0; k_1__ < N_choices; ++k_1__) { - for (int k_0__ = 0; k_0__ < N_cond; ++k_0__) { - vars__.push_back(mu_v[k_0__][k_1__]); - } - } - vars__.push_back(sigma_d); - vars__.push_back(sigma_A); - vars__.push_back(sigma_tau); - for (int k_1__ = 0; k_1__ < N_choices; ++k_1__) { - for (int k_0__ = 0; k_0__ < N_cond; ++k_0__) { - vars__.push_back(sigma_v[k_0__][k_1__]); - } - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(d[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(A[k_0__]); - } - 
for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - for (int k_2__ = 0; k_2__ < N_choices; ++k_2__) { - for (int k_1__ = 0; k_1__ < N_cond; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(v[k_0__][k_1__][k_2__]); - } - } - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 187; - local_scalar_t__ s; - (void) s; // dummy to suppress unused var warning - - stan::math::initialize(s, DUMMY_VAR__); - stan::math::fill(s,DUMMY_VAR__); - - - current_statement_begin__ = 188; - stan::math::assign(s, 1); - - // validate transformed parameters - current_statement_begin__ = 187; - - // write transformed parameters - if (include_tparams__) { - vars__.push_back(s); - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 234; - int n_trials(0); - (void) n_trials; // dummy to suppress unused var warning - - stan::math::fill(n_trials, std::numeric_limits::min()); - current_statement_begin__ = 237; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 240; - validate_non_negative_index("y_pred", "2", 2); - validate_non_negative_index("y_pred", "Max_tr", Max_tr); - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "N_cond", N_cond); - vector > > y_pred(N, (vector >(N_cond, (Eigen::Matrix (static_cast(2),static_cast(Max_tr)))))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 243; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 244; - for (int j = 1; j 
<= N_cond; ++j) { - - current_statement_begin__ = 245; - for (int t = 1; t <= Max_tr; ++t) { - - current_statement_begin__ = 246; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())))), - rep_vector(-(1),2), - "assigning variable y_pred"); - } - } - } - - current_statement_begin__ = 252; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 254; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 256; - for (int j = 1; j <= N_cond; ++j) { - - current_statement_begin__ = 258; - stan::math::assign(n_trials, get_base1(get_base1(N_tr_cond,i,"N_tr_cond",1),j,"N_tr_cond",2)); - current_statement_begin__ = 261; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + lba_lpdf(stan::model::rvalue(RT, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_min_max(1, n_trials), stan::model::nil_index_list())))), "RT"),get_base1(d,i,"d",1),get_base1(A,i,"A",1),stan::model::rvalue(v, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list()))), "v"),s,get_base1(tau,i,"tau",1), pstream__))), - "assigning variable log_lik"); - current_statement_begin__ = 263; - for (int t = 1; t <= n_trials; ++t) { - - current_statement_begin__ = 265; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), 
stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())))), - lba_rng(get_base1(d,i,"d",1),get_base1(A,i,"A",1),stan::model::rvalue(v, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list()))), "v"),s,get_base1(tau,i,"tau",1), base_rng__, pstream__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 234; - current_statement_begin__ = 237; - current_statement_begin__ = 240; - - // write generated quantities - vars__.push_back(n_trials); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_3__ = 0; k_3__ < Max_tr; ++k_3__) { - for (int k_2__ = 0; k_2__ < 2; ++k_2__) { - for (int k_1__ = 0; k_1__ < N_cond; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__](k_2__, k_3__)); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_choiceRT_lba"; - } - - - void 
constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_d"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_1__ = 1; k_1__ <= N_choices; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N_cond; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_v" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma_d"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_1__ = 1; k_1__ <= N_choices; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N_cond; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma_v" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "d" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_2__ = 1; k_2__ <= N_choices; ++k_2__) { - for (int k_1__ = 1; k_1__ <= N_cond; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "v" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "s"; - param_names__.push_back(param_name_stream__.str()); - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "n_trials"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_3__ = 1; k_3__ <= Max_tr; ++k_3__) { - for (int k_2__ = 1; k_2__ <= 2; ++k_2__) { - for (int k_1__ = 1; k_1__ <= N_cond; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__ << '.' 
<< k_3__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_d"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_1__ = 1; k_1__ <= N_choices; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N_cond; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_v" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma_d"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_1__ = 1; k_1__ <= N_choices; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N_cond; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma_v" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "d" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_2__ = 1; k_2__ <= N_choices; ++k_2__) { - for (int k_1__ = 1; k_1__ <= N_cond; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "v" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "s"; - param_names__.push_back(param_name_stream__.str()); - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "n_trials"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_3__ = 1; k_3__ <= Max_tr; ++k_3__) { - for (int k_2__ = 1; k_2__ <= 2; ++k_2__) { - for (int k_1__ = 1; k_1__ <= N_cond; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__ << '.' 
<< k_3__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_choiceRT_lba_single_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_choiceRT_lba_single"); - reader.add_event(234, 232, "end", "model_choiceRT_lba_single"); - return reader; -} - -template -typename boost::math::tools::promote_args::type>::type -lba_pdf(const T0__& t, - const T1__& b, - const T2__& A, - const T3__& v, - const T4__& s, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - (void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - { - current_statement_begin__ = 6; - local_scalar_t__ b_A_tv_ts; - (void) b_A_tv_ts; // dummy to suppress unused var warning - - stan::math::initialize(b_A_tv_ts, DUMMY_VAR__); - stan::math::fill(b_A_tv_ts,DUMMY_VAR__); - current_statement_begin__ = 7; - local_scalar_t__ b_tv_ts; - (void) b_tv_ts; // dummy to suppress unused var warning - - stan::math::initialize(b_tv_ts, DUMMY_VAR__); - stan::math::fill(b_tv_ts,DUMMY_VAR__); - current_statement_begin__ = 8; - local_scalar_t__ term_1; - (void) term_1; // dummy to suppress unused var warning - - stan::math::initialize(term_1, DUMMY_VAR__); - stan::math::fill(term_1,DUMMY_VAR__); - current_statement_begin__ = 9; - local_scalar_t__ term_2; - (void) term_2; // dummy to suppress unused var warning - - 
stan::math::initialize(term_2, DUMMY_VAR__); - stan::math::fill(term_2,DUMMY_VAR__); - current_statement_begin__ = 10; - local_scalar_t__ term_3; - (void) term_3; // dummy to suppress unused var warning - - stan::math::initialize(term_3, DUMMY_VAR__); - stan::math::fill(term_3,DUMMY_VAR__); - current_statement_begin__ = 11; - local_scalar_t__ term_4; - (void) term_4; // dummy to suppress unused var warning - - stan::math::initialize(term_4, DUMMY_VAR__); - stan::math::fill(term_4,DUMMY_VAR__); - current_statement_begin__ = 12; - local_scalar_t__ pdf; - (void) pdf; // dummy to suppress unused var warning - - stan::math::initialize(pdf, DUMMY_VAR__); - stan::math::fill(pdf,DUMMY_VAR__); - - - current_statement_begin__ = 14; - stan::math::assign(b_A_tv_ts, (((b - A) - (t * v)) / (t * s))); - current_statement_begin__ = 15; - stan::math::assign(b_tv_ts, ((b - (t * v)) / (t * s))); - current_statement_begin__ = 17; - stan::math::assign(term_1, (v * Phi(b_A_tv_ts))); - current_statement_begin__ = 18; - stan::math::assign(term_2, (s * stan::math::exp(normal_log(b_A_tv_ts,0,1)))); - current_statement_begin__ = 19; - stan::math::assign(term_3, (v * Phi(b_tv_ts))); - current_statement_begin__ = 20; - stan::math::assign(term_4, (s * stan::math::exp(normal_log(b_tv_ts,0,1)))); - current_statement_begin__ = 22; - stan::math::assign(pdf, ((1 / A) * (((-(term_1) + term_2) + term_3) - term_4))); - current_statement_begin__ = 24; - return stan::math::promote_scalar(pdf); - } - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} - - -struct lba_pdf_functor__ { - template - typename boost::math::tools::promote_args::type>::type - operator()(const T0__& t, - const T1__& b, - const T2__& A, - const T3__& v, - const T4__& s, std::ostream* pstream__) const { - return lba_pdf(t, b, A, v, s, 
pstream__); - } -}; - -template -typename boost::math::tools::promote_args::type>::type -lba_cdf(const T0__& t, - const T1__& b, - const T2__& A, - const T3__& v, - const T4__& s, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - (void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - { - current_statement_begin__ = 29; - local_scalar_t__ b_A_tv; - (void) b_A_tv; // dummy to suppress unused var warning - - stan::math::initialize(b_A_tv, DUMMY_VAR__); - stan::math::fill(b_A_tv,DUMMY_VAR__); - current_statement_begin__ = 30; - local_scalar_t__ b_tv; - (void) b_tv; // dummy to suppress unused var warning - - stan::math::initialize(b_tv, DUMMY_VAR__); - stan::math::fill(b_tv,DUMMY_VAR__); - current_statement_begin__ = 31; - local_scalar_t__ ts; - (void) ts; // dummy to suppress unused var warning - - stan::math::initialize(ts, DUMMY_VAR__); - stan::math::fill(ts,DUMMY_VAR__); - current_statement_begin__ = 32; - local_scalar_t__ term_1; - (void) term_1; // dummy to suppress unused var warning - - stan::math::initialize(term_1, DUMMY_VAR__); - stan::math::fill(term_1,DUMMY_VAR__); - current_statement_begin__ = 33; - local_scalar_t__ term_2; - (void) term_2; // dummy to suppress unused var warning - - stan::math::initialize(term_2, DUMMY_VAR__); - stan::math::fill(term_2,DUMMY_VAR__); - current_statement_begin__ = 34; - local_scalar_t__ term_3; - (void) term_3; // dummy to suppress unused var warning - - stan::math::initialize(term_3, DUMMY_VAR__); - stan::math::fill(term_3,DUMMY_VAR__); - current_statement_begin__ = 35; - local_scalar_t__ term_4; - (void) term_4; // dummy to suppress unused var warning - - stan::math::initialize(term_4, DUMMY_VAR__); - stan::math::fill(term_4,DUMMY_VAR__); - 
current_statement_begin__ = 36; - local_scalar_t__ cdf; - (void) cdf; // dummy to suppress unused var warning - - stan::math::initialize(cdf, DUMMY_VAR__); - stan::math::fill(cdf,DUMMY_VAR__); - - - current_statement_begin__ = 38; - stan::math::assign(b_A_tv, ((b - A) - (t * v))); - current_statement_begin__ = 39; - stan::math::assign(b_tv, (b - (t * v))); - current_statement_begin__ = 40; - stan::math::assign(ts, (t * s)); - current_statement_begin__ = 42; - stan::math::assign(term_1, ((b_A_tv / A) * Phi((b_A_tv / ts)))); - current_statement_begin__ = 43; - stan::math::assign(term_2, ((b_tv / A) * Phi((b_tv / ts)))); - current_statement_begin__ = 44; - stan::math::assign(term_3, ((ts / A) * stan::math::exp(normal_log((b_A_tv / ts),0,1)))); - current_statement_begin__ = 45; - stan::math::assign(term_4, ((ts / A) * stan::math::exp(normal_log((b_tv / ts),0,1)))); - current_statement_begin__ = 47; - stan::math::assign(cdf, ((((1 + term_1) - term_2) + term_3) - term_4)); - current_statement_begin__ = 49; - return stan::math::promote_scalar(cdf); - } - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} - - -struct lba_cdf_functor__ { - template - typename boost::math::tools::promote_args::type>::type - operator()(const T0__& t, - const T1__& b, - const T2__& A, - const T3__& v, - const T4__& s, std::ostream* pstream__) const { - return lba_cdf(t, b, A, v, s, pstream__); - } -}; - -template -typename boost::math::tools::promote_args::type>::type -lba_lpdf(const Eigen::Matrix& RT, - const T1__& d, - const T2__& A, - const Eigen::Matrix& v, - const T4__& s, - const T5__& tau, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - 
(void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - { - current_statement_begin__ = 55; - local_scalar_t__ t; - (void) t; // dummy to suppress unused var warning - - stan::math::initialize(t, DUMMY_VAR__); - stan::math::fill(t,DUMMY_VAR__); - current_statement_begin__ = 56; - local_scalar_t__ b; - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - current_statement_begin__ = 57; - local_scalar_t__ cdf; - (void) cdf; // dummy to suppress unused var warning - - stan::math::initialize(cdf, DUMMY_VAR__); - stan::math::fill(cdf,DUMMY_VAR__); - current_statement_begin__ = 58; - local_scalar_t__ pdf; - (void) pdf; // dummy to suppress unused var warning - - stan::math::initialize(pdf, DUMMY_VAR__); - stan::math::fill(pdf,DUMMY_VAR__); - current_statement_begin__ = 59; - validate_non_negative_index("prob", "rows(RT)", rows(RT)); - Eigen::Matrix prob(static_cast(rows(RT))); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - current_statement_begin__ = 60; - local_scalar_t__ out; - (void) out; // dummy to suppress unused var warning - - stan::math::initialize(out, DUMMY_VAR__); - stan::math::fill(out,DUMMY_VAR__); - current_statement_begin__ = 61; - local_scalar_t__ prob_neg; - (void) prob_neg; // dummy to suppress unused var warning - - stan::math::initialize(prob_neg, DUMMY_VAR__); - stan::math::fill(prob_neg,DUMMY_VAR__); - - - current_statement_begin__ = 63; - stan::math::assign(b, (A + d)); - current_statement_begin__ = 64; - for (int i = 1; i <= rows(RT); ++i) { - - current_statement_begin__ = 65; - stan::math::assign(t, (get_base1(RT,1,i,"RT",1) - tau)); - current_statement_begin__ = 66; - if (as_bool(logical_gt(t,0))) { - - current_statement_begin__ = 67; - 
stan::math::assign(cdf, 1); - current_statement_begin__ = 69; - for (int j = 1; j <= num_elements(v); ++j) { - - current_statement_begin__ = 70; - if (as_bool(logical_eq(get_base1(RT,2,i,"RT",1),j))) { - - current_statement_begin__ = 71; - stan::math::assign(pdf, lba_pdf(t,b,A,get_base1(v,j,"v",1),s, pstream__)); - } else { - - current_statement_begin__ = 73; - stan::math::assign(cdf, stan::model::deep_copy(((1 - lba_cdf(t,b,A,get_base1(v,j,"v",1),s, pstream__)) * cdf))); - } - } - current_statement_begin__ = 76; - stan::math::assign(prob_neg, 1); - current_statement_begin__ = 77; - for (int j = 1; j <= num_elements(v); ++j) { - - current_statement_begin__ = 78; - stan::math::assign(prob_neg, stan::model::deep_copy((Phi((-(get_base1(v,j,"v",1)) / s)) * prob_neg))); - } - current_statement_begin__ = 80; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (pdf * cdf), - "assigning variable prob"); - current_statement_begin__ = 81; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(prob,i,"prob",1) / (1 - prob_neg))), - "assigning variable prob"); - current_statement_begin__ = 82; - if (as_bool(logical_lt(get_base1(prob,i,"prob",1),1e-10))) { - - current_statement_begin__ = 83; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 1e-10, - "assigning variable prob"); - } - } else { - - current_statement_begin__ = 87; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 1e-10, - "assigning variable prob"); - } - } - current_statement_begin__ = 90; - stan::math::assign(out, sum(stan::math::log(prob))); - current_statement_begin__ = 91; - return stan::math::promote_scalar(out); - } - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); 
- // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} -template -typename boost::math::tools::promote_args::type>::type -lba_lpdf(const Eigen::Matrix& RT, - const T1__& d, - const T2__& A, - const Eigen::Matrix& v, - const T4__& s, - const T5__& tau, std::ostream* pstream__) { - return lba_lpdf(RT,d,A,v,s,tau, pstream__); -} - - -struct lba_lpdf_functor__ { - template - typename boost::math::tools::promote_args::type>::type - operator()(const Eigen::Matrix& RT, - const T1__& d, - const T2__& A, - const Eigen::Matrix& v, - const T4__& s, - const T5__& tau, std::ostream* pstream__) const { - return lba_lpdf(RT, d, A, v, s, tau, pstream__); - } -}; - -template -Eigen::Matrix::type>::type, Eigen::Dynamic,1> -lba_rng(const T0__& d, - const T1__& A, - const Eigen::Matrix& v, - const T3__& s, - const T4__& tau, RNG& base_rng__, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - (void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - { - current_statement_begin__ = 96; - int get_pos_drift(0); - (void) get_pos_drift; // dummy to suppress unused var warning - - stan::math::fill(get_pos_drift, std::numeric_limits::min()); - current_statement_begin__ = 97; - int no_pos_drift(0); - (void) no_pos_drift; // dummy to suppress unused var warning - - stan::math::fill(no_pos_drift, std::numeric_limits::min()); - current_statement_begin__ = 98; - int get_first_pos(0); - (void) get_first_pos; // dummy to suppress unused var warning - - stan::math::fill(get_first_pos, std::numeric_limits::min()); - current_statement_begin__ = 99; - validate_non_negative_index("drift", "num_elements(v)", num_elements(v)); - Eigen::Matrix 
drift(static_cast(num_elements(v))); - (void) drift; // dummy to suppress unused var warning - - stan::math::initialize(drift, DUMMY_VAR__); - stan::math::fill(drift,DUMMY_VAR__); - current_statement_begin__ = 100; - int max_iter(0); - (void) max_iter; // dummy to suppress unused var warning - - stan::math::fill(max_iter, std::numeric_limits::min()); - current_statement_begin__ = 101; - int iter(0); - (void) iter; // dummy to suppress unused var warning - - stan::math::fill(iter, std::numeric_limits::min()); - current_statement_begin__ = 102; - validate_non_negative_index("start", "num_elements(v)", num_elements(v)); - vector start(num_elements(v)); - stan::math::initialize(start, DUMMY_VAR__); - stan::math::fill(start,DUMMY_VAR__); - current_statement_begin__ = 103; - validate_non_negative_index("ttf", "num_elements(v)", num_elements(v)); - vector ttf(num_elements(v)); - stan::math::initialize(ttf, DUMMY_VAR__); - stan::math::fill(ttf,DUMMY_VAR__); - current_statement_begin__ = 104; - validate_non_negative_index("resp", "num_elements(v)", num_elements(v)); - vector resp(num_elements(v), 0); - stan::math::fill(resp, std::numeric_limits::min()); - current_statement_begin__ = 105; - local_scalar_t__ rt; - (void) rt; // dummy to suppress unused var warning - - stan::math::initialize(rt, DUMMY_VAR__); - stan::math::fill(rt,DUMMY_VAR__); - current_statement_begin__ = 106; - validate_non_negative_index("pred", "2", 2); - Eigen::Matrix pred(static_cast(2)); - (void) pred; // dummy to suppress unused var warning - - stan::math::initialize(pred, DUMMY_VAR__); - stan::math::fill(pred,DUMMY_VAR__); - current_statement_begin__ = 107; - local_scalar_t__ b; - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - - - current_statement_begin__ = 110; - stan::math::assign(get_pos_drift, 1); - current_statement_begin__ = 111; - stan::math::assign(no_pos_drift, 0); - current_statement_begin__ = 112; - 
stan::math::assign(max_iter, 1000); - current_statement_begin__ = 113; - stan::math::assign(iter, 0); - current_statement_begin__ = 114; - while (as_bool(get_pos_drift)) { - - current_statement_begin__ = 115; - for (int j = 1; j <= num_elements(v); ++j) { - - current_statement_begin__ = 116; - stan::model::assign(drift, - stan::model::cons_list(stan::model::index_uni(j), stan::model::nil_index_list()), - normal_rng(get_base1(v,j,"v",1),s, base_rng__), - "assigning variable drift"); - current_statement_begin__ = 117; - if (as_bool(logical_gt(get_base1(drift,j,"drift",1),0))) { - - current_statement_begin__ = 118; - stan::math::assign(get_pos_drift, 0); - } - } - current_statement_begin__ = 121; - stan::math::assign(iter, stan::model::deep_copy((iter + 1))); - current_statement_begin__ = 122; - if (as_bool(logical_gt(iter,max_iter))) { - - current_statement_begin__ = 123; - stan::math::assign(get_pos_drift, 0); - current_statement_begin__ = 124; - stan::math::assign(no_pos_drift, 1); - } - } - current_statement_begin__ = 129; - if (as_bool(no_pos_drift)) { - - current_statement_begin__ = 130; - stan::model::assign(pred, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - -(1), - "assigning variable pred"); - current_statement_begin__ = 131; - stan::model::assign(pred, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - -(1), - "assigning variable pred"); - } else { - - current_statement_begin__ = 133; - stan::math::assign(b, (A + d)); - current_statement_begin__ = 134; - for (int i = 1; i <= num_elements(v); ++i) { - - current_statement_begin__ = 136; - stan::model::assign(start, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - uniform_rng(0,A, base_rng__), - "assigning variable start"); - current_statement_begin__ = 138; - stan::model::assign(ttf, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - ((b - 
get_base1(start,i,"start",1)) / get_base1(drift,i,"drift",1)), - "assigning variable ttf"); - } - current_statement_begin__ = 142; - stan::math::assign(resp, sort_indices_asc(ttf)); - current_statement_begin__ = 143; - stan::math::assign(ttf, stan::model::deep_copy(sort_asc(ttf))); - current_statement_begin__ = 144; - stan::math::assign(get_first_pos, 1); - current_statement_begin__ = 145; - stan::math::assign(iter, 1); - current_statement_begin__ = 146; - while (as_bool(get_first_pos)) { - - current_statement_begin__ = 147; - if (as_bool(logical_gt(get_base1(ttf,iter,"ttf",1),0))) { - - current_statement_begin__ = 148; - stan::model::assign(pred, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (get_base1(ttf,iter,"ttf",1) + tau), - "assigning variable pred"); - current_statement_begin__ = 149; - stan::model::assign(pred, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - get_base1(resp,iter,"resp",1), - "assigning variable pred"); - current_statement_begin__ = 150; - stan::math::assign(get_first_pos, 0); - } - current_statement_begin__ = 152; - stan::math::assign(iter, stan::model::deep_copy((iter + 1))); - } - } - current_statement_begin__ = 155; - return stan::math::promote_scalar(pred); - } - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} - - -struct lba_rng_functor__ { - template - Eigen::Matrix::type>::type, Eigen::Dynamic,1> - operator()(const T0__& d, - const T1__& A, - const Eigen::Matrix& v, - const T3__& s, - const T4__& tau, RNG& base_rng__, std::ostream* pstream__) const { - return lba_rng(d, A, v, s, tau, base_rng__, pstream__); - } -}; - -class model_choiceRT_lba_single : public prob_grad { -private: - int Max_tr; - int N_choices; - int N_cond; - vector N_tr_cond; - vector 
RT; -public: - model_choiceRT_lba_single(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_choiceRT_lba_single(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_choiceRT_lba_single_namespace::model_choiceRT_lba_single"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 159; - context__.validate_dims("data initialization", "Max_tr", "int", context__.to_vec()); - Max_tr = int(0); - vals_i__ = context__.vals_i("Max_tr"); - pos__ = 0; - Max_tr = vals_i__[pos__++]; - current_statement_begin__ = 160; - context__.validate_dims("data initialization", "N_choices", "int", context__.to_vec()); - N_choices = int(0); - vals_i__ = context__.vals_i("N_choices"); - pos__ = 0; - N_choices = vals_i__[pos__++]; - current_statement_begin__ = 161; - context__.validate_dims("data initialization", "N_cond", "int", context__.to_vec()); - N_cond = int(0); - vals_i__ = context__.vals_i("N_cond"); - pos__ = 0; - N_cond = vals_i__[pos__++]; - current_statement_begin__ = 162; - validate_non_negative_index("N_tr_cond", "N_cond", N_cond); - context__.validate_dims("data initialization", "N_tr_cond", "int", 
context__.to_vec(N_cond)); - validate_non_negative_index("N_tr_cond", "N_cond", N_cond); - N_tr_cond = std::vector(N_cond,int(0)); - vals_i__ = context__.vals_i("N_tr_cond"); - pos__ = 0; - size_t N_tr_cond_limit_0__ = N_cond; - for (size_t i_0__ = 0; i_0__ < N_tr_cond_limit_0__; ++i_0__) { - N_tr_cond[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 163; - validate_non_negative_index("RT", "N_cond", N_cond); - validate_non_negative_index("RT", "2", 2); - validate_non_negative_index("RT", "Max_tr", Max_tr); - context__.validate_dims("data initialization", "RT", "matrix_d", context__.to_vec(N_cond,2,Max_tr)); - validate_non_negative_index("RT", "N_cond", N_cond); - validate_non_negative_index("RT", "2", 2); - validate_non_negative_index("RT", "Max_tr", Max_tr); - RT = std::vector(N_cond,matrix_d(static_cast(2),static_cast(Max_tr))); - vals_r__ = context__.vals_r("RT"); - pos__ = 0; - size_t RT_m_mat_lim__ = 2; - size_t RT_n_mat_lim__ = Max_tr; - for (size_t n_mat__ = 0; n_mat__ < RT_n_mat_lim__; ++n_mat__) { - for (size_t m_mat__ = 0; m_mat__ < RT_m_mat_lim__; ++m_mat__) { - size_t RT_limit_0__ = N_cond; - for (size_t i_0__ = 0; i_0__ < RT_limit_0__; ++i_0__) { - RT[i_0__](m_mat__,n_mat__) = vals_r__[pos__++]; - } - } - } - - // validate, data variables - current_statement_begin__ = 159; - current_statement_begin__ = 160; - current_statement_begin__ = 161; - current_statement_begin__ = 162; - current_statement_begin__ = 163; - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 167; - ++num_params_r__; - current_statement_begin__ = 168; - ++num_params_r__; - current_statement_begin__ = 169; - ++num_params_r__; - current_statement_begin__ = 170; - validate_non_negative_index("v", "N_choices", N_choices); - validate_non_negative_index("v", "N_cond", N_cond); - num_params_r__ += N_choices * N_cond; - } catch (const 
std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_choiceRT_lba_single() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("d"))) - throw std::runtime_error("variable d missing"); - vals_r__ = context__.vals_r("d"); - pos__ = 0U; - context__.validate_dims("initialization", "d", "double", context__.to_vec()); - double d(0); - d = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,d); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable d: ") + e.what()); - } - - if (!(context__.contains_r("A"))) - throw std::runtime_error("variable A missing"); - vals_r__ = context__.vals_r("A"); - pos__ = 0U; - context__.validate_dims("initialization", "A", "double", context__.to_vec()); - double A(0); - A = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,A); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable A: ") + e.what()); - } - - if (!(context__.contains_r("tau"))) - throw std::runtime_error("variable tau missing"); - vals_r__ = context__.vals_r("tau"); - pos__ = 0U; - context__.validate_dims("initialization", "tau", "double", context__.to_vec()); - double tau(0); - tau = vals_r__[pos__++]; - try { - writer__.scalar_lb_unconstrain(0,tau); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau: ") + e.what()); - } - - if (!(context__.contains_r("v"))) - throw std::runtime_error("variable v 
missing"); - vals_r__ = context__.vals_r("v"); - pos__ = 0U; - validate_non_negative_index("v", "N_cond", N_cond); - validate_non_negative_index("v", "N_choices", N_choices); - context__.validate_dims("initialization", "v", "vector_d", context__.to_vec(N_cond,N_choices)); - std::vector v(N_cond,vector_d(static_cast(N_choices))); - for (int j1__ = 0U; j1__ < N_choices; ++j1__) - for (int i0__ = 0U; i0__ < N_cond; ++i0__) - v[i0__](j1__) = vals_r__[pos__++]; - for (int i0__ = 0U; i0__ < N_cond; ++i0__) - try { - writer__.vector_lb_unconstrain(0,v[i0__]); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable v: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - local_scalar_t__ d; - (void) d; // dummy to suppress unused var warning - if (jacobian__) - d = in__.scalar_lb_constrain(0,lp__); - else - d = in__.scalar_lb_constrain(0); - - local_scalar_t__ A; - (void) A; // dummy to suppress unused var warning - if (jacobian__) - A = in__.scalar_lb_constrain(0,lp__); - else - A = in__.scalar_lb_constrain(0); - - local_scalar_t__ tau; - (void) tau; // dummy to suppress unused var warning - if (jacobian__) - tau = 
in__.scalar_lb_constrain(0,lp__); - else - tau = in__.scalar_lb_constrain(0); - - vector > v; - size_t dim_v_0__ = N_cond; - v.reserve(dim_v_0__); - for (size_t k_0__ = 0; k_0__ < dim_v_0__; ++k_0__) { - if (jacobian__) - v.push_back(in__.vector_lb_constrain(0,N_choices,lp__)); - else - v.push_back(in__.vector_lb_constrain(0,N_choices)); - } - - - // transformed parameters - current_statement_begin__ = 173; - local_scalar_t__ s; - (void) s; // dummy to suppress unused var warning - - stan::math::initialize(s, DUMMY_VAR__); - stan::math::fill(s,DUMMY_VAR__); - - - current_statement_begin__ = 174; - stan::math::assign(s, 1); - - // validate transformed parameters - if (stan::math::is_uninitialized(s)) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: s"; - throw std::runtime_error(msg__.str()); - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 173; - - // model body - { - current_statement_begin__ = 178; - int n_trials(0); - (void) n_trials; // dummy to suppress unused var warning - - stan::math::fill(n_trials, std::numeric_limits::min()); - - - current_statement_begin__ = 181; - lp_accum__.add(normal_log(d, 0.5, 1)); - if (d < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, 0.5, 1)); - current_statement_begin__ = 182; - lp_accum__.add(normal_log(A, 0.5, 1)); - if (A < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, 0.5, 1)); - current_statement_begin__ = 183; - lp_accum__.add(normal_log(tau, 0.5, 0.5)); - if (tau < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, 0.5, 0.5)); - current_statement_begin__ = 185; - for (int j = 1; j <= N_cond; ++j) { - - current_statement_begin__ = 187; - stan::math::assign(n_trials, get_base1(N_tr_cond,j,"N_tr_cond",1)); - current_statement_begin__ = 189; - for (int n = 
1; n <= N_choices; ++n) { - - current_statement_begin__ = 191; - lp_accum__.add(normal_log(get_base1(get_base1(v,j,"v",1),n,"v",2), 2, 1)); - if (get_base1(get_base1(v,j,"v",1),n,"v",2) < 0) lp_accum__.add(-std::numeric_limits::infinity()); - else lp_accum__.add(-normal_ccdf_log(0, 2, 1)); - } - current_statement_begin__ = 194; - lp_accum__.add(lba_lpdf(stan::model::rvalue(RT, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_min_max(1, n_trials), stan::model::nil_index_list()))), "RT"), d, A, stan::model::rvalue(v, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "v"), s, tau, pstream__)); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("d"); - names__.push_back("A"); - names__.push_back("tau"); - names__.push_back("v"); - names__.push_back("s"); - names__.push_back("n_trials"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - 
dims__.resize(0); - dims__.push_back(N_cond); - dims__.push_back(N_choices); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N_cond); - dims__.push_back(2); - dims__.push_back(Max_tr); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_choiceRT_lba_single_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - double d = in__.scalar_lb_constrain(0); - double A = in__.scalar_lb_constrain(0); - double tau = in__.scalar_lb_constrain(0); - vector v; - size_t dim_v_0__ = N_cond; - for (size_t k_0__ = 0; k_0__ < dim_v_0__; ++k_0__) { - v.push_back(in__.vector_lb_constrain(0,N_choices)); - } - vars__.push_back(d); - vars__.push_back(A); - vars__.push_back(tau); - for (int k_1__ = 0; k_1__ < N_choices; ++k_1__) { - for (int k_0__ = 0; k_0__ < N_cond; ++k_0__) { - vars__.push_back(v[k_0__][k_1__]); - } - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 173; - local_scalar_t__ s; - (void) s; // dummy to suppress unused var warning - - stan::math::initialize(s, DUMMY_VAR__); - stan::math::fill(s,DUMMY_VAR__); - - - current_statement_begin__ = 174; - stan::math::assign(s, 1); - - // validate transformed parameters - 
current_statement_begin__ = 173; - - // write transformed parameters - if (include_tparams__) { - vars__.push_back(s); - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 200; - int n_trials(0); - (void) n_trials; // dummy to suppress unused var warning - - stan::math::fill(n_trials, std::numeric_limits::min()); - current_statement_begin__ = 203; - local_scalar_t__ log_lik; - (void) log_lik; // dummy to suppress unused var warning - - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 206; - validate_non_negative_index("y_pred", "2", 2); - validate_non_negative_index("y_pred", "Max_tr", Max_tr); - validate_non_negative_index("y_pred", "N_cond", N_cond); - vector > y_pred(N_cond, (Eigen::Matrix (static_cast(2),static_cast(Max_tr)))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 209; - for (int j = 1; j <= N_cond; ++j) { - - current_statement_begin__ = 210; - for (int t = 1; t <= Max_tr; ++t) { - - current_statement_begin__ = 211; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - rep_vector(-(1),2), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 216; - stan::math::assign(log_lik, 0); - - current_statement_begin__ = 219; - for (int j = 1; j <= N_cond; ++j) { - - current_statement_begin__ = 221; - stan::math::assign(n_trials, get_base1(N_tr_cond,j,"N_tr_cond",1)); - current_statement_begin__ = 224; - stan::math::assign(log_lik, stan::model::deep_copy((log_lik + lba_lpdf(stan::model::rvalue(RT, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_min_max(1, n_trials), 
stan::model::nil_index_list()))), "RT"),d,A,stan::model::rvalue(v, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "v"),s,tau, pstream__)))); - current_statement_begin__ = 226; - for (int t = 1; t <= n_trials; ++t) { - - current_statement_begin__ = 228; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - lba_rng(d,A,stan::model::rvalue(v, stan::model::cons_list(stan::model::index_uni(j), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list())), "v"),s,tau, base_rng__, pstream__), - "assigning variable y_pred"); - } - } - - // validate generated quantities - current_statement_begin__ = 200; - current_statement_begin__ = 203; - current_statement_begin__ = 206; - - // write generated quantities - vars__.push_back(n_trials); - vars__.push_back(log_lik); - for (int k_2__ = 0; k_2__ < Max_tr; ++k_2__) { - for (int k_1__ = 0; k_1__ < 2; ++k_1__) { - for (int k_0__ = 0; k_0__ < N_cond; ++k_0__) { - vars__.push_back(y_pred[k_0__](k_1__, k_2__)); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for 
(int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_choiceRT_lba_single"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "d"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_1__ = 1; k_1__ <= N_choices; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N_cond; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "v" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "s"; - param_names__.push_back(param_name_stream__.str()); - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "n_trials"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - for (int k_2__ = 1; k_2__ <= Max_tr; ++k_2__) { - for (int k_1__ = 1; k_1__ <= 2; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N_cond; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "d"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_1__ = 1; k_1__ <= N_choices; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N_cond; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "v" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "s"; - param_names__.push_back(param_name_stream__.str()); - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "n_trials"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - for (int k_2__ = 1; k_2__ <= Max_tr; ++k_2__) { - for (int k_1__ = 1; k_1__ <= 2; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N_cond; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_cra_exp_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_cra_exp"); - reader.add_event(120, 118, "end", "model_cra_exp"); - return reader; -} - -template -typename boost::math::tools::promote_args::type>::type -subjective_value(const T0__& alpha, - const T1__& beta, - const T2__& p, - const T3__& a, - const T4__& v, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - (void) propto__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - - current_statement_begin__ = 12; - return stan::math::promote_scalar((pow(p,(1 + (beta * a))) * pow(v,alpha))); - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} - - -struct subjective_value_functor__ { - template - typename boost::math::tools::promote_args::type>::type - operator()(const T0__& alpha, - const T1__& beta, - const T2__& p, - const T3__& a, - const T4__& v, std::ostream* pstream__) const { - return subjective_value(alpha, beta, p, a, v, pstream__); - } -}; - -class model_cra_exp : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - 
vector > prob; - vector > ambig; - vector > reward_var; - vector > reward_fix; -public: - model_cra_exp(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_cra_exp(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_cra_exp_namespace::model_cra_exp"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 17; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 18; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 19; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; 
- } - current_statement_begin__ = 21; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 22; - validate_non_negative_index("prob", "N", N); - validate_non_negative_index("prob", "T", T); - context__.validate_dims("data initialization", "prob", "double", context__.to_vec(N,T)); - validate_non_negative_index("prob", "N", N); - validate_non_negative_index("prob", "T", T); - prob = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("prob"); - pos__ = 0; - size_t prob_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < prob_limit_1__; ++i_1__) { - size_t prob_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < prob_limit_0__; ++i_0__) { - prob[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 23; - validate_non_negative_index("ambig", "N", N); - validate_non_negative_index("ambig", "T", T); - context__.validate_dims("data initialization", "ambig", "double", context__.to_vec(N,T)); - validate_non_negative_index("ambig", "N", N); - validate_non_negative_index("ambig", "T", T); - ambig = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("ambig"); - pos__ = 0; - size_t ambig_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < ambig_limit_1__; ++i_1__) { - size_t ambig_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < ambig_limit_0__; ++i_0__) { - ambig[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 
24; - validate_non_negative_index("reward_var", "N", N); - validate_non_negative_index("reward_var", "T", T); - context__.validate_dims("data initialization", "reward_var", "double", context__.to_vec(N,T)); - validate_non_negative_index("reward_var", "N", N); - validate_non_negative_index("reward_var", "T", T); - reward_var = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("reward_var"); - pos__ = 0; - size_t reward_var_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < reward_var_limit_1__; ++i_1__) { - size_t reward_var_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < reward_var_limit_0__; ++i_0__) { - reward_var[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 25; - validate_non_negative_index("reward_fix", "N", N); - validate_non_negative_index("reward_fix", "T", T); - context__.validate_dims("data initialization", "reward_fix", "double", context__.to_vec(N,T)); - validate_non_negative_index("reward_fix", "N", N); - validate_non_negative_index("reward_fix", "T", T); - reward_fix = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("reward_fix"); - pos__ = 0; - size_t reward_fix_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < reward_fix_limit_1__; ++i_1__) { - size_t reward_fix_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < reward_fix_limit_0__; ++i_0__) { - reward_fix[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 17; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 18; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 19; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 21; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],0); 
- check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],1); - } - } - current_statement_begin__ = 22; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"prob[k0__][k1__]",prob[k0__][k1__],0); - check_less_or_equal(function__,"prob[k0__][k1__]",prob[k0__][k1__],1); - } - } - current_statement_begin__ = 23; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"ambig[k0__][k1__]",ambig[k0__][k1__],0); - check_less_or_equal(function__,"ambig[k0__][k1__]",ambig[k0__][k1__],1); - } - } - current_statement_begin__ = 24; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"reward_var[k0__][k1__]",reward_var[k0__][k1__],0); - } - } - current_statement_begin__ = 25; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"reward_fix[k0__][k1__]",reward_fix[k0__][k1__],0); - } - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 31; - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 32; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 35; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 36; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 37; - validate_non_negative_index("gamma_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } 
- } - - ~model_cra_exp() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - 
throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - if (!(context__.contains_r("gamma_pr"))) - throw std::runtime_error("variable gamma_pr missing"); - vals_r__ = context__.vals_r("gamma_pr"); - pos__ = 0U; - validate_non_negative_index("gamma_pr", "N", N); - context__.validate_dims("initialization", "gamma_pr", "vector_d", context__.to_vec(N)); - vector_d gamma_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - gamma_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(gamma_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable gamma_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - 
T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - Eigen::Matrix gamma_pr; - (void) gamma_pr; // dummy to suppress unused var warning - if (jacobian__) - gamma_pr = in__.vector_constrain(N,lp__); - else - gamma_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 42; - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 43; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - current_statement_begin__ = 44; - validate_non_negative_index("gamma", "N", N); - Eigen::Matrix gamma(static_cast(N)); - (void) gamma; // dummy to suppress unused var warning - - stan::math::initialize(gamma, DUMMY_VAR__); - stan::math::fill(gamma,DUMMY_VAR__); - - - current_statement_begin__ = 46; - stan::math::assign(alpha, 
multiply(Phi_approx(add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),alpha_pr))),2)); - current_statement_begin__ = 47; - stan::math::assign(beta, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),beta_pr))); - current_statement_begin__ = 48; - stan::math::assign(gamma, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),gamma_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(gamma(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: gamma" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 42; - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - current_statement_begin__ = 43; - current_statement_begin__ = 44; - check_greater_or_equal(function__,"gamma",gamma,0); - - // model body - - current_statement_begin__ = 53; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 54; - lp_accum__.add(normal_log(sigma, 0, 5)); - current_statement_begin__ = 57; - lp_accum__.add(normal_log(alpha_pr, 0, 1)); - current_statement_begin__ = 58; - lp_accum__.add(normal_log(beta_pr, 0, 1)); - current_statement_begin__ = 59; - lp_accum__.add(normal_log(gamma_pr, 0, 1)); - current_statement_begin__ = 
61; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 62; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - current_statement_begin__ = 63; - local_scalar_t__ u_fix; - (void) u_fix; // dummy to suppress unused var warning - - stan::math::initialize(u_fix, DUMMY_VAR__); - stan::math::fill(u_fix,DUMMY_VAR__); - current_statement_begin__ = 64; - local_scalar_t__ u_var; - (void) u_var; // dummy to suppress unused var warning - - stan::math::initialize(u_var, DUMMY_VAR__); - stan::math::fill(u_var,DUMMY_VAR__); - current_statement_begin__ = 65; - local_scalar_t__ p_var; - (void) p_var; // dummy to suppress unused var warning - - stan::math::initialize(p_var, DUMMY_VAR__); - stan::math::fill(p_var,DUMMY_VAR__); - - - current_statement_begin__ = 67; - stan::math::assign(u_fix, subjective_value(get_base1(alpha,i,"alpha",1),get_base1(beta,i,"beta",1),0.5,0,get_base1(get_base1(reward_fix,i,"reward_fix",1),t,"reward_fix",2), pstream__)); - current_statement_begin__ = 68; - stan::math::assign(u_var, subjective_value(get_base1(alpha,i,"alpha",1),get_base1(beta,i,"beta",1),get_base1(get_base1(prob,i,"prob",1),t,"prob",2),get_base1(get_base1(ambig,i,"ambig",1),t,"ambig",2),get_base1(get_base1(reward_var,i,"reward_var",1),t,"reward_var",2), pstream__)); - current_statement_begin__ = 69; - stan::math::assign(p_var, inv_logit((get_base1(gamma,i,"gamma",1) * (u_var - u_fix)))); - current_statement_begin__ = 71; - lp_accum__.add(bernoulli_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),p_var)); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; 
- vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("alpha_pr"); - names__.push_back("beta_pr"); - names__.push_back("gamma_pr"); - names__.push_back("alpha"); - names__.push_back("beta"); - names__.push_back("gamma"); - names__.push_back("mu_alpha"); - names__.push_back("mu_beta"); - names__.push_back("mu_gamma"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader 
in__(params_r__,params_i__); - static const char* function__ = "model_cra_exp_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - vector_d gamma_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(gamma_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 42; - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 43; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - current_statement_begin__ = 44; - validate_non_negative_index("gamma", "N", N); - Eigen::Matrix gamma(static_cast(N)); - (void) gamma; // dummy to suppress unused var warning - - stan::math::initialize(gamma, DUMMY_VAR__); - stan::math::fill(gamma,DUMMY_VAR__); - - - current_statement_begin__ = 46; - 
stan::math::assign(alpha, multiply(Phi_approx(add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),alpha_pr))),2)); - current_statement_begin__ = 47; - stan::math::assign(beta, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),beta_pr))); - current_statement_begin__ = 48; - stan::math::assign(gamma, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),gamma_pr)))); - - // validate transformed parameters - current_statement_begin__ = 42; - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - current_statement_begin__ = 43; - current_statement_begin__ = 44; - check_greater_or_equal(function__,"gamma",gamma,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(gamma[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 78; - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - current_statement_begin__ = 79; - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - current_statement_begin__ = 80; - local_scalar_t__ mu_gamma; - (void) mu_gamma; // dummy to suppress unused var warning - - stan::math::initialize(mu_gamma, DUMMY_VAR__); - stan::math::fill(mu_gamma,DUMMY_VAR__); - current_statement_begin__ = 83; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 86; - 
validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 89; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 90; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 91; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 95; - stan::math::assign(mu_alpha, (Phi_approx(get_base1(mu_p,1,"mu_p",1)) * 2)); - current_statement_begin__ = 96; - stan::math::assign(mu_beta, get_base1(mu_p,2,"mu_p",1)); - current_statement_begin__ = 97; - stan::math::assign(mu_gamma, stan::math::exp(get_base1(mu_p,3,"mu_p",1))); - - current_statement_begin__ = 100; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 102; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 104; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - current_statement_begin__ = 105; - local_scalar_t__ u_fix; - (void) u_fix; // dummy to suppress unused var warning - - stan::math::initialize(u_fix, DUMMY_VAR__); - stan::math::fill(u_fix,DUMMY_VAR__); - current_statement_begin__ = 106; - local_scalar_t__ u_var; - (void) u_var; // dummy to suppress unused var warning - - stan::math::initialize(u_var, DUMMY_VAR__); - stan::math::fill(u_var,DUMMY_VAR__); - current_statement_begin__ = 107; - local_scalar_t__ p_var; - (void) p_var; // dummy to suppress unused var warning - - stan::math::initialize(p_var, DUMMY_VAR__); - stan::math::fill(p_var,DUMMY_VAR__); - - - current_statement_begin__ = 109; - stan::math::assign(u_fix, 
subjective_value(get_base1(alpha,i,"alpha",1),get_base1(beta,i,"beta",1),0.5,0,get_base1(get_base1(reward_fix,i,"reward_fix",1),t,"reward_fix",2), pstream__)); - current_statement_begin__ = 110; - stan::math::assign(u_var, subjective_value(get_base1(alpha,i,"alpha",1),get_base1(beta,i,"beta",1),get_base1(get_base1(prob,i,"prob",1),t,"prob",2),get_base1(get_base1(ambig,i,"ambig",1),t,"ambig",2),get_base1(get_base1(reward_var,i,"reward_var",1),t,"reward_var",2), pstream__)); - current_statement_begin__ = 111; - stan::math::assign(p_var, inv_logit((get_base1(gamma,i,"gamma",1) * (u_var - u_fix)))); - current_statement_begin__ = 113; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + bernoulli_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),p_var)), - "assigning variable log_lik"); - current_statement_begin__ = 114; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(p_var, base_rng__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 78; - check_greater_or_equal(function__,"mu_alpha",mu_alpha,0); - check_less_or_equal(function__,"mu_alpha",mu_alpha,2); - current_statement_begin__ = 79; - current_statement_begin__ = 80; - check_greater_or_equal(function__,"mu_gamma",mu_gamma,0); - current_statement_begin__ = 83; - current_statement_begin__ = 86; - - // write generated quantities - vars__.push_back(mu_alpha); - vars__.push_back(mu_beta); - vars__.push_back(mu_gamma); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } 
- - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_cra_exp"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gamma_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gamma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_gamma"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gamma_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gamma" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_gamma"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_cra_linear_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_cra_linear"); - reader.add_event(120, 118, "end", "model_cra_linear"); - return reader; -} - -template -typename boost::math::tools::promote_args::type>::type -subjective_value(const T0__& alpha, - const T1__& beta, - const T2__& p, - const T3__& a, - const T4__& v, std::ostream* pstream__) { - typedef typename boost::math::tools::promote_args::type>::type local_scalar_t__; - typedef local_scalar_t__ fun_return_scalar_t__; - const static bool propto__ = true; - (void) propto__; - local_scalar_t__ 
DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - int current_statement_begin__ = -1; - try { - - current_statement_begin__ = 12; - return stan::math::promote_scalar(((p - ((beta * a) / 2)) * pow(v,alpha))); - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } -} - - -struct subjective_value_functor__ { - template - typename boost::math::tools::promote_args::type>::type - operator()(const T0__& alpha, - const T1__& beta, - const T2__& p, - const T3__& a, - const T4__& v, std::ostream* pstream__) const { - return subjective_value(alpha, beta, p, a, v, pstream__); - } -}; - -class model_cra_linear : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > prob; - vector > ambig; - vector > reward_var; - vector > reward_fix; -public: - model_cra_linear(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_cra_linear(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_cra_linear_namespace::model_cra_linear"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ 
DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 17; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 18; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 19; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 21; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 22; - validate_non_negative_index("prob", "N", N); - validate_non_negative_index("prob", "T", T); - context__.validate_dims("data initialization", "prob", "double", context__.to_vec(N,T)); - validate_non_negative_index("prob", "N", N); - validate_non_negative_index("prob", "T", T); - prob = std::vector 
>(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("prob"); - pos__ = 0; - size_t prob_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < prob_limit_1__; ++i_1__) { - size_t prob_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < prob_limit_0__; ++i_0__) { - prob[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 23; - validate_non_negative_index("ambig", "N", N); - validate_non_negative_index("ambig", "T", T); - context__.validate_dims("data initialization", "ambig", "double", context__.to_vec(N,T)); - validate_non_negative_index("ambig", "N", N); - validate_non_negative_index("ambig", "T", T); - ambig = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("ambig"); - pos__ = 0; - size_t ambig_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < ambig_limit_1__; ++i_1__) { - size_t ambig_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < ambig_limit_0__; ++i_0__) { - ambig[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 24; - validate_non_negative_index("reward_var", "N", N); - validate_non_negative_index("reward_var", "T", T); - context__.validate_dims("data initialization", "reward_var", "double", context__.to_vec(N,T)); - validate_non_negative_index("reward_var", "N", N); - validate_non_negative_index("reward_var", "T", T); - reward_var = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("reward_var"); - pos__ = 0; - size_t reward_var_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < reward_var_limit_1__; ++i_1__) { - size_t reward_var_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < reward_var_limit_0__; ++i_0__) { - reward_var[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 25; - validate_non_negative_index("reward_fix", "N", N); - validate_non_negative_index("reward_fix", "T", T); - context__.validate_dims("data initialization", "reward_fix", "double", context__.to_vec(N,T)); - validate_non_negative_index("reward_fix", "N", N); - 
validate_non_negative_index("reward_fix", "T", T); - reward_fix = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("reward_fix"); - pos__ = 0; - size_t reward_fix_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < reward_fix_limit_1__; ++i_1__) { - size_t reward_fix_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < reward_fix_limit_0__; ++i_0__) { - reward_fix[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 17; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 18; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 19; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 21; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],0); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],1); - } - } - current_statement_begin__ = 22; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"prob[k0__][k1__]",prob[k0__][k1__],0); - check_less_or_equal(function__,"prob[k0__][k1__]",prob[k0__][k1__],1); - } - } - current_statement_begin__ = 23; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"ambig[k0__][k1__]",ambig[k0__][k1__],0); - check_less_or_equal(function__,"ambig[k0__][k1__]",ambig[k0__][k1__],1); - } - } - current_statement_begin__ = 24; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"reward_var[k0__][k1__]",reward_var[k0__][k1__],0); - } - } - current_statement_begin__ = 25; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - 
check_greater_or_equal(function__,"reward_fix[k0__][k1__]",reward_fix[k0__][k1__],0); - } - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 31; - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 32; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 35; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 36; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 37; - validate_non_negative_index("gamma_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_cra_linear() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - 
throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - if (!(context__.contains_r("gamma_pr"))) - throw std::runtime_error("variable gamma_pr missing"); - vals_r__ = context__.vals_r("gamma_pr"); - pos__ = 0U; - validate_non_negative_index("gamma_pr", "N", N); - context__.validate_dims("initialization", "gamma_pr", 
"vector_d", context__.to_vec(N)); - vector_d gamma_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - gamma_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(gamma_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable gamma_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - Eigen::Matrix gamma_pr; - (void) gamma_pr; // dummy to 
suppress unused var warning - if (jacobian__) - gamma_pr = in__.vector_constrain(N,lp__); - else - gamma_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 42; - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 43; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - current_statement_begin__ = 44; - validate_non_negative_index("gamma", "N", N); - Eigen::Matrix gamma(static_cast(N)); - (void) gamma; // dummy to suppress unused var warning - - stan::math::initialize(gamma, DUMMY_VAR__); - stan::math::fill(gamma,DUMMY_VAR__); - - - current_statement_begin__ = 46; - stan::math::assign(alpha, multiply(Phi_approx(add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),alpha_pr))),2)); - current_statement_begin__ = 47; - stan::math::assign(beta, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),beta_pr))); - current_statement_begin__ = 48; - stan::math::assign(gamma, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),gamma_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if 
(stan::math::is_uninitialized(gamma(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: gamma" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 42; - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - current_statement_begin__ = 43; - current_statement_begin__ = 44; - check_greater_or_equal(function__,"gamma",gamma,0); - - // model body - - current_statement_begin__ = 53; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 54; - lp_accum__.add(normal_log(sigma, 0, 5)); - current_statement_begin__ = 57; - lp_accum__.add(normal_log(alpha_pr, 0, 1)); - current_statement_begin__ = 58; - lp_accum__.add(normal_log(beta_pr, 0, 1)); - current_statement_begin__ = 59; - lp_accum__.add(normal_log(gamma_pr, 0, 1)); - current_statement_begin__ = 61; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 62; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - current_statement_begin__ = 63; - local_scalar_t__ u_fix; - (void) u_fix; // dummy to suppress unused var warning - - stan::math::initialize(u_fix, DUMMY_VAR__); - stan::math::fill(u_fix,DUMMY_VAR__); - current_statement_begin__ = 64; - local_scalar_t__ u_var; - (void) u_var; // dummy to suppress unused var warning - - stan::math::initialize(u_var, DUMMY_VAR__); - stan::math::fill(u_var,DUMMY_VAR__); - current_statement_begin__ = 65; - local_scalar_t__ p_var; - (void) p_var; // dummy to suppress unused var warning - - stan::math::initialize(p_var, DUMMY_VAR__); - stan::math::fill(p_var,DUMMY_VAR__); - - - current_statement_begin__ = 67; - stan::math::assign(u_fix, subjective_value(get_base1(alpha,i,"alpha",1),get_base1(beta,i,"beta",1),0.5,0,get_base1(get_base1(reward_fix,i,"reward_fix",1),t,"reward_fix",2), pstream__)); - 
current_statement_begin__ = 68; - stan::math::assign(u_var, subjective_value(get_base1(alpha,i,"alpha",1),get_base1(beta,i,"beta",1),get_base1(get_base1(prob,i,"prob",1),t,"prob",2),get_base1(get_base1(ambig,i,"ambig",1),t,"ambig",2),get_base1(get_base1(reward_var,i,"reward_var",1),t,"reward_var",2), pstream__)); - current_statement_begin__ = 69; - stan::math::assign(p_var, inv_logit((get_base1(gamma,i,"gamma",1) * (u_var - u_fix)))); - current_statement_begin__ = 71; - lp_accum__.add(bernoulli_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),p_var)); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("alpha_pr"); - names__.push_back("beta_pr"); - names__.push_back("gamma_pr"); - names__.push_back("alpha"); - names__.push_back("beta"); - names__.push_back("gamma"); - names__.push_back("mu_alpha"); - names__.push_back("mu_beta"); - names__.push_back("mu_gamma"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - 
dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_cra_linear_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - vector_d gamma_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(gamma_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress 
unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 42; - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 43; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - current_statement_begin__ = 44; - validate_non_negative_index("gamma", "N", N); - Eigen::Matrix gamma(static_cast(N)); - (void) gamma; // dummy to suppress unused var warning - - stan::math::initialize(gamma, DUMMY_VAR__); - stan::math::fill(gamma,DUMMY_VAR__); - - - current_statement_begin__ = 46; - stan::math::assign(alpha, multiply(Phi_approx(add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),alpha_pr))),2)); - current_statement_begin__ = 47; - stan::math::assign(beta, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),beta_pr))); - current_statement_begin__ = 48; - stan::math::assign(gamma, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),gamma_pr)))); - - // validate transformed parameters - current_statement_begin__ = 42; - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - current_statement_begin__ = 43; - current_statement_begin__ = 44; - check_greater_or_equal(function__,"gamma",gamma,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - for (int k_0__ = 0; 
k_0__ < N; ++k_0__) { - vars__.push_back(gamma[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 78; - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - current_statement_begin__ = 79; - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - current_statement_begin__ = 80; - local_scalar_t__ mu_gamma; - (void) mu_gamma; // dummy to suppress unused var warning - - stan::math::initialize(mu_gamma, DUMMY_VAR__); - stan::math::fill(mu_gamma,DUMMY_VAR__); - current_statement_begin__ = 83; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 86; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 89; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 90; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 91; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 95; - stan::math::assign(mu_alpha, (Phi_approx(get_base1(mu_p,1,"mu_p",1)) * 2)); - current_statement_begin__ = 96; - stan::math::assign(mu_beta, get_base1(mu_p,2,"mu_p",1)); - current_statement_begin__ = 97; - stan::math::assign(mu_gamma, stan::math::exp(get_base1(mu_p,3,"mu_p",1))); - - current_statement_begin__ = 100; - for (int i = 1; i <= N; 
++i) { - - current_statement_begin__ = 102; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 104; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - current_statement_begin__ = 105; - local_scalar_t__ u_fix; - (void) u_fix; // dummy to suppress unused var warning - - stan::math::initialize(u_fix, DUMMY_VAR__); - stan::math::fill(u_fix,DUMMY_VAR__); - current_statement_begin__ = 106; - local_scalar_t__ u_var; - (void) u_var; // dummy to suppress unused var warning - - stan::math::initialize(u_var, DUMMY_VAR__); - stan::math::fill(u_var,DUMMY_VAR__); - current_statement_begin__ = 107; - local_scalar_t__ p_var; - (void) p_var; // dummy to suppress unused var warning - - stan::math::initialize(p_var, DUMMY_VAR__); - stan::math::fill(p_var,DUMMY_VAR__); - - - current_statement_begin__ = 109; - stan::math::assign(u_fix, subjective_value(get_base1(alpha,i,"alpha",1),get_base1(beta,i,"beta",1),0.5,0,get_base1(get_base1(reward_fix,i,"reward_fix",1),t,"reward_fix",2), pstream__)); - current_statement_begin__ = 110; - stan::math::assign(u_var, subjective_value(get_base1(alpha,i,"alpha",1),get_base1(beta,i,"beta",1),get_base1(get_base1(prob,i,"prob",1),t,"prob",2),get_base1(get_base1(ambig,i,"ambig",1),t,"ambig",2),get_base1(get_base1(reward_var,i,"reward_var",1),t,"reward_var",2), pstream__)); - current_statement_begin__ = 111; - stan::math::assign(p_var, inv_logit((get_base1(gamma,i,"gamma",1) * (u_var - u_fix)))); - current_statement_begin__ = 113; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + bernoulli_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),p_var)), - "assigning variable log_lik"); - current_statement_begin__ = 
114; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(p_var, base_rng__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 78; - check_greater_or_equal(function__,"mu_alpha",mu_alpha,0); - check_less_or_equal(function__,"mu_alpha",mu_alpha,2); - current_statement_begin__ = 79; - current_statement_begin__ = 80; - check_greater_or_equal(function__,"mu_gamma",mu_gamma,0); - current_statement_begin__ = 83; - current_statement_begin__ = 86; - - // write generated quantities - vars__.push_back(mu_alpha); - vars__.push_back(mu_beta); - vars__.push_back(mu_gamma); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_cra_linear"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) 
const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gamma_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gamma" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_gamma"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gamma_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gamma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_gamma"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_dd_cs_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_dd_cs"); - reader.add_event(106, 104, "end", "model_dd_cs"); - return reader; -} - -class model_dd_cs : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > delay_later; - vector > amount_later; - vector > delay_sooner; - vector > amount_sooner; - vector > choice; -public: - model_dd_cs(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_dd_cs(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_dd_cs_namespace::model_dd_cs"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", 
"N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("delay_later", "N", N); - validate_non_negative_index("delay_later", "T", T); - context__.validate_dims("data initialization", "delay_later", "double", context__.to_vec(N,T)); - validate_non_negative_index("delay_later", "N", N); - validate_non_negative_index("delay_later", "T", T); - delay_later = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("delay_later"); - pos__ = 0; - size_t delay_later_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < delay_later_limit_1__; ++i_1__) { - size_t delay_later_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < delay_later_limit_0__; ++i_0__) { - delay_later[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("amount_later", "N", N); - validate_non_negative_index("amount_later", "T", T); - context__.validate_dims("data initialization", "amount_later", "double", context__.to_vec(N,T)); - validate_non_negative_index("amount_later", "N", N); - validate_non_negative_index("amount_later", "T", T); - amount_later = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("amount_later"); - pos__ = 0; - size_t amount_later_limit_1__ = T; - for 
(size_t i_1__ = 0; i_1__ < amount_later_limit_1__; ++i_1__) { - size_t amount_later_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < amount_later_limit_0__; ++i_0__) { - amount_later[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("delay_sooner", "N", N); - validate_non_negative_index("delay_sooner", "T", T); - context__.validate_dims("data initialization", "delay_sooner", "double", context__.to_vec(N,T)); - validate_non_negative_index("delay_sooner", "N", N); - validate_non_negative_index("delay_sooner", "T", T); - delay_sooner = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("delay_sooner"); - pos__ = 0; - size_t delay_sooner_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < delay_sooner_limit_1__; ++i_1__) { - size_t delay_sooner_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < delay_sooner_limit_0__; ++i_0__) { - delay_sooner[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 8; - validate_non_negative_index("amount_sooner", "N", N); - validate_non_negative_index("amount_sooner", "T", T); - context__.validate_dims("data initialization", "amount_sooner", "double", context__.to_vec(N,T)); - validate_non_negative_index("amount_sooner", "N", N); - validate_non_negative_index("amount_sooner", "T", T); - amount_sooner = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("amount_sooner"); - pos__ = 0; - size_t amount_sooner_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < amount_sooner_limit_1__; ++i_1__) { - size_t amount_sooner_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < amount_sooner_limit_0__; ++i_0__) { - amount_sooner[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 9; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - 
validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"delay_later[k0__][k1__]",delay_later[k0__][k1__],0); - } - } - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"amount_later[k0__][k1__]",amount_later[k0__][k1__],0); - } - } - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"delay_sooner[k0__][k1__]",delay_sooner[k0__][k1__],0); - } - } - current_statement_begin__ = 8; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"amount_sooner[k0__][k1__]",amount_sooner[k0__][k1__],0); - } - } - current_statement_begin__ = 9; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],1); - } - } - // initialize data variables - - - // validate transformed data - - 
// validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 18; - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 19; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 22; - validate_non_negative_index("r_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 23; - validate_non_negative_index("s_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 24; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_dd_cs() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - 
context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("r_pr"))) - throw std::runtime_error("variable r_pr missing"); - vals_r__ = context__.vals_r("r_pr"); - pos__ = 0U; - validate_non_negative_index("r_pr", "N", N); - context__.validate_dims("initialization", "r_pr", "vector_d", context__.to_vec(N)); - vector_d r_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - r_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(r_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable r_pr: ") + e.what()); - } - - if (!(context__.contains_r("s_pr"))) - throw std::runtime_error("variable s_pr missing"); - vals_r__ = context__.vals_r("s_pr"); - pos__ = 0U; - validate_non_negative_index("s_pr", "N", N); - context__.validate_dims("initialization", "s_pr", "vector_d", context__.to_vec(N)); - vector_d s_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - s_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(s_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable s_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { 
- throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix r_pr; - (void) r_pr; // dummy to suppress unused var warning - if (jacobian__) - r_pr = in__.vector_constrain(N,lp__); - else - r_pr = in__.vector_constrain(N); - - Eigen::Matrix s_pr; - (void) s_pr; // dummy to suppress unused var warning - if (jacobian__) - s_pr = in__.vector_constrain(N,lp__); - else - s_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 29; - validate_non_negative_index("r", "N", N); - Eigen::Matrix 
r(static_cast(N)); - (void) r; // dummy to suppress unused var warning - - stan::math::initialize(r, DUMMY_VAR__); - stan::math::fill(r,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("s", "N", N); - Eigen::Matrix s(static_cast(N)); - (void) s; // dummy to suppress unused var warning - - stan::math::initialize(s, DUMMY_VAR__); - stan::math::fill(s,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 33; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 34; - stan::model::assign(r, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(r_pr,i,"r_pr",1)))), - "assigning variable r"); - current_statement_begin__ = 35; - stan::model::assign(s, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(s_pr,i,"s_pr",1)))) * 10), - "assigning variable s"); - current_statement_begin__ = 36; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(r(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: r" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(s(i0__))) { - std::stringstream msg__; - msg__ << "Undefined 
transformed parameter: s" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 29; - check_greater_or_equal(function__,"r",r,0); - check_less_or_equal(function__,"r",r,1); - current_statement_begin__ = 30; - check_greater_or_equal(function__,"s",s,0); - check_less_or_equal(function__,"s",s,10); - current_statement_begin__ = 31; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // model body - - current_statement_begin__ = 43; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 44; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 47; - lp_accum__.add(normal_log(r_pr, 0, 1)); - current_statement_begin__ = 48; - lp_accum__.add(normal_log(s_pr, 0, 1)); - current_statement_begin__ = 49; - lp_accum__.add(normal_log(beta_pr, 0, 1)); - current_statement_begin__ = 51; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 53; - local_scalar_t__ ev_later; - (void) ev_later; // dummy to suppress unused var warning - - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - current_statement_begin__ = 54; - local_scalar_t__ ev_sooner; - (void) ev_sooner; // dummy to suppress unused var warning - - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - current_statement_begin__ = 56; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 57; - stan::math::assign(ev_later, 
(get_base1(get_base1(amount_later,i,"amount_later",1),t,"amount_later",2) * stan::math::exp((-(1) * pow((get_base1(r,i,"r",1) * get_base1(get_base1(delay_later,i,"delay_later",1),t,"delay_later",2)),get_base1(s,i,"s",1)))))); - current_statement_begin__ = 58; - stan::math::assign(ev_sooner, (get_base1(get_base1(amount_sooner,i,"amount_sooner",1),t,"amount_sooner",2) * stan::math::exp((-(1) * pow((get_base1(r,i,"r",1) * get_base1(get_base1(delay_sooner,i,"delay_sooner",1),t,"delay_sooner",2)),get_base1(s,i,"s",1)))))); - current_statement_begin__ = 59; - lp_accum__.add(bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), (get_base1(beta,i,"beta",1) * (ev_later - ev_sooner)))); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("r_pr"); - names__.push_back("s_pr"); - names__.push_back("beta_pr"); - names__.push_back("r"); - names__.push_back("s"); - names__.push_back("beta"); - names__.push_back("mu_r"); - names__.push_back("mu_s"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - 
dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_dd_cs_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d r_pr = in__.vector_constrain(N); - vector_d s_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(r_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(s_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // 
declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 29; - validate_non_negative_index("r", "N", N); - Eigen::Matrix r(static_cast(N)); - (void) r; // dummy to suppress unused var warning - - stan::math::initialize(r, DUMMY_VAR__); - stan::math::fill(r,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("s", "N", N); - Eigen::Matrix s(static_cast(N)); - (void) s; // dummy to suppress unused var warning - - stan::math::initialize(s, DUMMY_VAR__); - stan::math::fill(s,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 33; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 34; - stan::model::assign(r, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(r_pr,i,"r_pr",1)))), - "assigning variable r"); - current_statement_begin__ = 35; - stan::model::assign(s, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(s_pr,i,"s_pr",1)))) * 10), - "assigning variable s"); - current_statement_begin__ = 36; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed 
parameters - current_statement_begin__ = 29; - check_greater_or_equal(function__,"r",r,0); - check_less_or_equal(function__,"r",r,1); - current_statement_begin__ = 30; - check_greater_or_equal(function__,"s",s,0); - check_less_or_equal(function__,"s",s,10); - current_statement_begin__ = 31; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(r[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(s[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 65; - local_scalar_t__ mu_r; - (void) mu_r; // dummy to suppress unused var warning - - stan::math::initialize(mu_r, DUMMY_VAR__); - stan::math::fill(mu_r,DUMMY_VAR__); - current_statement_begin__ = 66; - local_scalar_t__ mu_s; - (void) mu_s; // dummy to suppress unused var warning - - stan::math::initialize(mu_s, DUMMY_VAR__); - stan::math::fill(mu_s,DUMMY_VAR__); - current_statement_begin__ = 67; - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - current_statement_begin__ = 70; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 73; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 76; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 77; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ 
= 78; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 82; - stan::math::assign(mu_r, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 83; - stan::math::assign(mu_s, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 10)); - current_statement_begin__ = 84; - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 5)); - - current_statement_begin__ = 87; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 89; - local_scalar_t__ ev_later; - (void) ev_later; // dummy to suppress unused var warning - - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - current_statement_begin__ = 90; - local_scalar_t__ ev_sooner; - (void) ev_sooner; // dummy to suppress unused var warning - - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - current_statement_begin__ = 92; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 94; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 95; - stan::math::assign(ev_later, (get_base1(get_base1(amount_later,i,"amount_later",1),t,"amount_later",2) * stan::math::exp((-(1) * pow((get_base1(r,i,"r",1) * get_base1(get_base1(delay_later,i,"delay_later",1),t,"delay_later",2)),get_base1(s,i,"s",1)))))); - current_statement_begin__ = 96; - stan::math::assign(ev_sooner, (get_base1(get_base1(amount_sooner,i,"amount_sooner",1),t,"amount_sooner",2) * stan::math::exp((-(1) * pow((get_base1(r,i,"r",1) * get_base1(get_base1(delay_sooner,i,"delay_sooner",1),t,"delay_sooner",2)),get_base1(s,i,"s",1)))))); - current_statement_begin__ = 97; - stan::model::assign(log_lik, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),(get_base1(beta,i,"beta",1) * (ev_later - ev_sooner))))), - "assigning variable log_lik"); - current_statement_begin__ = 100; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(inv_logit((get_base1(beta,i,"beta",1) * (ev_later - ev_sooner))), base_rng__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 65; - check_greater_or_equal(function__,"mu_r",mu_r,0); - check_less_or_equal(function__,"mu_r",mu_r,1); - current_statement_begin__ = 66; - check_greater_or_equal(function__,"mu_s",mu_s,0); - check_less_or_equal(function__,"mu_s",mu_s,10); - current_statement_begin__ = 67; - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,5); - current_statement_begin__ = 70; - current_statement_begin__ = 73; - - // write generated quantities - vars__.push_back(mu_r); - vars__.push_back(mu_s); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector 
params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_dd_cs"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "s_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "s" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_r"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_s"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "s_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "s" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_r"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_s"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_dd_cs_single_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_dd_cs_single"); - reader.add_event(62, 60, "end", "model_dd_cs_single"); - return reader; -} - -class model_dd_cs_single : public prob_grad { -private: - int Tsubj; - vector delay_later; - vector amount_later; - vector delay_sooner; - vector amount_sooner; - vector choice; -public: - model_dd_cs_single(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_dd_cs_single(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_dd_cs_single_namespace::model_dd_cs_single"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - 
context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec()); - Tsubj = int(0); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - Tsubj = vals_i__[pos__++]; - current_statement_begin__ = 3; - validate_non_negative_index("delay_later", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "delay_later", "double", context__.to_vec(Tsubj)); - validate_non_negative_index("delay_later", "Tsubj", Tsubj); - delay_later = std::vector(Tsubj,double(0)); - vals_r__ = context__.vals_r("delay_later"); - pos__ = 0; - size_t delay_later_limit_0__ = Tsubj; - for (size_t i_0__ = 0; i_0__ < delay_later_limit_0__; ++i_0__) { - delay_later[i_0__] = vals_r__[pos__++]; - } - current_statement_begin__ = 4; - validate_non_negative_index("amount_later", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "amount_later", "double", context__.to_vec(Tsubj)); - validate_non_negative_index("amount_later", "Tsubj", Tsubj); - amount_later = std::vector(Tsubj,double(0)); - vals_r__ = context__.vals_r("amount_later"); - pos__ = 0; - size_t amount_later_limit_0__ = Tsubj; - for (size_t i_0__ = 0; i_0__ < amount_later_limit_0__; ++i_0__) { - amount_later[i_0__] = vals_r__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("delay_sooner", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "delay_sooner", "double", context__.to_vec(Tsubj)); - validate_non_negative_index("delay_sooner", "Tsubj", Tsubj); - delay_sooner = std::vector(Tsubj,double(0)); - vals_r__ = context__.vals_r("delay_sooner"); - pos__ = 0; - size_t delay_sooner_limit_0__ = Tsubj; - for (size_t i_0__ = 0; i_0__ < delay_sooner_limit_0__; ++i_0__) { - delay_sooner[i_0__] = vals_r__[pos__++]; - } - current_statement_begin__ = 6; - validate_non_negative_index("amount_sooner", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "amount_sooner", "double", context__.to_vec(Tsubj)); - validate_non_negative_index("amount_sooner", "Tsubj", 
Tsubj); - amount_sooner = std::vector(Tsubj,double(0)); - vals_r__ = context__.vals_r("amount_sooner"); - pos__ = 0; - size_t amount_sooner_limit_0__ = Tsubj; - for (size_t i_0__ = 0; i_0__ < amount_sooner_limit_0__; ++i_0__) { - amount_sooner[i_0__] = vals_r__[pos__++]; - } - current_statement_begin__ = 7; - validate_non_negative_index("choice", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(Tsubj)); - validate_non_negative_index("choice", "Tsubj", Tsubj); - choice = std::vector(Tsubj,int(0)); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_0__ = Tsubj; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__] = vals_i__[pos__++]; - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"Tsubj",Tsubj,1); - current_statement_begin__ = 3; - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"delay_later[k0__]",delay_later[k0__],0); - } - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"amount_later[k0__]",amount_later[k0__],0); - } - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"delay_sooner[k0__]",delay_sooner[k0__],0); - } - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"amount_sooner[k0__]",amount_sooner[k0__],0); - } - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"choice[k0__]",choice[k0__],-(1)); - check_less_or_equal(function__,"choice[k0__]",choice[k0__],1); - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 14; - ++num_params_r__; - current_statement_begin__ = 15; - ++num_params_r__; - 
current_statement_begin__ = 16; - ++num_params_r__; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_dd_cs_single() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("r"))) - throw std::runtime_error("variable r missing"); - vals_r__ = context__.vals_r("r"); - pos__ = 0U; - context__.validate_dims("initialization", "r", "double", context__.to_vec()); - double r(0); - r = vals_r__[pos__++]; - try { - writer__.scalar_lub_unconstrain(0,1,r); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable r: ") + e.what()); - } - - if (!(context__.contains_r("s"))) - throw std::runtime_error("variable s missing"); - vals_r__ = context__.vals_r("s"); - pos__ = 0U; - context__.validate_dims("initialization", "s", "double", context__.to_vec()); - double s(0); - s = vals_r__[pos__++]; - try { - writer__.scalar_lub_unconstrain(0,10,s); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable s: ") + e.what()); - } - - if (!(context__.contains_r("beta"))) - throw std::runtime_error("variable beta missing"); - vals_r__ = context__.vals_r("beta"); - pos__ = 0U; - context__.validate_dims("initialization", "beta", "double", context__.to_vec()); - double beta(0); - beta = vals_r__[pos__++]; - try { - writer__.scalar_lub_unconstrain(0,5,beta); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta: ") + e.what()); - } - - 
params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - local_scalar_t__ r; - (void) r; // dummy to suppress unused var warning - if (jacobian__) - r = in__.scalar_lub_constrain(0,1,lp__); - else - r = in__.scalar_lub_constrain(0,1); - - local_scalar_t__ s; - (void) s; // dummy to suppress unused var warning - if (jacobian__) - s = in__.scalar_lub_constrain(0,10,lp__); - else - s = in__.scalar_lub_constrain(0,10); - - local_scalar_t__ beta; - (void) beta; // dummy to suppress unused var warning - if (jacobian__) - beta = in__.scalar_lub_constrain(0,5,lp__); - else - beta = in__.scalar_lub_constrain(0,5); - - - // transformed parameters - current_statement_begin__ = 20; - validate_non_negative_index("ev_later", "Tsubj", Tsubj); - vector ev_later(Tsubj); - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - current_statement_begin__ = 21; - validate_non_negative_index("ev_sooner", "Tsubj", Tsubj); - vector ev_sooner(Tsubj); - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - current_statement_begin__ = 23; - for (int t = 1; t <= Tsubj; ++t) { - - current_statement_begin__ = 24; - 
stan::model::assign(ev_later, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - (get_base1(amount_later,t,"amount_later",1) * stan::math::exp((-(1) * pow((r * get_base1(delay_later,t,"delay_later",1)),s)))), - "assigning variable ev_later"); - current_statement_begin__ = 25; - stan::model::assign(ev_sooner, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - (get_base1(amount_sooner,t,"amount_sooner",1) * stan::math::exp((-(1) * pow((r * get_base1(delay_sooner,t,"delay_sooner",1)),s)))), - "assigning variable ev_sooner"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < Tsubj; ++i0__) { - if (stan::math::is_uninitialized(ev_later[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ev_later" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < Tsubj; ++i0__) { - if (stan::math::is_uninitialized(ev_sooner[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ev_sooner" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 20; - current_statement_begin__ = 21; - - // model body - - current_statement_begin__ = 32; - lp_accum__.add(uniform_log(r, 0, 1)); - current_statement_begin__ = 33; - lp_accum__.add(uniform_log(s, 0, 10)); - current_statement_begin__ = 34; - lp_accum__.add(uniform_log(beta, 0, 5)); - current_statement_begin__ = 36; - for (int t = 1; t <= Tsubj; ++t) { - - current_statement_begin__ = 37; - lp_accum__.add(bernoulli_logit_log(get_base1(choice,t,"choice",1), (beta * (get_base1(ev_later,t,"ev_later",1) - get_base1(ev_sooner,t,"ev_sooner",1))))); - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents 
compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("r"); - names__.push_back("s"); - names__.push_back("beta"); - names__.push_back("ev_later"); - names__.push_back("ev_sooner"); - names__.push_back("logR"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(Tsubj); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(Tsubj); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(Tsubj); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_dd_cs_single_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - double r = in__.scalar_lub_constrain(0,1); - double s = 
in__.scalar_lub_constrain(0,10); - double beta = in__.scalar_lub_constrain(0,5); - vars__.push_back(r); - vars__.push_back(s); - vars__.push_back(beta); - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 20; - validate_non_negative_index("ev_later", "Tsubj", Tsubj); - vector ev_later(Tsubj); - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - current_statement_begin__ = 21; - validate_non_negative_index("ev_sooner", "Tsubj", Tsubj); - vector ev_sooner(Tsubj); - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - current_statement_begin__ = 23; - for (int t = 1; t <= Tsubj; ++t) { - - current_statement_begin__ = 24; - stan::model::assign(ev_later, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - (get_base1(amount_later,t,"amount_later",1) * stan::math::exp((-(1) * pow((r * get_base1(delay_later,t,"delay_later",1)),s)))), - "assigning variable ev_later"); - current_statement_begin__ = 25; - stan::model::assign(ev_sooner, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - (get_base1(amount_sooner,t,"amount_sooner",1) * stan::math::exp((-(1) * pow((r * get_base1(delay_sooner,t,"delay_sooner",1)),s)))), - "assigning variable ev_sooner"); - } - - // validate transformed parameters - current_statement_begin__ = 20; - current_statement_begin__ = 21; - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < Tsubj; ++k_0__) { - vars__.push_back(ev_later[k_0__]); - } - for (int k_0__ = 0; k_0__ < Tsubj; ++k_0__) { - vars__.push_back(ev_sooner[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define 
generated quantities - current_statement_begin__ = 42; - local_scalar_t__ logR; - (void) logR; // dummy to suppress unused var warning - - stan::math::initialize(logR, DUMMY_VAR__); - stan::math::fill(logR,DUMMY_VAR__); - current_statement_begin__ = 43; - local_scalar_t__ log_lik; - (void) log_lik; // dummy to suppress unused var warning - - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 46; - validate_non_negative_index("y_pred", "Tsubj", Tsubj); - vector y_pred(Tsubj); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 48; - stan::math::assign(logR, stan::math::log(r)); - - current_statement_begin__ = 51; - stan::math::assign(log_lik, 0); - current_statement_begin__ = 53; - for (int t = 1; t <= Tsubj; ++t) { - - current_statement_begin__ = 54; - stan::math::assign(log_lik, stan::model::deep_copy((log_lik + bernoulli_logit_log(get_base1(choice,t,"choice",1),(beta * (get_base1(ev_later,t,"ev_later",1) - get_base1(ev_sooner,t,"ev_sooner",1))))))); - current_statement_begin__ = 57; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - bernoulli_rng(inv_logit((beta * (get_base1(ev_later,t,"ev_later",1) - get_base1(ev_sooner,t,"ev_sooner",1)))), base_rng__), - "assigning variable y_pred"); - } - - // validate generated quantities - current_statement_begin__ = 42; - current_statement_begin__ = 43; - current_statement_begin__ = 46; - - // write generated quantities - vars__.push_back(logR); - vars__.push_back(log_lik); - for (int k_0__ = 0; k_0__ < Tsubj; ++k_0__) { - vars__.push_back(y_pred[k_0__]); - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - 
void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_dd_cs_single"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "r"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "s"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "beta"; - param_names__.push_back(param_name_stream__.str()); - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ev_later" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ev_sooner" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "logR"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "r"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "s"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "beta"; - param_names__.push_back(param_name_stream__.str()); - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ev_later" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ev_sooner" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "logR"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_dd_exp_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_dd_exp"); - reader.add_event(100, 98, "end", "model_dd_exp"); - return reader; -} - -class model_dd_exp : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > delay_later; - vector > amount_later; - vector > delay_sooner; - vector > amount_sooner; - vector > choice; -public: - model_dd_exp(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_dd_exp(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; 
- - static const char* function__ = "model_dd_exp_namespace::model_dd_exp"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("delay_later", "N", N); - validate_non_negative_index("delay_later", "T", T); - context__.validate_dims("data initialization", "delay_later", "double", context__.to_vec(N,T)); - validate_non_negative_index("delay_later", "N", N); - validate_non_negative_index("delay_later", "T", T); - delay_later = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("delay_later"); - pos__ = 0; - size_t delay_later_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < delay_later_limit_1__; ++i_1__) { - size_t delay_later_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < delay_later_limit_0__; ++i_0__) { - delay_later[i_0__][i_1__] = vals_r__[pos__++]; - } - } - 
current_statement_begin__ = 6; - validate_non_negative_index("amount_later", "N", N); - validate_non_negative_index("amount_later", "T", T); - context__.validate_dims("data initialization", "amount_later", "double", context__.to_vec(N,T)); - validate_non_negative_index("amount_later", "N", N); - validate_non_negative_index("amount_later", "T", T); - amount_later = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("amount_later"); - pos__ = 0; - size_t amount_later_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < amount_later_limit_1__; ++i_1__) { - size_t amount_later_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < amount_later_limit_0__; ++i_0__) { - amount_later[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("delay_sooner", "N", N); - validate_non_negative_index("delay_sooner", "T", T); - context__.validate_dims("data initialization", "delay_sooner", "double", context__.to_vec(N,T)); - validate_non_negative_index("delay_sooner", "N", N); - validate_non_negative_index("delay_sooner", "T", T); - delay_sooner = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("delay_sooner"); - pos__ = 0; - size_t delay_sooner_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < delay_sooner_limit_1__; ++i_1__) { - size_t delay_sooner_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < delay_sooner_limit_0__; ++i_0__) { - delay_sooner[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 8; - validate_non_negative_index("amount_sooner", "N", N); - validate_non_negative_index("amount_sooner", "T", T); - context__.validate_dims("data initialization", "amount_sooner", "double", context__.to_vec(N,T)); - validate_non_negative_index("amount_sooner", "N", N); - validate_non_negative_index("amount_sooner", "T", T); - amount_sooner = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("amount_sooner"); - pos__ = 0; - size_t amount_sooner_limit_1__ = T; - 
for (size_t i_1__ = 0; i_1__ < amount_sooner_limit_1__; ++i_1__) { - size_t amount_sooner_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < amount_sooner_limit_0__; ++i_0__) { - amount_sooner[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 9; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"delay_later[k0__][k1__]",delay_later[k0__][k1__],0); - } - } - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"amount_later[k0__][k1__]",amount_later[k0__][k1__],0); - } - } - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"delay_sooner[k0__][k1__]",delay_sooner[k0__][k1__],0); - } - } - current_statement_begin__ = 8; - for (int k0__ = 0; k0__ < N; 
++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"amount_sooner[k0__][k1__]",amount_sooner[k0__][k1__],0); - } - } - current_statement_begin__ = 9; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],1); - } - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 18; - validate_non_negative_index("mu_p", "2", 2); - num_params_r__ += 2; - current_statement_begin__ = 19; - validate_non_negative_index("sigma", "2", 2); - num_params_r__ += 2; - current_statement_begin__ = 22; - validate_non_negative_index("r_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 23; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_dd_exp() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "2", 2); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(2)); - vector_d mu_p(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; 
- try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "2", 2); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(2)); - vector_d sigma(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("r_pr"))) - throw std::runtime_error("variable r_pr missing"); - vals_r__ = context__.vals_r("r_pr"); - pos__ = 0U; - validate_non_negative_index("r_pr", "N", N); - context__.validate_dims("initialization", "r_pr", "vector_d", context__.to_vec(N)); - vector_d r_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - r_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(r_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable r_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void 
transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(2,lp__); - else - mu_p = in__.vector_constrain(2); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,2,lp__); - else - sigma = in__.vector_lb_constrain(0,2); - - Eigen::Matrix r_pr; - (void) r_pr; // dummy to suppress unused var warning - if (jacobian__) - r_pr = in__.vector_constrain(N,lp__); - else - r_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 28; - validate_non_negative_index("r", "N", N); - Eigen::Matrix r(static_cast(N)); - (void) r; // dummy to suppress unused var warning - - stan::math::initialize(r, DUMMY_VAR__); - stan::math::fill(r,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, 
DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 31; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 32; - stan::model::assign(r, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(r_pr,i,"r_pr",1)))), - "assigning variable r"); - current_statement_begin__ = 33; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(r(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: r" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 28; - check_greater_or_equal(function__,"r",r,0); - check_less_or_equal(function__,"r",r,1); - current_statement_begin__ = 29; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // model body - - current_statement_begin__ = 40; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 41; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 44; - lp_accum__.add(normal_log(r_pr, 0, 1)); - current_statement_begin__ = 45; - lp_accum__.add(normal_log(beta_pr, 0, 1)); - current_statement_begin__ = 47; - for (int i = 1; i <= N; 
++i) { - { - current_statement_begin__ = 49; - local_scalar_t__ ev_later; - (void) ev_later; // dummy to suppress unused var warning - - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - current_statement_begin__ = 50; - local_scalar_t__ ev_sooner; - (void) ev_sooner; // dummy to suppress unused var warning - - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - current_statement_begin__ = 52; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 53; - stan::math::assign(ev_later, (get_base1(get_base1(amount_later,i,"amount_later",1),t,"amount_later",2) * stan::math::exp(((-(1) * get_base1(r,i,"r",1)) * get_base1(get_base1(delay_later,i,"delay_later",1),t,"delay_later",2))))); - current_statement_begin__ = 54; - stan::math::assign(ev_sooner, (get_base1(get_base1(amount_sooner,i,"amount_sooner",1),t,"amount_sooner",2) * stan::math::exp(((-(1) * get_base1(r,i,"r",1)) * get_base1(get_base1(delay_sooner,i,"delay_sooner",1),t,"delay_sooner",2))))); - current_statement_begin__ = 55; - lp_accum__.add(bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), (get_base1(beta,i,"beta",1) * (ev_later - ev_sooner)))); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - 
names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("r_pr"); - names__.push_back("beta_pr"); - names__.push_back("r"); - names__.push_back("beta"); - names__.push_back("mu_r"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_dd_exp_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(2); - vector_d sigma = in__.vector_lb_constrain(0,2); - vector_d r_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < 
N; ++k_0__) { - vars__.push_back(r_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 28; - validate_non_negative_index("r", "N", N); - Eigen::Matrix r(static_cast(N)); - (void) r; // dummy to suppress unused var warning - - stan::math::initialize(r, DUMMY_VAR__); - stan::math::fill(r,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 31; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 32; - stan::model::assign(r, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(r_pr,i,"r_pr",1)))), - "assigning variable r"); - current_statement_begin__ = 33; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - current_statement_begin__ = 28; - check_greater_or_equal(function__,"r",r,0); - check_less_or_equal(function__,"r",r,1); - current_statement_begin__ = 29; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - 
vars__.push_back(r[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 61; - local_scalar_t__ mu_r; - (void) mu_r; // dummy to suppress unused var warning - - stan::math::initialize(mu_r, DUMMY_VAR__); - stan::math::fill(mu_r,DUMMY_VAR__); - current_statement_begin__ = 62; - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - current_statement_begin__ = 65; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 68; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 71; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 72; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 73; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 77; - stan::math::assign(mu_r, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 78; - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 5)); - - current_statement_begin__ = 81; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 83; - local_scalar_t__ ev_later; - (void) ev_later; // dummy to suppress unused var warning - - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - current_statement_begin__ = 84; - local_scalar_t__ 
ev_sooner; - (void) ev_sooner; // dummy to suppress unused var warning - - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - current_statement_begin__ = 86; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 88; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 89; - stan::math::assign(ev_later, (get_base1(get_base1(amount_later,i,"amount_later",1),t,"amount_later",2) * stan::math::exp(((-(1) * get_base1(r,i,"r",1)) * get_base1(get_base1(delay_later,i,"delay_later",1),t,"delay_later",2))))); - current_statement_begin__ = 90; - stan::math::assign(ev_sooner, (get_base1(get_base1(amount_sooner,i,"amount_sooner",1),t,"amount_sooner",2) * stan::math::exp(((-(1) * get_base1(r,i,"r",1)) * get_base1(get_base1(delay_sooner,i,"delay_sooner",1),t,"delay_sooner",2))))); - current_statement_begin__ = 91; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),(get_base1(beta,i,"beta",1) * (ev_later - ev_sooner))))), - "assigning variable log_lik"); - current_statement_begin__ = 94; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(inv_logit((get_base1(beta,i,"beta",1) * (ev_later - ev_sooner))), base_rng__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 61; - check_greater_or_equal(function__,"mu_r",mu_r,0); - check_less_or_equal(function__,"mu_r",mu_r,1); - current_statement_begin__ = 62; - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - 
check_less_or_equal(function__,"mu_beta",mu_beta,5); - current_statement_begin__ = 65; - current_statement_begin__ = 68; - - // write generated quantities - vars__.push_back(mu_r); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_dd_exp"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_r"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_r"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_dd_hyperbolic_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_dd_hyperbolic"); - reader.add_event(100, 98, "end", "model_dd_hyperbolic"); - return reader; -} - -class model_dd_hyperbolic : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > delay_later; - vector > amount_later; - vector > delay_sooner; - vector > amount_sooner; - vector > choice; -public: - model_dd_hyperbolic(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_dd_hyperbolic(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_dd_hyperbolic_namespace::model_dd_hyperbolic"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - 
current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("delay_later", "N", N); - validate_non_negative_index("delay_later", "T", T); - context__.validate_dims("data initialization", "delay_later", "double", context__.to_vec(N,T)); - validate_non_negative_index("delay_later", "N", N); - validate_non_negative_index("delay_later", "T", T); - delay_later = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("delay_later"); - pos__ = 0; - size_t delay_later_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < delay_later_limit_1__; ++i_1__) { - size_t delay_later_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < delay_later_limit_0__; ++i_0__) { - delay_later[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("amount_later", "N", N); - validate_non_negative_index("amount_later", "T", T); - context__.validate_dims("data initialization", "amount_later", "double", context__.to_vec(N,T)); - validate_non_negative_index("amount_later", "N", N); - validate_non_negative_index("amount_later", "T", T); - amount_later = std::vector >(N,std::vector(T,double(0))); - vals_r__ = 
context__.vals_r("amount_later"); - pos__ = 0; - size_t amount_later_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < amount_later_limit_1__; ++i_1__) { - size_t amount_later_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < amount_later_limit_0__; ++i_0__) { - amount_later[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("delay_sooner", "N", N); - validate_non_negative_index("delay_sooner", "T", T); - context__.validate_dims("data initialization", "delay_sooner", "double", context__.to_vec(N,T)); - validate_non_negative_index("delay_sooner", "N", N); - validate_non_negative_index("delay_sooner", "T", T); - delay_sooner = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("delay_sooner"); - pos__ = 0; - size_t delay_sooner_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < delay_sooner_limit_1__; ++i_1__) { - size_t delay_sooner_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < delay_sooner_limit_0__; ++i_0__) { - delay_sooner[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 8; - validate_non_negative_index("amount_sooner", "N", N); - validate_non_negative_index("amount_sooner", "T", T); - context__.validate_dims("data initialization", "amount_sooner", "double", context__.to_vec(N,T)); - validate_non_negative_index("amount_sooner", "N", N); - validate_non_negative_index("amount_sooner", "T", T); - amount_sooner = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("amount_sooner"); - pos__ = 0; - size_t amount_sooner_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < amount_sooner_limit_1__; ++i_1__) { - size_t amount_sooner_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < amount_sooner_limit_0__; ++i_0__) { - amount_sooner[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 9; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", 
"int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"delay_later[k0__][k1__]",delay_later[k0__][k1__],0); - } - } - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"amount_later[k0__][k1__]",amount_later[k0__][k1__],0); - } - } - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"delay_sooner[k0__][k1__]",delay_sooner[k0__][k1__],0); - } - } - current_statement_begin__ = 8; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"amount_sooner[k0__][k1__]",amount_sooner[k0__][k1__],0); - } - } - current_statement_begin__ = 9; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - 
check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],1); - } - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 18; - validate_non_negative_index("mu_p", "2", 2); - num_params_r__ += 2; - current_statement_begin__ = 19; - validate_non_negative_index("sigma", "2", 2); - num_params_r__ += 2; - current_statement_begin__ = 22; - validate_non_negative_index("k_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 23; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_dd_hyperbolic() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "2", 2); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(2)); - vector_d mu_p(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - 
validate_non_negative_index("sigma", "2", 2); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(2)); - vector_d sigma(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("k_pr"))) - throw std::runtime_error("variable k_pr missing"); - vals_r__ = context__.vals_r("k_pr"); - pos__ = 0U; - validate_non_negative_index("k_pr", "N", N); - context__.validate_dims("initialization", "k_pr", "vector_d", context__.to_vec(N)); - vector_d k_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - k_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(k_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable k_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - 
params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(2,lp__); - else - mu_p = in__.vector_constrain(2); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,2,lp__); - else - sigma = in__.vector_lb_constrain(0,2); - - Eigen::Matrix k_pr; - (void) k_pr; // dummy to suppress unused var warning - if (jacobian__) - k_pr = in__.vector_constrain(N,lp__); - else - k_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 28; - validate_non_negative_index("k", "N", N); - Eigen::Matrix k(static_cast(N)); - (void) k; // dummy to suppress unused var warning - - stan::math::initialize(k, DUMMY_VAR__); - stan::math::fill(k,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 31; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 32; - stan::model::assign(k, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + 
(get_base1(sigma,1,"sigma",1) * get_base1(k_pr,i,"k_pr",1)))), - "assigning variable k"); - current_statement_begin__ = 33; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(k(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: k" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 28; - check_greater_or_equal(function__,"k",k,0); - check_less_or_equal(function__,"k",k,1); - current_statement_begin__ = 29; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // model body - - current_statement_begin__ = 40; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 41; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 44; - lp_accum__.add(normal_log(k_pr, 0, 1)); - current_statement_begin__ = 45; - lp_accum__.add(normal_log(beta_pr, 0, 1)); - current_statement_begin__ = 47; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 49; - local_scalar_t__ ev_later; - (void) ev_later; // dummy to suppress unused var warning - - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - current_statement_begin__ = 50; - local_scalar_t__ ev_sooner; - (void) ev_sooner; 
// dummy to suppress unused var warning - - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - current_statement_begin__ = 52; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 53; - stan::math::assign(ev_later, (get_base1(get_base1(amount_later,i,"amount_later",1),t,"amount_later",2) / (1 + (get_base1(k,i,"k",1) * get_base1(get_base1(delay_later,i,"delay_later",1),t,"delay_later",2))))); - current_statement_begin__ = 54; - stan::math::assign(ev_sooner, (get_base1(get_base1(amount_sooner,i,"amount_sooner",1),t,"amount_sooner",2) / (1 + (get_base1(k,i,"k",1) * get_base1(get_base1(delay_sooner,i,"delay_sooner",1),t,"delay_sooner",2))))); - current_statement_begin__ = 55; - lp_accum__.add(bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), (get_base1(beta,i,"beta",1) * (ev_later - ev_sooner)))); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("k_pr"); - names__.push_back("beta_pr"); - names__.push_back("k"); - names__.push_back("beta"); - names__.push_back("mu_k"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector 
>& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_dd_hyperbolic_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(2); - vector_d sigma = in__.vector_lb_constrain(0,2); - vector_d k_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(k_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ 
DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 28; - validate_non_negative_index("k", "N", N); - Eigen::Matrix k(static_cast(N)); - (void) k; // dummy to suppress unused var warning - - stan::math::initialize(k, DUMMY_VAR__); - stan::math::fill(k,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 31; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 32; - stan::model::assign(k, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(k_pr,i,"k_pr",1)))), - "assigning variable k"); - current_statement_begin__ = 33; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - current_statement_begin__ = 28; - check_greater_or_equal(function__,"k",k,0); - check_less_or_equal(function__,"k",k,1); - current_statement_begin__ = 29; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(k[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 61; - local_scalar_t__ mu_k; - (void) mu_k; // dummy to suppress unused var warning - - 
stan::math::initialize(mu_k, DUMMY_VAR__); - stan::math::fill(mu_k,DUMMY_VAR__); - current_statement_begin__ = 62; - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - current_statement_begin__ = 65; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 68; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 71; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 72; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 73; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 77; - stan::math::assign(mu_k, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 78; - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 5)); - - current_statement_begin__ = 81; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 83; - local_scalar_t__ ev_later; - (void) ev_later; // dummy to suppress unused var warning - - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - current_statement_begin__ = 84; - local_scalar_t__ ev_sooner; - (void) ev_sooner; // dummy to suppress unused var warning - - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - current_statement_begin__ = 86; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), 
stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 88; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 89; - stan::math::assign(ev_later, (get_base1(get_base1(amount_later,i,"amount_later",1),t,"amount_later",2) / (1 + (get_base1(k,i,"k",1) * get_base1(get_base1(delay_later,i,"delay_later",1),t,"delay_later",2))))); - current_statement_begin__ = 90; - stan::math::assign(ev_sooner, (get_base1(get_base1(amount_sooner,i,"amount_sooner",1),t,"amount_sooner",2) / (1 + (get_base1(k,i,"k",1) * get_base1(get_base1(delay_sooner,i,"delay_sooner",1),t,"delay_sooner",2))))); - current_statement_begin__ = 91; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),(get_base1(beta,i,"beta",1) * (ev_later - ev_sooner))))), - "assigning variable log_lik"); - current_statement_begin__ = 94; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(inv_logit((get_base1(beta,i,"beta",1) * (ev_later - ev_sooner))), base_rng__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 61; - check_greater_or_equal(function__,"mu_k",mu_k,0); - check_less_or_equal(function__,"mu_k",mu_k,1); - current_statement_begin__ = 62; - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,5); - current_statement_begin__ = 65; - current_statement_begin__ = 68; - - // write generated quantities - vars__.push_back(mu_k); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 
0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_dd_hyperbolic"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "k_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "k" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_k"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "k_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "k" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_k"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_dd_hyperbolic_single_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_dd_hyperbolic_single"); - reader.add_event(56, 54, "end", "model_dd_hyperbolic_single"); - return reader; -} - -class model_dd_hyperbolic_single : public prob_grad { -private: - int Tsubj; - vector delay_later; - vector amount_later; - vector delay_sooner; - vector amount_sooner; - vector choice; -public: - model_dd_hyperbolic_single(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_dd_hyperbolic_single(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_dd_hyperbolic_single_namespace::model_dd_hyperbolic_single"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - 
try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec()); - Tsubj = int(0); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - Tsubj = vals_i__[pos__++]; - current_statement_begin__ = 3; - validate_non_negative_index("delay_later", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "delay_later", "double", context__.to_vec(Tsubj)); - validate_non_negative_index("delay_later", "Tsubj", Tsubj); - delay_later = std::vector(Tsubj,double(0)); - vals_r__ = context__.vals_r("delay_later"); - pos__ = 0; - size_t delay_later_limit_0__ = Tsubj; - for (size_t i_0__ = 0; i_0__ < delay_later_limit_0__; ++i_0__) { - delay_later[i_0__] = vals_r__[pos__++]; - } - current_statement_begin__ = 4; - validate_non_negative_index("amount_later", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "amount_later", "double", context__.to_vec(Tsubj)); - validate_non_negative_index("amount_later", "Tsubj", Tsubj); - amount_later = std::vector(Tsubj,double(0)); - vals_r__ = context__.vals_r("amount_later"); - pos__ = 0; - size_t amount_later_limit_0__ = Tsubj; - for (size_t i_0__ = 0; i_0__ < amount_later_limit_0__; ++i_0__) { - amount_later[i_0__] = vals_r__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("delay_sooner", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "delay_sooner", "double", context__.to_vec(Tsubj)); - validate_non_negative_index("delay_sooner", "Tsubj", Tsubj); - delay_sooner = std::vector(Tsubj,double(0)); - vals_r__ = context__.vals_r("delay_sooner"); - pos__ = 0; - size_t delay_sooner_limit_0__ = Tsubj; - for (size_t i_0__ = 0; i_0__ < delay_sooner_limit_0__; ++i_0__) { - delay_sooner[i_0__] = vals_r__[pos__++]; - } - current_statement_begin__ = 6; - validate_non_negative_index("amount_sooner", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "amount_sooner", "double", context__.to_vec(Tsubj)); - 
validate_non_negative_index("amount_sooner", "Tsubj", Tsubj); - amount_sooner = std::vector(Tsubj,double(0)); - vals_r__ = context__.vals_r("amount_sooner"); - pos__ = 0; - size_t amount_sooner_limit_0__ = Tsubj; - for (size_t i_0__ = 0; i_0__ < amount_sooner_limit_0__; ++i_0__) { - amount_sooner[i_0__] = vals_r__[pos__++]; - } - current_statement_begin__ = 7; - validate_non_negative_index("choice", "Tsubj", Tsubj); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(Tsubj)); - validate_non_negative_index("choice", "Tsubj", Tsubj); - choice = std::vector(Tsubj,int(0)); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_0__ = Tsubj; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__] = vals_i__[pos__++]; - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"Tsubj",Tsubj,1); - current_statement_begin__ = 3; - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"delay_later[k0__]",delay_later[k0__],0); - } - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"amount_later[k0__]",amount_later[k0__],0); - } - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"delay_sooner[k0__]",delay_sooner[k0__],0); - } - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"amount_sooner[k0__]",amount_sooner[k0__],0); - } - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < Tsubj; ++k0__) { - check_greater_or_equal(function__,"choice[k0__]",choice[k0__],-(1)); - check_less_or_equal(function__,"choice[k0__]",choice[k0__],1); - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 14; - ++num_params_r__; - 
current_statement_begin__ = 15; - ++num_params_r__; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_dd_hyperbolic_single() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("k"))) - throw std::runtime_error("variable k missing"); - vals_r__ = context__.vals_r("k"); - pos__ = 0U; - context__.validate_dims("initialization", "k", "double", context__.to_vec()); - double k(0); - k = vals_r__[pos__++]; - try { - writer__.scalar_lub_unconstrain(0,1,k); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable k: ") + e.what()); - } - - if (!(context__.contains_r("beta"))) - throw std::runtime_error("variable beta missing"); - vals_r__ = context__.vals_r("beta"); - pos__ = 0U; - context__.validate_dims("initialization", "beta", "double", context__.to_vec()); - double beta(0); - beta = vals_r__[pos__++]; - try { - writer__.scalar_lub_unconstrain(0,5,beta); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = 
params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - local_scalar_t__ k; - (void) k; // dummy to suppress unused var warning - if (jacobian__) - k = in__.scalar_lub_constrain(0,1,lp__); - else - k = in__.scalar_lub_constrain(0,1); - - local_scalar_t__ beta; - (void) beta; // dummy to suppress unused var warning - if (jacobian__) - beta = in__.scalar_lub_constrain(0,5,lp__); - else - beta = in__.scalar_lub_constrain(0,5); - - - // transformed parameters - current_statement_begin__ = 19; - validate_non_negative_index("ev_later", "Tsubj", Tsubj); - vector ev_later(Tsubj); - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - current_statement_begin__ = 20; - validate_non_negative_index("ev_sooner", "Tsubj", Tsubj); - vector ev_sooner(Tsubj); - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - current_statement_begin__ = 22; - for (int t = 1; t <= Tsubj; ++t) { - - current_statement_begin__ = 23; - stan::model::assign(ev_later, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - (get_base1(amount_later,t,"amount_later",1) / (1 + (k * get_base1(delay_later,t,"delay_later",1)))), - "assigning variable ev_later"); - current_statement_begin__ = 24; - stan::model::assign(ev_sooner, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - (get_base1(amount_sooner,t,"amount_sooner",1) / (1 + (k * get_base1(delay_sooner,t,"delay_sooner",1)))), - "assigning variable ev_sooner"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < Tsubj; ++i0__) 
{ - if (stan::math::is_uninitialized(ev_later[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ev_later" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < Tsubj; ++i0__) { - if (stan::math::is_uninitialized(ev_sooner[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ev_sooner" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 19; - current_statement_begin__ = 20; - - // model body - - current_statement_begin__ = 29; - lp_accum__.add(uniform_log(k, 0, 1)); - current_statement_begin__ = 30; - lp_accum__.add(uniform_log(beta, 0, 5)); - current_statement_begin__ = 32; - for (int t = 1; t <= Tsubj; ++t) { - - current_statement_begin__ = 33; - lp_accum__.add(bernoulli_logit_log(get_base1(choice,t,"choice",1), (beta * (get_base1(ev_later,t,"ev_later",1) - get_base1(ev_sooner,t,"ev_sooner",1))))); - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("k"); - names__.push_back("beta"); - names__.push_back("ev_later"); - names__.push_back("ev_sooner"); - names__.push_back("logK"); - 
names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(Tsubj); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(Tsubj); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(Tsubj); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_dd_hyperbolic_single_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - double k = in__.scalar_lub_constrain(0,1); - double beta = in__.scalar_lub_constrain(0,5); - vars__.push_back(k); - vars__.push_back(beta); - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 19; - validate_non_negative_index("ev_later", "Tsubj", Tsubj); - vector ev_later(Tsubj); - stan::math::initialize(ev_later, DUMMY_VAR__); - stan::math::fill(ev_later,DUMMY_VAR__); - current_statement_begin__ = 20; - validate_non_negative_index("ev_sooner", "Tsubj", Tsubj); - vector ev_sooner(Tsubj); - stan::math::initialize(ev_sooner, DUMMY_VAR__); - stan::math::fill(ev_sooner,DUMMY_VAR__); - - - current_statement_begin__ = 
22; - for (int t = 1; t <= Tsubj; ++t) { - - current_statement_begin__ = 23; - stan::model::assign(ev_later, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - (get_base1(amount_later,t,"amount_later",1) / (1 + (k * get_base1(delay_later,t,"delay_later",1)))), - "assigning variable ev_later"); - current_statement_begin__ = 24; - stan::model::assign(ev_sooner, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - (get_base1(amount_sooner,t,"amount_sooner",1) / (1 + (k * get_base1(delay_sooner,t,"delay_sooner",1)))), - "assigning variable ev_sooner"); - } - - // validate transformed parameters - current_statement_begin__ = 19; - current_statement_begin__ = 20; - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < Tsubj; ++k_0__) { - vars__.push_back(ev_later[k_0__]); - } - for (int k_0__ = 0; k_0__ < Tsubj; ++k_0__) { - vars__.push_back(ev_sooner[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 37; - local_scalar_t__ logK; - (void) logK; // dummy to suppress unused var warning - - stan::math::initialize(logK, DUMMY_VAR__); - stan::math::fill(logK,DUMMY_VAR__); - current_statement_begin__ = 38; - local_scalar_t__ log_lik; - (void) log_lik; // dummy to suppress unused var warning - - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 41; - validate_non_negative_index("y_pred", "Tsubj", Tsubj); - vector y_pred(Tsubj); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 43; - stan::math::assign(logK, stan::math::log(k)); - - current_statement_begin__ = 46; - stan::math::assign(log_lik, 0); - current_statement_begin__ = 47; - for (int t = 1; t <= Tsubj; ++t) { - - current_statement_begin__ = 48; - stan::math::assign(log_lik, stan::model::deep_copy((log_lik + 
bernoulli_logit_log(get_base1(choice,t,"choice",1),(beta * (get_base1(ev_later,t,"ev_later",1) - get_base1(ev_sooner,t,"ev_sooner",1))))))); - current_statement_begin__ = 51; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()), - bernoulli_rng(inv_logit((beta * (get_base1(ev_later,t,"ev_later",1) - get_base1(ev_sooner,t,"ev_sooner",1)))), base_rng__), - "assigning variable y_pred"); - } - - // validate generated quantities - current_statement_begin__ = 37; - current_statement_begin__ = 38; - current_statement_begin__ = 41; - - // write generated quantities - vars__.push_back(logK); - vars__.push_back(log_lik); - for (int k_0__ = 0; k_0__ < Tsubj; ++k_0__) { - vars__.push_back(y_pred[k_0__]); - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_dd_hyperbolic_single"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "k"; - param_names__.push_back(param_name_stream__.str()); - 
param_name_stream__.str(std::string()); - param_name_stream__ << "beta"; - param_names__.push_back(param_name_stream__.str()); - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ev_later" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ev_sooner" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "logK"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - param_name_stream__.str(std::string()); - param_name_stream__ << "k"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "beta"; - param_names__.push_back(param_name_stream__.str()); - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ev_later" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ev_sooner" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "logK"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= Tsubj; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_gng_m1_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_gng_m1"); - reader.add_event(133, 131, "end", "model_gng_m1"); - return reader; -} - -class model_gng_m1 : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > pressed; - vector > cue; - vector_d initV; -public: - model_gng_m1(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_gng_m1(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = 
"model_gng_m1_namespace::model_gng_m1"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("pressed", "N", N); - 
validate_non_negative_index("pressed", "T", T); - context__.validate_dims("data initialization", "pressed", "int", context__.to_vec(N,T)); - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - pressed = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("pressed"); - pos__ = 0; - size_t pressed_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < pressed_limit_1__; ++i_1__) { - size_t pressed_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < pressed_limit_0__; ++i_0__) { - pressed[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - context__.validate_dims("data initialization", "cue", "int", context__.to_vec(N,T)); - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - cue = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("cue"); - pos__ = 0; - size_t cue_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cue_limit_1__; ++i_1__) { - size_t cue_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cue_limit_0__; ++i_0__) { - cue[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],-(1)); - check_less_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],1); - } - } - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < N; 
++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],1); - check_less_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],4); - } - } - // initialize data variables - current_statement_begin__ = 11; - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 12; - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - current_statement_begin__ = 11; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 17; - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 18; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 19; - validate_non_negative_index("xi_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 20; - validate_non_negative_index("ep_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 21; - validate_non_negative_index("rho_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_gng_m1() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", 
"mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("xi_pr"))) - throw std::runtime_error("variable xi_pr missing"); - vals_r__ = context__.vals_r("xi_pr"); - pos__ = 0U; - validate_non_negative_index("xi_pr", "N", N); - context__.validate_dims("initialization", "xi_pr", "vector_d", context__.to_vec(N)); - vector_d xi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - xi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(xi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable xi_pr: ") + e.what()); - } - - if (!(context__.contains_r("ep_pr"))) - throw std::runtime_error("variable ep_pr missing"); - vals_r__ = context__.vals_r("ep_pr"); - pos__ = 0U; - validate_non_negative_index("ep_pr", "N", N); - context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); - vector_d ep_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ep_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ep_pr); - } catch (const std::exception& e) { - throw 
std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); - } - - if (!(context__.contains_r("rho_pr"))) - throw std::runtime_error("variable rho_pr missing"); - vals_r__ = context__.vals_r("rho_pr"); - pos__ = 0U; - validate_non_negative_index("rho_pr", "N", N); - context__.validate_dims("initialization", "rho_pr", "vector_d", context__.to_vec(N)); - vector_d rho_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix xi_pr; - (void) xi_pr; // dummy to suppress unused var warning - if (jacobian__) - xi_pr = 
in__.vector_constrain(N,lp__); - else - xi_pr = in__.vector_constrain(N); - - Eigen::Matrix ep_pr; - (void) ep_pr; // dummy to suppress unused var warning - if (jacobian__) - ep_pr = in__.vector_constrain(N,lp__); - else - ep_pr = in__.vector_constrain(N); - - Eigen::Matrix rho_pr; - (void) rho_pr; // dummy to suppress unused var warning - if (jacobian__) - rho_pr = in__.vector_constrain(N,lp__); - else - rho_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 25; - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - current_statement_begin__ = 26; - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - current_statement_begin__ = 27; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - current_statement_begin__ = 29; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 30; - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - current_statement_begin__ = 31; - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - current_statement_begin__ = 33; - stan::math::assign(rho, 
stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),rho_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(xi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: xi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ep(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ep" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 25; - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - current_statement_begin__ = 26; - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - current_statement_begin__ = 27; - check_greater_or_equal(function__,"rho",rho,0); - - // model body - - current_statement_begin__ = 39; - lp_accum__.add(normal_log(mu_p, 0, 1.0)); - current_statement_begin__ = 40; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 43; - lp_accum__.add(normal_log(xi_pr, 0, 1.0)); - current_statement_begin__ = 44; - lp_accum__.add(normal_log(ep_pr, 0, 1.0)); - current_statement_begin__ = 45; - lp_accum__.add(normal_log(rho_pr, 0, 1.0)); - current_statement_begin__ = 47; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 48; - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - 
stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - current_statement_begin__ = 49; - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - current_statement_begin__ = 50; - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - current_statement_begin__ = 51; - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - current_statement_begin__ = 52; - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - current_statement_begin__ = 54; - stan::math::assign(wv_g, initV); - current_statement_begin__ = 55; - stan::math::assign(wv_ng, initV); - current_statement_begin__ = 56; - stan::math::assign(qv_g, initV); - current_statement_begin__ = 57; - stan::math::assign(qv_ng, initV); - current_statement_begin__ = 59; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 60; - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1), - "assigning variable wv_g"); - current_statement_begin__ = 61; - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - 
get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - current_statement_begin__ = 62; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - current_statement_begin__ = 63; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - current_statement_begin__ = 64; - lp_accum__.add(bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2), get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1))); - current_statement_begin__ = 67; - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - current_statement_begin__ = 68; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - current_statement_begin__ = 70; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + 
(get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("xi_pr"); - names__.push_back("ep_pr"); - names__.push_back("rho_pr"); - names__.push_back("xi"); - names__.push_back("ep"); - names__.push_back("rho"); - names__.push_back("mu_xi"); - names__.push_back("mu_ep"); - names__.push_back("mu_rho"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - 
dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_gng_m1_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d xi_pr = in__.vector_constrain(N); - vector_d ep_pr = in__.vector_constrain(N); - vector_d rho_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 25; - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - 
stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - current_statement_begin__ = 26; - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - current_statement_begin__ = 27; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - current_statement_begin__ = 29; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 30; - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - current_statement_begin__ = 31; - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - current_statement_begin__ = 33; - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),rho_pr)))); - - // validate transformed parameters - current_statement_begin__ = 25; - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - current_statement_begin__ = 26; - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - current_statement_begin__ = 27; - check_greater_or_equal(function__,"rho",rho,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep[k_0__]); - } - for (int k_0__ = 0; k_0__ 
< N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 77; - local_scalar_t__ mu_xi; - (void) mu_xi; // dummy to suppress unused var warning - - stan::math::initialize(mu_xi, DUMMY_VAR__); - stan::math::fill(mu_xi,DUMMY_VAR__); - current_statement_begin__ = 78; - local_scalar_t__ mu_ep; - (void) mu_ep; // dummy to suppress unused var warning - - stan::math::initialize(mu_ep, DUMMY_VAR__); - stan::math::fill(mu_ep,DUMMY_VAR__); - current_statement_begin__ = 79; - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - stan::math::fill(mu_rho,DUMMY_VAR__); - current_statement_begin__ = 81; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 84; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 87; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 88; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 89; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 93; - stan::math::assign(mu_xi, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 94; - stan::math::assign(mu_ep, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 95; - stan::math::assign(mu_rho, stan::math::exp(get_base1(mu_p,3,"mu_p",1))); - - current_statement_begin__ = 98; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 
99; - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - current_statement_begin__ = 100; - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - current_statement_begin__ = 101; - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - current_statement_begin__ = 102; - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - current_statement_begin__ = 103; - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - current_statement_begin__ = 105; - stan::math::assign(wv_g, initV); - current_statement_begin__ = 106; - stan::math::assign(wv_ng, initV); - current_statement_begin__ = 107; - stan::math::assign(qv_g, initV); - current_statement_begin__ = 108; - stan::math::assign(qv_ng, initV); - current_statement_begin__ = 110; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 112; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 113; - stan::model::assign(wv_g, - 
stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1), - "assigning variable wv_g"); - current_statement_begin__ = 114; - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - current_statement_begin__ = 115; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - current_statement_begin__ = 116; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - current_statement_begin__ = 117; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2),get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1)))), - "assigning variable log_lik"); - current_statement_begin__ = 120; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1), base_rng__), - "assigning 
variable y_pred"); - current_statement_begin__ = 123; - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - current_statement_begin__ = 124; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - current_statement_begin__ = 126; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - // validate generated quantities - current_statement_begin__ = 77; - check_greater_or_equal(function__,"mu_xi",mu_xi,0); - check_less_or_equal(function__,"mu_xi",mu_xi,1); - current_statement_begin__ = 78; - check_greater_or_equal(function__,"mu_ep",mu_ep,0); - check_less_or_equal(function__,"mu_ep",mu_ep,1); - current_statement_begin__ = 79; - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - current_statement_begin__ = 81; - current_statement_begin__ = 84; - - // write generated quantities - vars__.push_back(mu_xi); - vars__.push_back(mu_ep); - vars__.push_back(mu_rho); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - 
} - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_gng_m1"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_gng_m1_reg_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_gng_m1_reg"); - reader.add_event(143, 141, "end", "model_gng_m1_reg"); - return reader; -} - -class model_gng_m1_reg : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > pressed; - vector > cue; - vector_d initV; -public: - model_gng_m1_reg(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_gng_m1_reg(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - 
ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_gng_m1_reg_namespace::model_gng_m1_reg"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 4; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 5; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 6; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector 
>(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - context__.validate_dims("data initialization", "pressed", "int", context__.to_vec(N,T)); - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - pressed = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("pressed"); - pos__ = 0; - size_t pressed_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < pressed_limit_1__; ++i_1__) { - size_t pressed_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < pressed_limit_0__; ++i_0__) { - pressed[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 8; - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - context__.validate_dims("data initialization", "cue", "int", context__.to_vec(N,T)); - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - cue = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("cue"); - pos__ = 0; - size_t cue_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cue_limit_1__; ++i_1__) { - size_t cue_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cue_limit_0__; ++i_0__) { - cue[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 3; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 4; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - 
check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 6; - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],-(1)); - check_less_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],1); - } - } - current_statement_begin__ = 8; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],1); - check_less_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],4); - } - } - // initialize data variables - current_statement_begin__ = 12; - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 13; - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - current_statement_begin__ = 12; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 18; - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 19; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 20; - validate_non_negative_index("xi_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 21; - validate_non_negative_index("ep_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 22; - validate_non_negative_index("rho_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_gng_m1_reg() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& 
params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("xi_pr"))) - throw std::runtime_error("variable xi_pr missing"); - vals_r__ = context__.vals_r("xi_pr"); - pos__ = 0U; - validate_non_negative_index("xi_pr", "N", N); - context__.validate_dims("initialization", "xi_pr", "vector_d", context__.to_vec(N)); - vector_d xi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - xi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(xi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable xi_pr: ") + e.what()); - } - - if (!(context__.contains_r("ep_pr"))) - throw 
std::runtime_error("variable ep_pr missing"); - vals_r__ = context__.vals_r("ep_pr"); - pos__ = 0U; - validate_non_negative_index("ep_pr", "N", N); - context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); - vector_d ep_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ep_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ep_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); - } - - if (!(context__.contains_r("rho_pr"))) - throw std::runtime_error("variable rho_pr missing"); - vals_r__ = context__.vals_r("rho_pr"); - pos__ = 0U; - validate_non_negative_index("rho_pr", "N", N); - context__.validate_dims("initialization", "rho_pr", "vector_d", context__.to_vec(N)); - vector_d rho_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy 
to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix xi_pr; - (void) xi_pr; // dummy to suppress unused var warning - if (jacobian__) - xi_pr = in__.vector_constrain(N,lp__); - else - xi_pr = in__.vector_constrain(N); - - Eigen::Matrix ep_pr; - (void) ep_pr; // dummy to suppress unused var warning - if (jacobian__) - ep_pr = in__.vector_constrain(N,lp__); - else - ep_pr = in__.vector_constrain(N); - - Eigen::Matrix rho_pr; - (void) rho_pr; // dummy to suppress unused var warning - if (jacobian__) - rho_pr = in__.vector_constrain(N,lp__); - else - rho_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 26; - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - current_statement_begin__ = 27; - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - current_statement_begin__ = 30; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 31; - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning 
variable xi"); - current_statement_begin__ = 32; - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - current_statement_begin__ = 34; - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),rho_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(xi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: xi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ep(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ep" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 26; - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - current_statement_begin__ = 27; - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - current_statement_begin__ = 28; - check_greater_or_equal(function__,"rho",rho,0); - - // model body - - current_statement_begin__ = 40; - lp_accum__.add(normal_log(mu_p, 0, 1.0)); - current_statement_begin__ = 41; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 44; - lp_accum__.add(normal_log(xi_pr, 0, 1.0)); - current_statement_begin__ = 45; - 
lp_accum__.add(normal_log(ep_pr, 0, 1.0)); - current_statement_begin__ = 46; - lp_accum__.add(normal_log(rho_pr, 0, 1.0)); - current_statement_begin__ = 48; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 49; - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - current_statement_begin__ = 50; - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - current_statement_begin__ = 51; - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - current_statement_begin__ = 52; - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - current_statement_begin__ = 53; - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - current_statement_begin__ = 55; - stan::math::assign(wv_g, initV); - current_statement_begin__ = 56; - stan::math::assign(wv_ng, initV); - current_statement_begin__ = 57; - stan::math::assign(qv_g, initV); - current_statement_begin__ = 58; - stan::math::assign(qv_ng, initV); - current_statement_begin__ = 60; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 61; - stan::model::assign(wv_g, - 
stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1), - "assigning variable wv_g"); - current_statement_begin__ = 62; - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - current_statement_begin__ = 63; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - current_statement_begin__ = 64; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - current_statement_begin__ = 65; - lp_accum__.add(bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2), get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1))); - current_statement_begin__ = 68; - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - current_statement_begin__ = 69; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) 
- get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - current_statement_begin__ = 71; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("xi_pr"); - names__.push_back("ep_pr"); - names__.push_back("rho_pr"); - names__.push_back("xi"); - names__.push_back("ep"); - names__.push_back("rho"); - names__.push_back("mu_xi"); - names__.push_back("mu_ep"); - names__.push_back("mu_rho"); - names__.push_back("log_lik"); - names__.push_back("Qgo"); - names__.push_back("Qnogo"); - names__.push_back("Wgo"); - names__.push_back("Wnogo"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - 
dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_gng_m1_reg_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d xi_pr = in__.vector_constrain(N); - vector_d ep_pr = in__.vector_constrain(N); - vector_d rho_pr = 
in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 26; - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - current_statement_begin__ = 27; - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - current_statement_begin__ = 30; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 31; - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - current_statement_begin__ = 32; - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 
Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - current_statement_begin__ = 34; - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),rho_pr)))); - - // validate transformed parameters - current_statement_begin__ = 26; - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - current_statement_begin__ = 27; - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - current_statement_begin__ = 28; - check_greater_or_equal(function__,"rho",rho,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 78; - local_scalar_t__ mu_xi; - (void) mu_xi; // dummy to suppress unused var warning - - stan::math::initialize(mu_xi, DUMMY_VAR__); - stan::math::fill(mu_xi,DUMMY_VAR__); - current_statement_begin__ = 79; - local_scalar_t__ mu_ep; - (void) mu_ep; // dummy to suppress unused var warning - - stan::math::initialize(mu_ep, DUMMY_VAR__); - stan::math::fill(mu_ep,DUMMY_VAR__); - current_statement_begin__ = 80; - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - stan::math::fill(mu_rho,DUMMY_VAR__); - current_statement_begin__ = 81; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 82; - validate_non_negative_index("Qgo", "N", N); - validate_non_negative_index("Qgo", "T", T); - vector > 
Qgo(N, (vector(T))); - stan::math::initialize(Qgo, DUMMY_VAR__); - stan::math::fill(Qgo,DUMMY_VAR__); - current_statement_begin__ = 83; - validate_non_negative_index("Qnogo", "N", N); - validate_non_negative_index("Qnogo", "T", T); - vector > Qnogo(N, (vector(T))); - stan::math::initialize(Qnogo, DUMMY_VAR__); - stan::math::fill(Qnogo,DUMMY_VAR__); - current_statement_begin__ = 84; - validate_non_negative_index("Wgo", "N", N); - validate_non_negative_index("Wgo", "T", T); - vector > Wgo(N, (vector(T))); - stan::math::initialize(Wgo, DUMMY_VAR__); - stan::math::fill(Wgo,DUMMY_VAR__); - current_statement_begin__ = 85; - validate_non_negative_index("Wnogo", "N", N); - validate_non_negative_index("Wnogo", "T", T); - vector > Wnogo(N, (vector(T))); - stan::math::initialize(Wnogo, DUMMY_VAR__); - stan::math::fill(Wnogo,DUMMY_VAR__); - current_statement_begin__ = 88; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 91; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 92; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 93; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 97; - stan::math::assign(mu_xi, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 98; - stan::math::assign(mu_ep, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 99; - stan::math::assign(mu_rho, stan::math::exp(get_base1(mu_p,3,"mu_p",1))); - - current_statement_begin__ = 102; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 103; - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) 
wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - current_statement_begin__ = 104; - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - current_statement_begin__ = 105; - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - current_statement_begin__ = 106; - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - current_statement_begin__ = 107; - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - current_statement_begin__ = 109; - stan::math::assign(wv_g, initV); - current_statement_begin__ = 110; - stan::math::assign(wv_ng, initV); - current_statement_begin__ = 111; - stan::math::assign(qv_g, initV); - current_statement_begin__ = 112; - stan::math::assign(qv_ng, initV); - current_statement_begin__ = 114; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 116; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 117; - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - 
get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1), - "assigning variable wv_g"); - current_statement_begin__ = 118; - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - current_statement_begin__ = 119; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - current_statement_begin__ = 120; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - current_statement_begin__ = 121; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2),get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1)))), - "assigning variable log_lik"); - current_statement_begin__ = 124; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1), base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 127; - stan::model::assign(Qgo, - stan::model::cons_list(stan::model::index_uni(i), 
stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1), - "assigning variable Qgo"); - current_statement_begin__ = 128; - stan::model::assign(Qnogo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable Qnogo"); - current_statement_begin__ = 129; - stan::model::assign(Wgo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1), - "assigning variable Wgo"); - current_statement_begin__ = 130; - stan::model::assign(Wnogo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1), - "assigning variable Wnogo"); - current_statement_begin__ = 133; - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - current_statement_begin__ = 134; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - current_statement_begin__ = 136; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - 
stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - // validate generated quantities - current_statement_begin__ = 78; - check_greater_or_equal(function__,"mu_xi",mu_xi,0); - check_less_or_equal(function__,"mu_xi",mu_xi,1); - current_statement_begin__ = 79; - check_greater_or_equal(function__,"mu_ep",mu_ep,0); - check_less_or_equal(function__,"mu_ep",mu_ep,1); - current_statement_begin__ = 80; - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - current_statement_begin__ = 81; - current_statement_begin__ = 82; - current_statement_begin__ = 83; - current_statement_begin__ = 84; - current_statement_begin__ = 85; - current_statement_begin__ = 88; - - // write generated quantities - vars__.push_back(mu_xi); - vars__.push_back(mu_ep); - vars__.push_back(mu_rho); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Qgo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Qnogo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Wgo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Wnogo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no 
return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_gng_m1_reg"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qnogo" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qnogo" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_gng_m2_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_gng_m2"); - reader.add_event(144, 142, "end", "model_gng_m2"); - return reader; -} - -class model_gng_m2 : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > pressed; - vector > cue; - vector_d initV; -public: - model_gng_m2(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_gng_m2(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, 
- std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_gng_m2_namespace::model_gng_m2"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ 
< outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - context__.validate_dims("data initialization", "pressed", "int", context__.to_vec(N,T)); - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - pressed = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("pressed"); - pos__ = 0; - size_t pressed_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < pressed_limit_1__; ++i_1__) { - size_t pressed_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < pressed_limit_0__; ++i_0__) { - pressed[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - context__.validate_dims("data initialization", "cue", "int", context__.to_vec(N,T)); - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - cue = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("cue"); - pos__ = 0; - size_t cue_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cue_limit_1__; ++i_1__) { - size_t cue_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cue_limit_0__; ++i_0__) { - cue[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ 
= 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],-(1)); - check_less_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],1); - } - } - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],1); - check_less_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],4); - } - } - // initialize data variables - current_statement_begin__ = 11; - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 12; - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - current_statement_begin__ = 11; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 17; - validate_non_negative_index("mu_p", "4", 4); - num_params_r__ += 4; - current_statement_begin__ = 18; - validate_non_negative_index("sigma", "4", 4); - num_params_r__ += 4; - current_statement_begin__ = 19; - validate_non_negative_index("xi_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 20; - validate_non_negative_index("ep_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 21; - validate_non_negative_index("b_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 22; - validate_non_negative_index("rho_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_gng_m2() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer 
writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "4", 4); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(4)); - vector_d mu_p(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "4", 4); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(4)); - vector_d sigma(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("xi_pr"))) - throw std::runtime_error("variable xi_pr missing"); - vals_r__ = context__.vals_r("xi_pr"); - pos__ = 0U; - validate_non_negative_index("xi_pr", "N", N); - context__.validate_dims("initialization", "xi_pr", "vector_d", context__.to_vec(N)); - vector_d xi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - xi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(xi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable xi_pr: ") + e.what()); - } - - if (!(context__.contains_r("ep_pr"))) - throw std::runtime_error("variable ep_pr missing"); - vals_r__ = context__.vals_r("ep_pr"); - 
pos__ = 0U; - validate_non_negative_index("ep_pr", "N", N); - context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); - vector_d ep_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ep_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ep_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); - } - - if (!(context__.contains_r("b_pr"))) - throw std::runtime_error("variable b_pr missing"); - vals_r__ = context__.vals_r("b_pr"); - pos__ = 0U; - validate_non_negative_index("b_pr", "N", N); - context__.validate_dims("initialization", "b_pr", "vector_d", context__.to_vec(N)); - vector_d b_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - b_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(b_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable b_pr: ") + e.what()); - } - - if (!(context__.contains_r("rho_pr"))) - throw std::runtime_error("variable rho_pr missing"); - vals_r__ = context__.vals_r("rho_pr"); - pos__ = 0U; - validate_non_negative_index("rho_pr", "N", N); - context__.validate_dims("initialization", "rho_pr", "vector_d", context__.to_vec(N)); - vector_d rho_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - 
params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(4,lp__); - else - mu_p = in__.vector_constrain(4); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,4,lp__); - else - sigma = in__.vector_lb_constrain(0,4); - - Eigen::Matrix xi_pr; - (void) xi_pr; // dummy to suppress unused var warning - if (jacobian__) - xi_pr = in__.vector_constrain(N,lp__); - else - xi_pr = in__.vector_constrain(N); - - Eigen::Matrix ep_pr; - (void) ep_pr; // dummy to suppress unused var warning - if (jacobian__) - ep_pr = in__.vector_constrain(N,lp__); - else - ep_pr = in__.vector_constrain(N); - - Eigen::Matrix b_pr; - (void) b_pr; // dummy to suppress unused var warning - if (jacobian__) - b_pr = in__.vector_constrain(N,lp__); - else - b_pr = in__.vector_constrain(N); - - Eigen::Matrix rho_pr; - (void) rho_pr; // dummy to suppress unused var warning - if (jacobian__) - rho_pr = in__.vector_constrain(N,lp__); - else - rho_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 26; - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - current_statement_begin__ = 27; - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - 
stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - current_statement_begin__ = 31; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 32; - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - current_statement_begin__ = 33; - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - current_statement_begin__ = 35; - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - current_statement_begin__ = 36; - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),rho_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(xi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: xi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ep(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ep" << '[' << i0__ << ']'; - throw 
std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(b(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: b" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 26; - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - current_statement_begin__ = 27; - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - current_statement_begin__ = 28; - current_statement_begin__ = 29; - check_greater_or_equal(function__,"rho",rho,0); - - // model body - - current_statement_begin__ = 42; - lp_accum__.add(normal_log(get_base1(mu_p,1,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 43; - lp_accum__.add(normal_log(get_base1(mu_p,2,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 44; - lp_accum__.add(normal_log(get_base1(mu_p,3,"mu_p",1), 0, 10.0)); - current_statement_begin__ = 45; - lp_accum__.add(normal_log(get_base1(mu_p,4,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 46; - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 2), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - current_statement_begin__ = 47; - lp_accum__.add(cauchy_log(get_base1(sigma,3,"sigma",1), 0, 1.0)); - current_statement_begin__ = 48; - lp_accum__.add(normal_log(get_base1(sigma,4,"sigma",1), 0, 0.20000000000000001)); - current_statement_begin__ = 51; - lp_accum__.add(normal_log(xi_pr, 0, 1.0)); - current_statement_begin__ = 52; - 
lp_accum__.add(normal_log(ep_pr, 0, 1.0)); - current_statement_begin__ = 53; - lp_accum__.add(normal_log(b_pr, 0, 1.0)); - current_statement_begin__ = 54; - lp_accum__.add(normal_log(rho_pr, 0, 1.0)); - current_statement_begin__ = 56; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 57; - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - current_statement_begin__ = 58; - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - current_statement_begin__ = 59; - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - current_statement_begin__ = 60; - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - current_statement_begin__ = 61; - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - current_statement_begin__ = 63; - stan::math::assign(wv_g, initV); - current_statement_begin__ = 64; - stan::math::assign(wv_ng, initV); - current_statement_begin__ = 65; - stan::math::assign(qv_g, initV); - current_statement_begin__ = 66; - stan::math::assign(qv_ng, initV); - current_statement_begin__ = 68; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 69; - 
stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - (get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)), - "assigning variable wv_g"); - current_statement_begin__ = 70; - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - current_statement_begin__ = 71; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - current_statement_begin__ = 72; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - current_statement_begin__ = 73; - lp_accum__.add(bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2), get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1))); - current_statement_begin__ = 76; - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - current_statement_begin__ = 77; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * 
get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - current_statement_begin__ = 79; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("xi_pr"); - names__.push_back("ep_pr"); - names__.push_back("b_pr"); - names__.push_back("rho_pr"); - names__.push_back("xi"); - names__.push_back("ep"); - names__.push_back("b"); - names__.push_back("rho"); - names__.push_back("mu_xi"); - names__.push_back("mu_ep"); - names__.push_back("mu_b"); - names__.push_back("mu_rho"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - 
std::vector dims__; - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_gng_m2_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(4); - vector_d sigma = in__.vector_lb_constrain(0,4); - vector_d xi_pr = in__.vector_constrain(N); - vector_d ep_pr = in__.vector_constrain(N); - vector_d b_pr = in__.vector_constrain(N); - vector_d rho_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 4; 
++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 26; - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - current_statement_begin__ = 27; - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - current_statement_begin__ = 31; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 32; - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - 
"assigning variable xi"); - current_statement_begin__ = 33; - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - current_statement_begin__ = 35; - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - current_statement_begin__ = 36; - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),rho_pr)))); - - // validate transformed parameters - current_statement_begin__ = 26; - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - current_statement_begin__ = 27; - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - current_statement_begin__ = 28; - current_statement_begin__ = 29; - check_greater_or_equal(function__,"rho",rho,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 86; - local_scalar_t__ mu_xi; - (void) mu_xi; // dummy to suppress unused var warning - - stan::math::initialize(mu_xi, DUMMY_VAR__); - stan::math::fill(mu_xi,DUMMY_VAR__); - current_statement_begin__ = 87; - local_scalar_t__ mu_ep; - (void) mu_ep; // dummy to suppress unused var warning - - stan::math::initialize(mu_ep, DUMMY_VAR__); - stan::math::fill(mu_ep,DUMMY_VAR__); - current_statement_begin__ = 88; - local_scalar_t__ mu_b; - (void) mu_b; // dummy to suppress unused var warning - - 
stan::math::initialize(mu_b, DUMMY_VAR__); - stan::math::fill(mu_b,DUMMY_VAR__); - current_statement_begin__ = 89; - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - stan::math::fill(mu_rho,DUMMY_VAR__); - current_statement_begin__ = 91; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 94; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 97; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 98; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 99; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 103; - stan::math::assign(mu_xi, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 104; - stan::math::assign(mu_ep, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 105; - stan::math::assign(mu_b, get_base1(mu_p,3,"mu_p",1)); - current_statement_begin__ = 106; - stan::math::assign(mu_rho, stan::math::exp(get_base1(mu_p,4,"mu_p",1))); - - current_statement_begin__ = 109; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 110; - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - current_statement_begin__ = 111; - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) 
wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - current_statement_begin__ = 112; - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - current_statement_begin__ = 113; - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - current_statement_begin__ = 114; - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - current_statement_begin__ = 116; - stan::math::assign(wv_g, initV); - current_statement_begin__ = 117; - stan::math::assign(wv_ng, initV); - current_statement_begin__ = 118; - stan::math::assign(qv_g, initV); - current_statement_begin__ = 119; - stan::math::assign(qv_ng, initV); - current_statement_begin__ = 121; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 123; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 124; - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - (get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)), - "assigning variable wv_g"); - current_statement_begin__ = 125; - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - 
get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - current_statement_begin__ = 126; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - current_statement_begin__ = 127; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - current_statement_begin__ = 128; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2),get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1)))), - "assigning variable log_lik"); - current_statement_begin__ = 131; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1), base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 134; - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - current_statement_begin__ = 135; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + 
(get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - current_statement_begin__ = 137; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - // validate generated quantities - current_statement_begin__ = 86; - check_greater_or_equal(function__,"mu_xi",mu_xi,0); - check_less_or_equal(function__,"mu_xi",mu_xi,1); - current_statement_begin__ = 87; - check_greater_or_equal(function__,"mu_ep",mu_ep,0); - check_less_or_equal(function__,"mu_ep",mu_ep,1); - current_statement_begin__ = 88; - current_statement_begin__ = 89; - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - current_statement_begin__ = 91; - current_statement_begin__ = 94; - - // write generated quantities - vars__.push_back(mu_xi); - vars__.push_back(mu_ep); - vars__.push_back(mu_b); - vars__.push_back(mu_rho); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - 
bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_gng_m2"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_gng_m2_reg_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_gng_m2_reg"); - reader.add_event(154, 152, "end", "model_gng_m2_reg"); - return reader; -} - -class model_gng_m2_reg : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > pressed; - vector > cue; - vector_d initV; -public: - model_gng_m2_reg(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_gng_m2_reg(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_gng_m2_reg_namespace::model_gng_m2_reg"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "N", 
"int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 4; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 5; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 6; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - context__.validate_dims("data initialization", "pressed", "int", context__.to_vec(N,T)); - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - pressed = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("pressed"); - pos__ = 0; - size_t pressed_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < pressed_limit_1__; ++i_1__) { - size_t pressed_limit_0__ = N; - for 
(size_t i_0__ = 0; i_0__ < pressed_limit_0__; ++i_0__) { - pressed[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 8; - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - context__.validate_dims("data initialization", "cue", "int", context__.to_vec(N,T)); - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - cue = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("cue"); - pos__ = 0; - size_t cue_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cue_limit_1__; ++i_1__) { - size_t cue_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cue_limit_0__; ++i_0__) { - cue[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 3; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 4; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 6; - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],-(1)); - check_less_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],1); - } - } - current_statement_begin__ = 8; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],1); - check_less_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],4); - } - } - // initialize data variables - current_statement_begin__ = 12; - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 13; - stan::math::assign(initV, rep_vector(0.0,4)); - - // 
validate transformed data - current_statement_begin__ = 12; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 18; - validate_non_negative_index("mu_p", "4", 4); - num_params_r__ += 4; - current_statement_begin__ = 19; - validate_non_negative_index("sigma", "4", 4); - num_params_r__ += 4; - current_statement_begin__ = 20; - validate_non_negative_index("xi_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 21; - validate_non_negative_index("ep_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 22; - validate_non_negative_index("b_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 23; - validate_non_negative_index("rho_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_gng_m2_reg() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "4", 4); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(4)); - vector_d mu_p(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw 
std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "4", 4); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(4)); - vector_d sigma(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("xi_pr"))) - throw std::runtime_error("variable xi_pr missing"); - vals_r__ = context__.vals_r("xi_pr"); - pos__ = 0U; - validate_non_negative_index("xi_pr", "N", N); - context__.validate_dims("initialization", "xi_pr", "vector_d", context__.to_vec(N)); - vector_d xi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - xi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(xi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable xi_pr: ") + e.what()); - } - - if (!(context__.contains_r("ep_pr"))) - throw std::runtime_error("variable ep_pr missing"); - vals_r__ = context__.vals_r("ep_pr"); - pos__ = 0U; - validate_non_negative_index("ep_pr", "N", N); - context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); - vector_d ep_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ep_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ep_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); - } - - if (!(context__.contains_r("b_pr"))) - throw std::runtime_error("variable b_pr missing"); - vals_r__ = context__.vals_r("b_pr"); - pos__ = 0U; - validate_non_negative_index("b_pr", "N", N); - context__.validate_dims("initialization", "b_pr", "vector_d", context__.to_vec(N)); - vector_d b_pr(static_cast(N)); - for (int 
j1__ = 0U; j1__ < N; ++j1__) - b_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(b_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable b_pr: ") + e.what()); - } - - if (!(context__.contains_r("rho_pr"))) - throw std::runtime_error("variable rho_pr missing"); - vals_r__ = context__.vals_r("rho_pr"); - pos__ = 0U; - validate_non_negative_index("rho_pr", "N", N); - context__.validate_dims("initialization", "rho_pr", "vector_d", context__.to_vec(N)); - vector_d rho_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(4,lp__); - else - mu_p = in__.vector_constrain(4); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,4,lp__); - else 
- sigma = in__.vector_lb_constrain(0,4); - - Eigen::Matrix xi_pr; - (void) xi_pr; // dummy to suppress unused var warning - if (jacobian__) - xi_pr = in__.vector_constrain(N,lp__); - else - xi_pr = in__.vector_constrain(N); - - Eigen::Matrix ep_pr; - (void) ep_pr; // dummy to suppress unused var warning - if (jacobian__) - ep_pr = in__.vector_constrain(N,lp__); - else - ep_pr = in__.vector_constrain(N); - - Eigen::Matrix b_pr; - (void) b_pr; // dummy to suppress unused var warning - if (jacobian__) - b_pr = in__.vector_constrain(N,lp__); - else - b_pr = in__.vector_constrain(N); - - Eigen::Matrix rho_pr; - (void) rho_pr; // dummy to suppress unused var warning - if (jacobian__) - rho_pr = in__.vector_constrain(N,lp__); - else - rho_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 27; - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - current_statement_begin__ = 32; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 33; - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), 
stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - current_statement_begin__ = 34; - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - current_statement_begin__ = 36; - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - current_statement_begin__ = 37; - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),rho_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(xi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: xi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ep(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ep" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(b(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: b" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 27; - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - 
current_statement_begin__ = 28; - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - current_statement_begin__ = 29; - current_statement_begin__ = 30; - check_greater_or_equal(function__,"rho",rho,0); - - // model body - - current_statement_begin__ = 43; - lp_accum__.add(normal_log(get_base1(mu_p,1,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 44; - lp_accum__.add(normal_log(get_base1(mu_p,2,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 45; - lp_accum__.add(normal_log(get_base1(mu_p,3,"mu_p",1), 0, 10.0)); - current_statement_begin__ = 46; - lp_accum__.add(normal_log(get_base1(mu_p,4,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 47; - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 2), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - current_statement_begin__ = 48; - lp_accum__.add(cauchy_log(get_base1(sigma,3,"sigma",1), 0, 1.0)); - current_statement_begin__ = 49; - lp_accum__.add(normal_log(get_base1(sigma,4,"sigma",1), 0, 0.20000000000000001)); - current_statement_begin__ = 52; - lp_accum__.add(normal_log(xi_pr, 0, 1.0)); - current_statement_begin__ = 53; - lp_accum__.add(normal_log(ep_pr, 0, 1.0)); - current_statement_begin__ = 54; - lp_accum__.add(normal_log(b_pr, 0, 1.0)); - current_statement_begin__ = 55; - lp_accum__.add(normal_log(rho_pr, 0, 1.0)); - current_statement_begin__ = 57; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 58; - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - current_statement_begin__ = 59; - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - 
stan::math::fill(wv_ng,DUMMY_VAR__); - current_statement_begin__ = 60; - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - current_statement_begin__ = 61; - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - current_statement_begin__ = 62; - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - current_statement_begin__ = 64; - stan::math::assign(wv_g, initV); - current_statement_begin__ = 65; - stan::math::assign(wv_ng, initV); - current_statement_begin__ = 66; - stan::math::assign(qv_g, initV); - current_statement_begin__ = 67; - stan::math::assign(qv_ng, initV); - current_statement_begin__ = 69; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 70; - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - (get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)), - "assigning variable wv_g"); - current_statement_begin__ = 71; - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - current_statement_begin__ = 72; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - 
inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - current_statement_begin__ = 73; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - current_statement_begin__ = 74; - lp_accum__.add(bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2), get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1))); - current_statement_begin__ = 77; - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - current_statement_begin__ = 78; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - current_statement_begin__ = 80; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - } catch (const std::exception& e) { - 
stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("xi_pr"); - names__.push_back("ep_pr"); - names__.push_back("b_pr"); - names__.push_back("rho_pr"); - names__.push_back("xi"); - names__.push_back("ep"); - names__.push_back("b"); - names__.push_back("rho"); - names__.push_back("mu_xi"); - names__.push_back("mu_ep"); - names__.push_back("mu_b"); - names__.push_back("mu_rho"); - names__.push_back("log_lik"); - names__.push_back("Qgo"); - names__.push_back("Qnogo"); - names__.push_back("Wgo"); - names__.push_back("Wnogo"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - 
dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_gng_m2_reg_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(4); - vector_d sigma = in__.vector_lb_constrain(0,4); - vector_d xi_pr = in__.vector_constrain(N); - vector_d ep_pr = in__.vector_constrain(N); - vector_d b_pr = in__.vector_constrain(N); - vector_d rho_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { 
- vars__.push_back(b_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 27; - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - current_statement_begin__ = 32; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 33; - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - current_statement_begin__ = 34; - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * 
get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - current_statement_begin__ = 36; - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - current_statement_begin__ = 37; - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),rho_pr)))); - - // validate transformed parameters - current_statement_begin__ = 27; - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - current_statement_begin__ = 28; - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - current_statement_begin__ = 29; - current_statement_begin__ = 30; - check_greater_or_equal(function__,"rho",rho,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 87; - local_scalar_t__ mu_xi; - (void) mu_xi; // dummy to suppress unused var warning - - stan::math::initialize(mu_xi, DUMMY_VAR__); - stan::math::fill(mu_xi,DUMMY_VAR__); - current_statement_begin__ = 88; - local_scalar_t__ mu_ep; - (void) mu_ep; // dummy to suppress unused var warning - - stan::math::initialize(mu_ep, DUMMY_VAR__); - stan::math::fill(mu_ep,DUMMY_VAR__); - current_statement_begin__ = 89; - local_scalar_t__ mu_b; - (void) mu_b; // dummy to suppress unused var warning - - stan::math::initialize(mu_b, DUMMY_VAR__); - stan::math::fill(mu_b,DUMMY_VAR__); - current_statement_begin__ = 90; - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); 
- stan::math::fill(mu_rho,DUMMY_VAR__); - current_statement_begin__ = 91; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 92; - validate_non_negative_index("Qgo", "N", N); - validate_non_negative_index("Qgo", "T", T); - vector > Qgo(N, (vector(T))); - stan::math::initialize(Qgo, DUMMY_VAR__); - stan::math::fill(Qgo,DUMMY_VAR__); - current_statement_begin__ = 93; - validate_non_negative_index("Qnogo", "N", N); - validate_non_negative_index("Qnogo", "T", T); - vector > Qnogo(N, (vector(T))); - stan::math::initialize(Qnogo, DUMMY_VAR__); - stan::math::fill(Qnogo,DUMMY_VAR__); - current_statement_begin__ = 94; - validate_non_negative_index("Wgo", "N", N); - validate_non_negative_index("Wgo", "T", T); - vector > Wgo(N, (vector(T))); - stan::math::initialize(Wgo, DUMMY_VAR__); - stan::math::fill(Wgo,DUMMY_VAR__); - current_statement_begin__ = 95; - validate_non_negative_index("Wnogo", "N", N); - validate_non_negative_index("Wnogo", "T", T); - vector > Wnogo(N, (vector(T))); - stan::math::initialize(Wnogo, DUMMY_VAR__); - stan::math::fill(Wnogo,DUMMY_VAR__); - current_statement_begin__ = 98; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 101; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 102; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 103; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 107; - stan::math::assign(mu_xi, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 108; - 
stan::math::assign(mu_ep, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 109; - stan::math::assign(mu_b, get_base1(mu_p,3,"mu_p",1)); - current_statement_begin__ = 110; - stan::math::assign(mu_rho, stan::math::exp(get_base1(mu_p,4,"mu_p",1))); - - current_statement_begin__ = 113; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 114; - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - current_statement_begin__ = 115; - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - current_statement_begin__ = 116; - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - current_statement_begin__ = 117; - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - current_statement_begin__ = 118; - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - current_statement_begin__ = 120; - stan::math::assign(wv_g, initV); - current_statement_begin__ = 121; - stan::math::assign(wv_ng, initV); - current_statement_begin__ = 122; - stan::math::assign(qv_g, initV); - current_statement_begin__ = 123; - stan::math::assign(qv_ng, initV); - current_statement_begin__ = 125; - stan::model::assign(log_lik, 
- stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 127; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 128; - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - (get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)), - "assigning variable wv_g"); - current_statement_begin__ = 129; - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - current_statement_begin__ = 130; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - current_statement_begin__ = 131; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - current_statement_begin__ = 132; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2),get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1)))), - "assigning variable log_lik"); - 
current_statement_begin__ = 135; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1), base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 138; - stan::model::assign(Qgo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1), - "assigning variable Qgo"); - current_statement_begin__ = 139; - stan::model::assign(Qnogo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable Qnogo"); - current_statement_begin__ = 140; - stan::model::assign(Wgo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1), - "assigning variable Wgo"); - current_statement_begin__ = 141; - stan::model::assign(Wnogo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1), - "assigning variable Wnogo"); - current_statement_begin__ = 144; - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - current_statement_begin__ = 145; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * 
get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - current_statement_begin__ = 147; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - // validate generated quantities - current_statement_begin__ = 87; - check_greater_or_equal(function__,"mu_xi",mu_xi,0); - check_less_or_equal(function__,"mu_xi",mu_xi,1); - current_statement_begin__ = 88; - check_greater_or_equal(function__,"mu_ep",mu_ep,0); - check_less_or_equal(function__,"mu_ep",mu_ep,1); - current_statement_begin__ = 89; - current_statement_begin__ = 90; - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - current_statement_begin__ = 91; - current_statement_begin__ = 92; - current_statement_begin__ = 93; - current_statement_begin__ = 94; - current_statement_begin__ = 95; - current_statement_begin__ = 98; - - // write generated quantities - vars__.push_back(mu_xi); - vars__.push_back(mu_ep); - vars__.push_back(mu_b); - vars__.push_back(mu_rho); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Qgo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Qnogo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Wgo[k_0__][k_1__]); - } - } - for 
(int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Wnogo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_gng_m2_reg"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wnogo" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qnogo" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_gng_m3_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_gng_m3"); - reader.add_event(161, 159, "end", "model_gng_m3"); - return reader; -} - -class model_gng_m3 : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > pressed; - vector > cue; - vector_d initV; -public: - model_gng_m3(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_gng_m3(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, 
- std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_gng_m3_namespace::model_gng_m3"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ 
< outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - context__.validate_dims("data initialization", "pressed", "int", context__.to_vec(N,T)); - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - pressed = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("pressed"); - pos__ = 0; - size_t pressed_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < pressed_limit_1__; ++i_1__) { - size_t pressed_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < pressed_limit_0__; ++i_0__) { - pressed[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - context__.validate_dims("data initialization", "cue", "int", context__.to_vec(N,T)); - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - cue = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("cue"); - pos__ = 0; - size_t cue_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cue_limit_1__; ++i_1__) { - size_t cue_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cue_limit_0__; ++i_0__) { - cue[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ 
= 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],-(1)); - check_less_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],1); - } - } - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],1); - check_less_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],4); - } - } - // initialize data variables - current_statement_begin__ = 11; - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 12; - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - current_statement_begin__ = 11; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 17; - validate_non_negative_index("mu_p", "5", 5); - num_params_r__ += 5; - current_statement_begin__ = 18; - validate_non_negative_index("sigma", "5", 5); - num_params_r__ += 5; - current_statement_begin__ = 19; - validate_non_negative_index("xi_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 20; - validate_non_negative_index("ep_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 21; - validate_non_negative_index("b_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 22; - validate_non_negative_index("pi_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 23; - validate_non_negative_index("rho_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_gng_m3() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& 
params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "5", 5); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(5)); - vector_d mu_p(static_cast(5)); - for (int j1__ = 0U; j1__ < 5; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "5", 5); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(5)); - vector_d sigma(static_cast(5)); - for (int j1__ = 0U; j1__ < 5; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("xi_pr"))) - throw std::runtime_error("variable xi_pr missing"); - vals_r__ = context__.vals_r("xi_pr"); - pos__ = 0U; - validate_non_negative_index("xi_pr", "N", N); - context__.validate_dims("initialization", "xi_pr", "vector_d", context__.to_vec(N)); - vector_d xi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - xi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(xi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable xi_pr: ") + e.what()); - } - - if (!(context__.contains_r("ep_pr"))) - 
throw std::runtime_error("variable ep_pr missing"); - vals_r__ = context__.vals_r("ep_pr"); - pos__ = 0U; - validate_non_negative_index("ep_pr", "N", N); - context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); - vector_d ep_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ep_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ep_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); - } - - if (!(context__.contains_r("b_pr"))) - throw std::runtime_error("variable b_pr missing"); - vals_r__ = context__.vals_r("b_pr"); - pos__ = 0U; - validate_non_negative_index("b_pr", "N", N); - context__.validate_dims("initialization", "b_pr", "vector_d", context__.to_vec(N)); - vector_d b_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - b_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(b_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable b_pr: ") + e.what()); - } - - if (!(context__.contains_r("pi_pr"))) - throw std::runtime_error("variable pi_pr missing"); - vals_r__ = context__.vals_r("pi_pr"); - pos__ = 0U; - validate_non_negative_index("pi_pr", "N", N); - context__.validate_dims("initialization", "pi_pr", "vector_d", context__.to_vec(N)); - vector_d pi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - pi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(pi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable pi_pr: ") + e.what()); - } - - if (!(context__.contains_r("rho_pr"))) - throw std::runtime_error("variable rho_pr missing"); - vals_r__ = context__.vals_r("rho_pr"); - pos__ = 0U; - validate_non_negative_index("rho_pr", "N", N); - context__.validate_dims("initialization", "rho_pr", "vector_d", context__.to_vec(N)); - vector_d rho_pr(static_cast(N)); - for 
(int j1__ = 0U; j1__ < N; ++j1__) - rho_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(5,lp__); - else - mu_p = in__.vector_constrain(5); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,5,lp__); - else - sigma = in__.vector_lb_constrain(0,5); - - Eigen::Matrix xi_pr; - (void) xi_pr; // dummy to suppress unused var warning - if (jacobian__) - xi_pr = in__.vector_constrain(N,lp__); - else - xi_pr = in__.vector_constrain(N); - - Eigen::Matrix ep_pr; - (void) ep_pr; // dummy to suppress unused var warning - if (jacobian__) - ep_pr = in__.vector_constrain(N,lp__); - else - ep_pr = in__.vector_constrain(N); - - Eigen::Matrix b_pr; - (void) b_pr; // dummy to suppress unused var warning - if (jacobian__) - b_pr = in__.vector_constrain(N,lp__); - else - b_pr = 
in__.vector_constrain(N); - - Eigen::Matrix pi_pr; - (void) pi_pr; // dummy to suppress unused var warning - if (jacobian__) - pi_pr = in__.vector_constrain(N,lp__); - else - pi_pr = in__.vector_constrain(N); - - Eigen::Matrix rho_pr; - (void) rho_pr; // dummy to suppress unused var warning - if (jacobian__) - rho_pr = in__.vector_constrain(N,lp__); - else - rho_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 27; - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - current_statement_begin__ = 33; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 34; - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - 
"assigning variable xi"); - current_statement_begin__ = 35; - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - current_statement_begin__ = 37; - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - current_statement_begin__ = 38; - stan::math::assign(pi, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),pi_pr))); - current_statement_begin__ = 39; - stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),rho_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(xi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: xi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ep(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ep" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(b(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: b" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(pi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: pi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = 
"validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 27; - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - current_statement_begin__ = 28; - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - current_statement_begin__ = 29; - current_statement_begin__ = 30; - current_statement_begin__ = 31; - check_greater_or_equal(function__,"rho",rho,0); - - // model body - - current_statement_begin__ = 45; - lp_accum__.add(normal_log(get_base1(mu_p,1,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 46; - lp_accum__.add(normal_log(get_base1(mu_p,2,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 47; - lp_accum__.add(normal_log(get_base1(mu_p,3,"mu_p",1), 0, 10.0)); - current_statement_begin__ = 48; - lp_accum__.add(normal_log(get_base1(mu_p,4,"mu_p",1), 0, 10.0)); - current_statement_begin__ = 49; - lp_accum__.add(normal_log(get_base1(mu_p,5,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 50; - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 2), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - current_statement_begin__ = 51; - lp_accum__.add(cauchy_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(3, 4), stan::model::nil_index_list()), "sigma"), 0, 1.0)); - current_statement_begin__ = 52; - lp_accum__.add(normal_log(get_base1(sigma,5,"sigma",1), 0, 0.20000000000000001)); - current_statement_begin__ = 55; - lp_accum__.add(normal_log(xi_pr, 0, 1.0)); - current_statement_begin__ = 56; - lp_accum__.add(normal_log(ep_pr, 0, 1.0)); - current_statement_begin__ = 57; - lp_accum__.add(normal_log(b_pr, 0, 1.0)); - current_statement_begin__ = 58; - lp_accum__.add(normal_log(pi_pr, 0, 1.0)); - current_statement_begin__ = 59; - lp_accum__.add(normal_log(rho_pr, 0, 1.0)); - current_statement_begin__ = 61; - for (int i = 1; i 
<= N; ++i) { - { - current_statement_begin__ = 62; - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - current_statement_begin__ = 63; - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - current_statement_begin__ = 64; - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - current_statement_begin__ = 65; - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - current_statement_begin__ = 66; - validate_non_negative_index("sv", "4", 4); - Eigen::Matrix sv(static_cast(4)); - (void) sv; // dummy to suppress unused var warning - - stan::math::initialize(sv, DUMMY_VAR__); - stan::math::fill(sv,DUMMY_VAR__); - current_statement_begin__ = 67; - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - current_statement_begin__ = 69; - stan::math::assign(wv_g, initV); - current_statement_begin__ = 70; - stan::math::assign(wv_ng, initV); - current_statement_begin__ = 71; - stan::math::assign(qv_g, initV); - current_statement_begin__ = 72; - stan::math::assign(qv_ng, initV); - current_statement_begin__ = 73; - stan::math::assign(sv, initV); - current_statement_begin__ = 75; - for (int t = 1; t <= 
get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 76; - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - ((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)) + (get_base1(pi,i,"pi",1) * get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))), - "assigning variable wv_g"); - current_statement_begin__ = 77; - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - current_statement_begin__ = 78; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - current_statement_begin__ = 79; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - current_statement_begin__ = 80; - lp_accum__.add(bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2), get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1))); - current_statement_begin__ = 83; - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + 
(get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - current_statement_begin__ = 86; - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - current_statement_begin__ = 87; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - current_statement_begin__ = 89; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return 
log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("xi_pr"); - names__.push_back("ep_pr"); - names__.push_back("b_pr"); - names__.push_back("pi_pr"); - names__.push_back("rho_pr"); - names__.push_back("xi"); - names__.push_back("ep"); - names__.push_back("b"); - names__.push_back("pi"); - names__.push_back("rho"); - names__.push_back("mu_xi"); - names__.push_back("mu_ep"); - names__.push_back("mu_b"); - names__.push_back("mu_pi"); - names__.push_back("mu_rho"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(5); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(5); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - 
dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_gng_m3_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(5); - vector_d sigma = in__.vector_lb_constrain(0,5); - vector_d xi_pr = in__.vector_constrain(N); - vector_d ep_pr = in__.vector_constrain(N); - vector_d b_pr = in__.vector_constrain(N); - vector_d pi_pr = in__.vector_constrain(N); - vector_d rho_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 5; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 5; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 27; - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - 
stan::math::fill(xi,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - current_statement_begin__ = 33; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 34; - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - current_statement_begin__ = 35; - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - current_statement_begin__ = 37; - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - current_statement_begin__ = 38; - stan::math::assign(pi, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),pi_pr))); - current_statement_begin__ = 39; - 
stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),rho_pr)))); - - // validate transformed parameters - current_statement_begin__ = 27; - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - current_statement_begin__ = 28; - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - current_statement_begin__ = 29; - current_statement_begin__ = 30; - current_statement_begin__ = 31; - check_greater_or_equal(function__,"rho",rho,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 96; - local_scalar_t__ mu_xi; - (void) mu_xi; // dummy to suppress unused var warning - - stan::math::initialize(mu_xi, DUMMY_VAR__); - stan::math::fill(mu_xi,DUMMY_VAR__); - current_statement_begin__ = 97; - local_scalar_t__ mu_ep; - (void) mu_ep; // dummy to suppress unused var warning - - stan::math::initialize(mu_ep, DUMMY_VAR__); - stan::math::fill(mu_ep,DUMMY_VAR__); - current_statement_begin__ = 98; - local_scalar_t__ mu_b; - (void) mu_b; // dummy to suppress unused var warning - - stan::math::initialize(mu_b, DUMMY_VAR__); - stan::math::fill(mu_b,DUMMY_VAR__); - current_statement_begin__ = 99; - local_scalar_t__ mu_pi; - (void) mu_pi; // dummy to suppress unused var warning - - stan::math::initialize(mu_pi, DUMMY_VAR__); - stan::math::fill(mu_pi,DUMMY_VAR__); - current_statement_begin__ = 100; - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy 
to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - stan::math::fill(mu_rho,DUMMY_VAR__); - current_statement_begin__ = 102; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 105; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 108; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 109; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 110; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 114; - stan::math::assign(mu_xi, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 115; - stan::math::assign(mu_ep, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 116; - stan::math::assign(mu_b, get_base1(mu_p,3,"mu_p",1)); - current_statement_begin__ = 117; - stan::math::assign(mu_pi, get_base1(mu_p,4,"mu_p",1)); - current_statement_begin__ = 118; - stan::math::assign(mu_rho, stan::math::exp(get_base1(mu_p,5,"mu_p",1))); - - current_statement_begin__ = 121; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 122; - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - current_statement_begin__ = 123; - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - 
stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - current_statement_begin__ = 124; - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - current_statement_begin__ = 125; - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - current_statement_begin__ = 126; - validate_non_negative_index("sv", "4", 4); - Eigen::Matrix sv(static_cast(4)); - (void) sv; // dummy to suppress unused var warning - - stan::math::initialize(sv, DUMMY_VAR__); - stan::math::fill(sv,DUMMY_VAR__); - current_statement_begin__ = 127; - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - current_statement_begin__ = 129; - stan::math::assign(wv_g, initV); - current_statement_begin__ = 130; - stan::math::assign(wv_ng, initV); - current_statement_begin__ = 131; - stan::math::assign(qv_g, initV); - current_statement_begin__ = 132; - stan::math::assign(qv_ng, initV); - current_statement_begin__ = 133; - stan::math::assign(sv, initV); - current_statement_begin__ = 135; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 137; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 138; - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - 
((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)) + (get_base1(pi,i,"pi",1) * get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))), - "assigning variable wv_g"); - current_statement_begin__ = 139; - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - current_statement_begin__ = 140; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - current_statement_begin__ = 141; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - current_statement_begin__ = 142; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2),get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1)))), - "assigning variable log_lik"); - current_statement_begin__ = 145; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1), base_rng__), - "assigning variable 
y_pred"); - current_statement_begin__ = 148; - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - current_statement_begin__ = 151; - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - current_statement_begin__ = 152; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - current_statement_begin__ = 154; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - // validate generated quantities - current_statement_begin__ = 96; - check_greater_or_equal(function__,"mu_xi",mu_xi,0); - check_less_or_equal(function__,"mu_xi",mu_xi,1); - current_statement_begin__ = 97; - check_greater_or_equal(function__,"mu_ep",mu_ep,0); - check_less_or_equal(function__,"mu_ep",mu_ep,1); - 
current_statement_begin__ = 98; - current_statement_begin__ = 99; - current_statement_begin__ = 100; - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - current_statement_begin__ = 102; - current_statement_begin__ = 105; - - // write generated quantities - vars__.push_back(mu_xi); - vars__.push_back(mu_ep); - vars__.push_back(mu_b); - vars__.push_back(mu_pi); - vars__.push_back(mu_rho); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_gng_m3"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_gng_m3_reg_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_gng_m3_reg"); - reader.add_event(173, 171, "end", "model_gng_m3_reg"); - return reader; -} - -class model_gng_m3_reg : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > pressed; - vector > cue; - vector_d initV; -public: - model_gng_m3_reg(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_gng_m3_reg(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_gng_m3_reg_namespace::model_gng_m3_reg"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "N", 
"int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 4; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 5; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 6; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - context__.validate_dims("data initialization", "pressed", "int", context__.to_vec(N,T)); - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - pressed = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("pressed"); - pos__ = 0; - size_t pressed_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < pressed_limit_1__; ++i_1__) { - size_t pressed_limit_0__ = N; - for 
(size_t i_0__ = 0; i_0__ < pressed_limit_0__; ++i_0__) { - pressed[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 8; - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - context__.validate_dims("data initialization", "cue", "int", context__.to_vec(N,T)); - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - cue = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("cue"); - pos__ = 0; - size_t cue_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cue_limit_1__; ++i_1__) { - size_t cue_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cue_limit_0__; ++i_0__) { - cue[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 3; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 4; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 6; - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],-(1)); - check_less_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],1); - } - } - current_statement_begin__ = 8; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],1); - check_less_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],4); - } - } - // initialize data variables - current_statement_begin__ = 12; - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 13; - stan::math::assign(initV, rep_vector(0.0,4)); - - // 
validate transformed data - current_statement_begin__ = 12; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 18; - validate_non_negative_index("mu_p", "5", 5); - num_params_r__ += 5; - current_statement_begin__ = 19; - validate_non_negative_index("sigma", "5", 5); - num_params_r__ += 5; - current_statement_begin__ = 20; - validate_non_negative_index("xi_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 21; - validate_non_negative_index("ep_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 22; - validate_non_negative_index("b_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 23; - validate_non_negative_index("pi_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 24; - validate_non_negative_index("rho_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_gng_m3_reg() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "5", 5); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(5)); - vector_d mu_p(static_cast(5)); - for (int j1__ = 0U; j1__ < 5; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw 
std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "5", 5); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(5)); - vector_d sigma(static_cast(5)); - for (int j1__ = 0U; j1__ < 5; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("xi_pr"))) - throw std::runtime_error("variable xi_pr missing"); - vals_r__ = context__.vals_r("xi_pr"); - pos__ = 0U; - validate_non_negative_index("xi_pr", "N", N); - context__.validate_dims("initialization", "xi_pr", "vector_d", context__.to_vec(N)); - vector_d xi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - xi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(xi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable xi_pr: ") + e.what()); - } - - if (!(context__.contains_r("ep_pr"))) - throw std::runtime_error("variable ep_pr missing"); - vals_r__ = context__.vals_r("ep_pr"); - pos__ = 0U; - validate_non_negative_index("ep_pr", "N", N); - context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); - vector_d ep_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ep_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ep_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); - } - - if (!(context__.contains_r("b_pr"))) - throw std::runtime_error("variable b_pr missing"); - vals_r__ = context__.vals_r("b_pr"); - pos__ = 0U; - validate_non_negative_index("b_pr", "N", 
N); - context__.validate_dims("initialization", "b_pr", "vector_d", context__.to_vec(N)); - vector_d b_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - b_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(b_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable b_pr: ") + e.what()); - } - - if (!(context__.contains_r("pi_pr"))) - throw std::runtime_error("variable pi_pr missing"); - vals_r__ = context__.vals_r("pi_pr"); - pos__ = 0U; - validate_non_negative_index("pi_pr", "N", N); - context__.validate_dims("initialization", "pi_pr", "vector_d", context__.to_vec(N)); - vector_d pi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - pi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(pi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable pi_pr: ") + e.what()); - } - - if (!(context__.contains_r("rho_pr"))) - throw std::runtime_error("variable rho_pr missing"); - vals_r__ = context__.vals_r("rho_pr"); - pos__ = 0U; - validate_non_negative_index("rho_pr", "N", N); - context__.validate_dims("initialization", "rho_pr", "vector_d", context__.to_vec(N)); - vector_d rho_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ 
log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(5,lp__); - else - mu_p = in__.vector_constrain(5); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,5,lp__); - else - sigma = in__.vector_lb_constrain(0,5); - - Eigen::Matrix xi_pr; - (void) xi_pr; // dummy to suppress unused var warning - if (jacobian__) - xi_pr = in__.vector_constrain(N,lp__); - else - xi_pr = in__.vector_constrain(N); - - Eigen::Matrix ep_pr; - (void) ep_pr; // dummy to suppress unused var warning - if (jacobian__) - ep_pr = in__.vector_constrain(N,lp__); - else - ep_pr = in__.vector_constrain(N); - - Eigen::Matrix b_pr; - (void) b_pr; // dummy to suppress unused var warning - if (jacobian__) - b_pr = in__.vector_constrain(N,lp__); - else - b_pr = in__.vector_constrain(N); - - Eigen::Matrix pi_pr; - (void) pi_pr; // dummy to suppress unused var warning - if (jacobian__) - pi_pr = in__.vector_constrain(N,lp__); - else - pi_pr = in__.vector_constrain(N); - - Eigen::Matrix rho_pr; - (void) rho_pr; // dummy to suppress unused var warning - if (jacobian__) - rho_pr = in__.vector_constrain(N,lp__); - else - rho_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 28; - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - current_statement_begin__ = 29; - 
validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - current_statement_begin__ = 32; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - current_statement_begin__ = 34; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 35; - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - current_statement_begin__ = 36; - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - current_statement_begin__ = 38; - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - current_statement_begin__ = 39; - stan::math::assign(pi, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),pi_pr))); - current_statement_begin__ = 40; - stan::math::assign(rho, 
stan::math::exp(add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),rho_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(xi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: xi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ep(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ep" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(b(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: b" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(pi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: pi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 28; - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - current_statement_begin__ = 29; - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - current_statement_begin__ = 30; - current_statement_begin__ = 31; - current_statement_begin__ = 32; - check_greater_or_equal(function__,"rho",rho,0); - - // model body - - current_statement_begin__ = 46; - lp_accum__.add(normal_log(get_base1(mu_p,1,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 
47; - lp_accum__.add(normal_log(get_base1(mu_p,2,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 48; - lp_accum__.add(normal_log(get_base1(mu_p,3,"mu_p",1), 0, 10.0)); - current_statement_begin__ = 49; - lp_accum__.add(normal_log(get_base1(mu_p,4,"mu_p",1), 0, 10.0)); - current_statement_begin__ = 50; - lp_accum__.add(normal_log(get_base1(mu_p,5,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 51; - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 2), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - current_statement_begin__ = 52; - lp_accum__.add(cauchy_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(3, 4), stan::model::nil_index_list()), "sigma"), 0, 1.0)); - current_statement_begin__ = 53; - lp_accum__.add(normal_log(get_base1(sigma,5,"sigma",1), 0, 0.20000000000000001)); - current_statement_begin__ = 56; - lp_accum__.add(normal_log(xi_pr, 0, 1.0)); - current_statement_begin__ = 57; - lp_accum__.add(normal_log(ep_pr, 0, 1.0)); - current_statement_begin__ = 58; - lp_accum__.add(normal_log(b_pr, 0, 1.0)); - current_statement_begin__ = 59; - lp_accum__.add(normal_log(pi_pr, 0, 1.0)); - current_statement_begin__ = 60; - lp_accum__.add(normal_log(rho_pr, 0, 1.0)); - current_statement_begin__ = 62; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 63; - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - current_statement_begin__ = 64; - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - current_statement_begin__ = 65; - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix 
qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - current_statement_begin__ = 66; - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - current_statement_begin__ = 67; - validate_non_negative_index("sv", "4", 4); - Eigen::Matrix sv(static_cast(4)); - (void) sv; // dummy to suppress unused var warning - - stan::math::initialize(sv, DUMMY_VAR__); - stan::math::fill(sv,DUMMY_VAR__); - current_statement_begin__ = 68; - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - current_statement_begin__ = 70; - stan::math::assign(wv_g, initV); - current_statement_begin__ = 71; - stan::math::assign(wv_ng, initV); - current_statement_begin__ = 72; - stan::math::assign(qv_g, initV); - current_statement_begin__ = 73; - stan::math::assign(qv_ng, initV); - current_statement_begin__ = 74; - stan::math::assign(sv, initV); - current_statement_begin__ = 76; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 77; - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - ((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)) + (get_base1(pi,i,"pi",1) * get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))), - "assigning variable wv_g"); - current_statement_begin__ = 78; - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - 
get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - current_statement_begin__ = 79; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - current_statement_begin__ = 80; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - current_statement_begin__ = 81; - lp_accum__.add(bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2), get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1))); - current_statement_begin__ = 84; - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - current_statement_begin__ = 87; - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - current_statement_begin__ = 88; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * 
((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - current_statement_begin__ = 90; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("xi_pr"); - names__.push_back("ep_pr"); - names__.push_back("b_pr"); - names__.push_back("pi_pr"); - names__.push_back("rho_pr"); - names__.push_back("xi"); - names__.push_back("ep"); - names__.push_back("b"); - names__.push_back("pi"); - names__.push_back("rho"); - names__.push_back("mu_xi"); - names__.push_back("mu_ep"); - names__.push_back("mu_b"); - names__.push_back("mu_pi"); - names__.push_back("mu_rho"); - 
names__.push_back("log_lik"); - names__.push_back("Qgo"); - names__.push_back("Qnogo"); - names__.push_back("Wgo"); - names__.push_back("Wnogo"); - names__.push_back("SV"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(5); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(5); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - 
dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_gng_m3_reg_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(5); - vector_d sigma = in__.vector_lb_constrain(0,5); - vector_d xi_pr = in__.vector_constrain(N); - vector_d ep_pr = in__.vector_constrain(N); - vector_d b_pr = in__.vector_constrain(N); - vector_d pi_pr = in__.vector_constrain(N); - vector_d rho_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 5; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 5; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 28; - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - 
stan::math::fill(xi,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - current_statement_begin__ = 32; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - - - current_statement_begin__ = 34; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 35; - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - current_statement_begin__ = 36; - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - current_statement_begin__ = 38; - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - current_statement_begin__ = 39; - stan::math::assign(pi, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),pi_pr))); - current_statement_begin__ = 40; - 
stan::math::assign(rho, stan::math::exp(add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),rho_pr)))); - - // validate transformed parameters - current_statement_begin__ = 28; - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - current_statement_begin__ = 29; - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - current_statement_begin__ = 30; - current_statement_begin__ = 31; - current_statement_begin__ = 32; - check_greater_or_equal(function__,"rho",rho,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 97; - local_scalar_t__ mu_xi; - (void) mu_xi; // dummy to suppress unused var warning - - stan::math::initialize(mu_xi, DUMMY_VAR__); - stan::math::fill(mu_xi,DUMMY_VAR__); - current_statement_begin__ = 98; - local_scalar_t__ mu_ep; - (void) mu_ep; // dummy to suppress unused var warning - - stan::math::initialize(mu_ep, DUMMY_VAR__); - stan::math::fill(mu_ep,DUMMY_VAR__); - current_statement_begin__ = 99; - local_scalar_t__ mu_b; - (void) mu_b; // dummy to suppress unused var warning - - stan::math::initialize(mu_b, DUMMY_VAR__); - stan::math::fill(mu_b,DUMMY_VAR__); - current_statement_begin__ = 100; - local_scalar_t__ mu_pi; - (void) mu_pi; // dummy to suppress unused var warning - - stan::math::initialize(mu_pi, DUMMY_VAR__); - stan::math::fill(mu_pi,DUMMY_VAR__); - current_statement_begin__ = 101; - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy 
to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - stan::math::fill(mu_rho,DUMMY_VAR__); - current_statement_begin__ = 102; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 103; - validate_non_negative_index("Qgo", "N", N); - validate_non_negative_index("Qgo", "T", T); - vector > Qgo(N, (vector(T))); - stan::math::initialize(Qgo, DUMMY_VAR__); - stan::math::fill(Qgo,DUMMY_VAR__); - current_statement_begin__ = 104; - validate_non_negative_index("Qnogo", "N", N); - validate_non_negative_index("Qnogo", "T", T); - vector > Qnogo(N, (vector(T))); - stan::math::initialize(Qnogo, DUMMY_VAR__); - stan::math::fill(Qnogo,DUMMY_VAR__); - current_statement_begin__ = 105; - validate_non_negative_index("Wgo", "N", N); - validate_non_negative_index("Wgo", "T", T); - vector > Wgo(N, (vector(T))); - stan::math::initialize(Wgo, DUMMY_VAR__); - stan::math::fill(Wgo,DUMMY_VAR__); - current_statement_begin__ = 106; - validate_non_negative_index("Wnogo", "N", N); - validate_non_negative_index("Wnogo", "T", T); - vector > Wnogo(N, (vector(T))); - stan::math::initialize(Wnogo, DUMMY_VAR__); - stan::math::fill(Wnogo,DUMMY_VAR__); - current_statement_begin__ = 107; - validate_non_negative_index("SV", "N", N); - validate_non_negative_index("SV", "T", T); - vector > SV(N, (vector(T))); - stan::math::initialize(SV, DUMMY_VAR__); - stan::math::fill(SV,DUMMY_VAR__); - current_statement_begin__ = 110; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 113; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 114; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 115; - stan::model::assign(y_pred, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 119; - stan::math::assign(mu_xi, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 120; - stan::math::assign(mu_ep, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 121; - stan::math::assign(mu_b, get_base1(mu_p,3,"mu_p",1)); - current_statement_begin__ = 122; - stan::math::assign(mu_pi, get_base1(mu_p,4,"mu_p",1)); - current_statement_begin__ = 123; - stan::math::assign(mu_rho, stan::math::exp(get_base1(mu_p,5,"mu_p",1))); - - current_statement_begin__ = 126; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 127; - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - current_statement_begin__ = 128; - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - current_statement_begin__ = 129; - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - current_statement_begin__ = 130; - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - current_statement_begin__ = 131; - validate_non_negative_index("sv", "4", 4); - Eigen::Matrix sv(static_cast(4)); - (void) sv; // dummy to suppress unused var warning - - 
stan::math::initialize(sv, DUMMY_VAR__); - stan::math::fill(sv,DUMMY_VAR__); - current_statement_begin__ = 132; - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - current_statement_begin__ = 134; - stan::math::assign(wv_g, initV); - current_statement_begin__ = 135; - stan::math::assign(wv_ng, initV); - current_statement_begin__ = 136; - stan::math::assign(qv_g, initV); - current_statement_begin__ = 137; - stan::math::assign(qv_ng, initV); - current_statement_begin__ = 138; - stan::math::assign(sv, initV); - current_statement_begin__ = 140; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 142; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 143; - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - ((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)) + (get_base1(pi,i,"pi",1) * get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))), - "assigning variable wv_g"); - current_statement_begin__ = 144; - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - current_statement_begin__ = 145; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - 
get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - current_statement_begin__ = 146; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - current_statement_begin__ = 147; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2),get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1)))), - "assigning variable log_lik"); - current_statement_begin__ = 150; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1), base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 153; - stan::model::assign(Qgo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1), - "assigning variable Qgo"); - current_statement_begin__ = 154; - stan::model::assign(Qnogo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable Qnogo"); - current_statement_begin__ = 155; - stan::model::assign(Wgo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), 
stan::model::nil_index_list())), - get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1), - "assigning variable Wgo"); - current_statement_begin__ = 156; - stan::model::assign(Wnogo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1), - "assigning variable Wnogo"); - current_statement_begin__ = 157; - stan::model::assign(SV, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1), - "assigning variable SV"); - current_statement_begin__ = 160; - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - current_statement_begin__ = 163; - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - current_statement_begin__ = 164; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - current_statement_begin__ = 166; - stan::model::assign(qv_ng, - 
stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rho,i,"rho",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - - // validate generated quantities - current_statement_begin__ = 97; - check_greater_or_equal(function__,"mu_xi",mu_xi,0); - check_less_or_equal(function__,"mu_xi",mu_xi,1); - current_statement_begin__ = 98; - check_greater_or_equal(function__,"mu_ep",mu_ep,0); - check_less_or_equal(function__,"mu_ep",mu_ep,1); - current_statement_begin__ = 99; - current_statement_begin__ = 100; - current_statement_begin__ = 101; - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - current_statement_begin__ = 102; - current_statement_begin__ = 103; - current_statement_begin__ = 104; - current_statement_begin__ = 105; - current_statement_begin__ = 106; - current_statement_begin__ = 107; - current_statement_begin__ = 110; - - // write generated quantities - vars__.push_back(mu_xi); - vars__.push_back(mu_ep); - vars__.push_back(mu_b); - vars__.push_back(mu_pi); - vars__.push_back(mu_rho); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Qgo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Qnogo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Wgo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Wnogo[k_0__][k_1__]); - } - } - for (int 
k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(SV[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_gng_m3_reg"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wnogo" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "SV" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wnogo" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "SV" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_gng_m4_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_gng_m4"); - reader.add_event(192, 190, "end", "model_gng_m4"); - return reader; -} - -class model_gng_m4 : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > pressed; - vector > cue; - vector_d initV; -public: - model_gng_m4(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_gng_m4(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const 
char* function__ = "model_gng_m4_namespace::model_gng_m4"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("pressed", "N", 
N); - validate_non_negative_index("pressed", "T", T); - context__.validate_dims("data initialization", "pressed", "int", context__.to_vec(N,T)); - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - pressed = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("pressed"); - pos__ = 0; - size_t pressed_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < pressed_limit_1__; ++i_1__) { - size_t pressed_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < pressed_limit_0__; ++i_0__) { - pressed[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - context__.validate_dims("data initialization", "cue", "int", context__.to_vec(N,T)); - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - cue = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("cue"); - pos__ = 0; - size_t cue_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cue_limit_1__; ++i_1__) { - size_t cue_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cue_limit_0__; ++i_0__) { - cue[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],-(1)); - check_less_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],1); - } - } - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < 
N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],1); - check_less_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],4); - } - } - // initialize data variables - current_statement_begin__ = 11; - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 12; - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - current_statement_begin__ = 11; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 17; - validate_non_negative_index("mu_p", "6", 6); - num_params_r__ += 6; - current_statement_begin__ = 18; - validate_non_negative_index("sigma", "6", 6); - num_params_r__ += 6; - current_statement_begin__ = 19; - validate_non_negative_index("xi_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 20; - validate_non_negative_index("ep_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 21; - validate_non_negative_index("b_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 22; - validate_non_negative_index("pi_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 23; - validate_non_negative_index("rhoRew_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 24; - validate_non_negative_index("rhoPun_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_gng_m4() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) 
pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "6", 6); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(6)); - vector_d mu_p(static_cast(6)); - for (int j1__ = 0U; j1__ < 6; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "6", 6); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(6)); - vector_d sigma(static_cast(6)); - for (int j1__ = 0U; j1__ < 6; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("xi_pr"))) - throw std::runtime_error("variable xi_pr missing"); - vals_r__ = context__.vals_r("xi_pr"); - pos__ = 0U; - validate_non_negative_index("xi_pr", "N", N); - context__.validate_dims("initialization", "xi_pr", "vector_d", context__.to_vec(N)); - vector_d xi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - xi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(xi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable xi_pr: ") + e.what()); - } - - if (!(context__.contains_r("ep_pr"))) - throw std::runtime_error("variable ep_pr missing"); - vals_r__ = context__.vals_r("ep_pr"); - pos__ = 0U; - validate_non_negative_index("ep_pr", "N", 
N); - context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); - vector_d ep_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ep_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ep_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); - } - - if (!(context__.contains_r("b_pr"))) - throw std::runtime_error("variable b_pr missing"); - vals_r__ = context__.vals_r("b_pr"); - pos__ = 0U; - validate_non_negative_index("b_pr", "N", N); - context__.validate_dims("initialization", "b_pr", "vector_d", context__.to_vec(N)); - vector_d b_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - b_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(b_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable b_pr: ") + e.what()); - } - - if (!(context__.contains_r("pi_pr"))) - throw std::runtime_error("variable pi_pr missing"); - vals_r__ = context__.vals_r("pi_pr"); - pos__ = 0U; - validate_non_negative_index("pi_pr", "N", N); - context__.validate_dims("initialization", "pi_pr", "vector_d", context__.to_vec(N)); - vector_d pi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - pi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(pi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable pi_pr: ") + e.what()); - } - - if (!(context__.contains_r("rhoRew_pr"))) - throw std::runtime_error("variable rhoRew_pr missing"); - vals_r__ = context__.vals_r("rhoRew_pr"); - pos__ = 0U; - validate_non_negative_index("rhoRew_pr", "N", N); - context__.validate_dims("initialization", "rhoRew_pr", "vector_d", context__.to_vec(N)); - vector_d rhoRew_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rhoRew_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rhoRew_pr); - } catch 
(const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rhoRew_pr: ") + e.what()); - } - - if (!(context__.contains_r("rhoPun_pr"))) - throw std::runtime_error("variable rhoPun_pr missing"); - vals_r__ = context__.vals_r("rhoPun_pr"); - pos__ = 0U; - validate_non_negative_index("rhoPun_pr", "N", N); - context__.validate_dims("initialization", "rhoPun_pr", "vector_d", context__.to_vec(N)); - vector_d rhoPun_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rhoPun_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rhoPun_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rhoPun_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(6,lp__); - else - mu_p = in__.vector_constrain(6); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,6,lp__); - else - sigma = in__.vector_lb_constrain(0,6); - - Eigen::Matrix xi_pr; - (void) xi_pr; // 
dummy to suppress unused var warning - if (jacobian__) - xi_pr = in__.vector_constrain(N,lp__); - else - xi_pr = in__.vector_constrain(N); - - Eigen::Matrix ep_pr; - (void) ep_pr; // dummy to suppress unused var warning - if (jacobian__) - ep_pr = in__.vector_constrain(N,lp__); - else - ep_pr = in__.vector_constrain(N); - - Eigen::Matrix b_pr; - (void) b_pr; // dummy to suppress unused var warning - if (jacobian__) - b_pr = in__.vector_constrain(N,lp__); - else - b_pr = in__.vector_constrain(N); - - Eigen::Matrix pi_pr; - (void) pi_pr; // dummy to suppress unused var warning - if (jacobian__) - pi_pr = in__.vector_constrain(N,lp__); - else - pi_pr = in__.vector_constrain(N); - - Eigen::Matrix rhoRew_pr; - (void) rhoRew_pr; // dummy to suppress unused var warning - if (jacobian__) - rhoRew_pr = in__.vector_constrain(N,lp__); - else - rhoRew_pr = in__.vector_constrain(N); - - Eigen::Matrix rhoPun_pr; - (void) rhoPun_pr; // dummy to suppress unused var warning - if (jacobian__) - rhoPun_pr = in__.vector_constrain(N,lp__); - else - rhoPun_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 28; - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to 
suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - current_statement_begin__ = 32; - validate_non_negative_index("rhoRew", "N", N); - Eigen::Matrix rhoRew(static_cast(N)); - (void) rhoRew; // dummy to suppress unused var warning - - stan::math::initialize(rhoRew, DUMMY_VAR__); - stan::math::fill(rhoRew,DUMMY_VAR__); - current_statement_begin__ = 33; - validate_non_negative_index("rhoPun", "N", N); - Eigen::Matrix rhoPun(static_cast(N)); - (void) rhoPun; // dummy to suppress unused var warning - - stan::math::initialize(rhoPun, DUMMY_VAR__); - stan::math::fill(rhoPun,DUMMY_VAR__); - - - current_statement_begin__ = 35; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 36; - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - current_statement_begin__ = 37; - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - current_statement_begin__ = 39; - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - current_statement_begin__ = 40; - stan::math::assign(pi, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),pi_pr))); - current_statement_begin__ = 41; - stan::math::assign(rhoRew, stan::math::exp(add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),rhoRew_pr)))); - current_statement_begin__ = 42; - stan::math::assign(rhoPun, stan::math::exp(add(get_base1(mu_p,6,"mu_p",1),multiply(get_base1(sigma,6,"sigma",1),rhoPun_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if 
(stan::math::is_uninitialized(xi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: xi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ep(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ep" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(b(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: b" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(pi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: pi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rhoRew(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rhoRew" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rhoPun(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rhoPun" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 28; - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - current_statement_begin__ = 29; - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - current_statement_begin__ = 30; - current_statement_begin__ = 31; - current_statement_begin__ = 32; - check_greater_or_equal(function__,"rhoRew",rhoRew,0); - current_statement_begin__ = 33; - 
check_greater_or_equal(function__,"rhoPun",rhoPun,0); - - // model body - - current_statement_begin__ = 48; - lp_accum__.add(normal_log(get_base1(mu_p,1,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 49; - lp_accum__.add(normal_log(get_base1(mu_p,2,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 50; - lp_accum__.add(normal_log(get_base1(mu_p,3,"mu_p",1), 0, 10.0)); - current_statement_begin__ = 51; - lp_accum__.add(normal_log(get_base1(mu_p,4,"mu_p",1), 0, 10.0)); - current_statement_begin__ = 52; - lp_accum__.add(normal_log(get_base1(mu_p,5,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 53; - lp_accum__.add(normal_log(get_base1(mu_p,6,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 54; - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 2), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - current_statement_begin__ = 55; - lp_accum__.add(cauchy_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(3, 4), stan::model::nil_index_list()), "sigma"), 0, 1.0)); - current_statement_begin__ = 56; - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(5, 6), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - current_statement_begin__ = 59; - lp_accum__.add(normal_log(xi_pr, 0, 1.0)); - current_statement_begin__ = 60; - lp_accum__.add(normal_log(ep_pr, 0, 1.0)); - current_statement_begin__ = 61; - lp_accum__.add(normal_log(b_pr, 0, 1.0)); - current_statement_begin__ = 62; - lp_accum__.add(normal_log(pi_pr, 0, 1.0)); - current_statement_begin__ = 63; - lp_accum__.add(normal_log(rhoRew_pr, 0, 1.0)); - current_statement_begin__ = 64; - lp_accum__.add(normal_log(rhoPun_pr, 0, 1.0)); - current_statement_begin__ = 66; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 67; - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to 
suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - current_statement_begin__ = 68; - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - current_statement_begin__ = 69; - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - current_statement_begin__ = 70; - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - current_statement_begin__ = 71; - validate_non_negative_index("sv", "4", 4); - Eigen::Matrix sv(static_cast(4)); - (void) sv; // dummy to suppress unused var warning - - stan::math::initialize(sv, DUMMY_VAR__); - stan::math::fill(sv,DUMMY_VAR__); - current_statement_begin__ = 72; - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - current_statement_begin__ = 74; - stan::math::assign(wv_g, initV); - current_statement_begin__ = 75; - stan::math::assign(wv_ng, initV); - current_statement_begin__ = 76; - stan::math::assign(qv_g, initV); - current_statement_begin__ = 77; - stan::math::assign(qv_ng, initV); - current_statement_begin__ = 78; - stan::math::assign(sv, initV); - current_statement_begin__ = 80; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 81; - stan::model::assign(wv_g, - 
stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - ((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)) + (get_base1(pi,i,"pi",1) * get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))), - "assigning variable wv_g"); - current_statement_begin__ = 82; - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - current_statement_begin__ = 83; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - current_statement_begin__ = 84; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - current_statement_begin__ = 85; - lp_accum__.add(bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2), get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1))); - current_statement_begin__ = 88; - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 89; - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - 
stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - } else { - - current_statement_begin__ = 91; - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - } - current_statement_begin__ = 95; - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - current_statement_begin__ = 96; - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 97; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - current_statement_begin__ = 99; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * 
get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } - } else { - - current_statement_begin__ = 102; - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 103; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } else { - - current_statement_begin__ = 105; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return 
log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("xi_pr"); - names__.push_back("ep_pr"); - names__.push_back("b_pr"); - names__.push_back("pi_pr"); - names__.push_back("rhoRew_pr"); - names__.push_back("rhoPun_pr"); - names__.push_back("xi"); - names__.push_back("ep"); - names__.push_back("b"); - names__.push_back("pi"); - names__.push_back("rhoRew"); - names__.push_back("rhoPun"); - names__.push_back("mu_xi"); - names__.push_back("mu_ep"); - names__.push_back("mu_b"); - names__.push_back("mu_pi"); - names__.push_back("mu_rhoRew"); - names__.push_back("mu_rhoPun"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(6); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(6); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); 
- dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_gng_m4_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(6); - vector_d sigma = in__.vector_lb_constrain(0,6); - vector_d xi_pr = in__.vector_constrain(N); - vector_d ep_pr = in__.vector_constrain(N); - vector_d b_pr = in__.vector_constrain(N); - vector_d pi_pr = in__.vector_constrain(N); - vector_d rhoRew_pr = in__.vector_constrain(N); - vector_d rhoPun_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 6; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 6; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rhoRew_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rhoPun_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy 
to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 28; - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - current_statement_begin__ = 32; - validate_non_negative_index("rhoRew", "N", N); - Eigen::Matrix rhoRew(static_cast(N)); - (void) rhoRew; // dummy to suppress unused var warning - - stan::math::initialize(rhoRew, DUMMY_VAR__); - stan::math::fill(rhoRew,DUMMY_VAR__); - current_statement_begin__ = 33; - validate_non_negative_index("rhoPun", "N", N); - Eigen::Matrix rhoPun(static_cast(N)); - (void) rhoPun; // dummy to suppress unused var warning - - stan::math::initialize(rhoPun, DUMMY_VAR__); - stan::math::fill(rhoPun,DUMMY_VAR__); - - - current_statement_begin__ = 35; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 36; - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + 
(get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - current_statement_begin__ = 37; - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - current_statement_begin__ = 39; - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - current_statement_begin__ = 40; - stan::math::assign(pi, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),pi_pr))); - current_statement_begin__ = 41; - stan::math::assign(rhoRew, stan::math::exp(add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),rhoRew_pr)))); - current_statement_begin__ = 42; - stan::math::assign(rhoPun, stan::math::exp(add(get_base1(mu_p,6,"mu_p",1),multiply(get_base1(sigma,6,"sigma",1),rhoPun_pr)))); - - // validate transformed parameters - current_statement_begin__ = 28; - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - current_statement_begin__ = 29; - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - current_statement_begin__ = 30; - current_statement_begin__ = 31; - current_statement_begin__ = 32; - check_greater_or_equal(function__,"rhoRew",rhoRew,0); - current_statement_begin__ = 33; - check_greater_or_equal(function__,"rhoPun",rhoPun,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rhoRew[k_0__]); - } - for (int k_0__ = 
0; k_0__ < N; ++k_0__) { - vars__.push_back(rhoPun[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 113; - local_scalar_t__ mu_xi; - (void) mu_xi; // dummy to suppress unused var warning - - stan::math::initialize(mu_xi, DUMMY_VAR__); - stan::math::fill(mu_xi,DUMMY_VAR__); - current_statement_begin__ = 114; - local_scalar_t__ mu_ep; - (void) mu_ep; // dummy to suppress unused var warning - - stan::math::initialize(mu_ep, DUMMY_VAR__); - stan::math::fill(mu_ep,DUMMY_VAR__); - current_statement_begin__ = 115; - local_scalar_t__ mu_b; - (void) mu_b; // dummy to suppress unused var warning - - stan::math::initialize(mu_b, DUMMY_VAR__); - stan::math::fill(mu_b,DUMMY_VAR__); - current_statement_begin__ = 116; - local_scalar_t__ mu_pi; - (void) mu_pi; // dummy to suppress unused var warning - - stan::math::initialize(mu_pi, DUMMY_VAR__); - stan::math::fill(mu_pi,DUMMY_VAR__); - current_statement_begin__ = 117; - local_scalar_t__ mu_rhoRew; - (void) mu_rhoRew; // dummy to suppress unused var warning - - stan::math::initialize(mu_rhoRew, DUMMY_VAR__); - stan::math::fill(mu_rhoRew,DUMMY_VAR__); - current_statement_begin__ = 118; - local_scalar_t__ mu_rhoPun; - (void) mu_rhoPun; // dummy to suppress unused var warning - - stan::math::initialize(mu_rhoPun, DUMMY_VAR__); - stan::math::fill(mu_rhoPun,DUMMY_VAR__); - current_statement_begin__ = 120; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 123; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 126; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 127; - for (int t = 1; t <= T; ++t) { - - 
current_statement_begin__ = 128; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 132; - stan::math::assign(mu_xi, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 133; - stan::math::assign(mu_ep, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 134; - stan::math::assign(mu_b, get_base1(mu_p,3,"mu_p",1)); - current_statement_begin__ = 135; - stan::math::assign(mu_pi, get_base1(mu_p,4,"mu_p",1)); - current_statement_begin__ = 136; - stan::math::assign(mu_rhoRew, stan::math::exp(get_base1(mu_p,5,"mu_p",1))); - current_statement_begin__ = 137; - stan::math::assign(mu_rhoPun, stan::math::exp(get_base1(mu_p,6,"mu_p",1))); - - current_statement_begin__ = 140; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 141; - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - current_statement_begin__ = 142; - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - current_statement_begin__ = 143; - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - current_statement_begin__ = 144; - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - 
current_statement_begin__ = 145; - validate_non_negative_index("sv", "4", 4); - Eigen::Matrix sv(static_cast(4)); - (void) sv; // dummy to suppress unused var warning - - stan::math::initialize(sv, DUMMY_VAR__); - stan::math::fill(sv,DUMMY_VAR__); - current_statement_begin__ = 146; - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - current_statement_begin__ = 148; - stan::math::assign(wv_g, initV); - current_statement_begin__ = 149; - stan::math::assign(wv_ng, initV); - current_statement_begin__ = 150; - stan::math::assign(qv_g, initV); - current_statement_begin__ = 151; - stan::math::assign(qv_ng, initV); - current_statement_begin__ = 152; - stan::math::assign(sv, initV); - current_statement_begin__ = 154; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 156; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 157; - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - ((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)) + (get_base1(pi,i,"pi",1) * get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))), - "assigning variable wv_g"); - current_statement_begin__ = 158; - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - current_statement_begin__ = 159; - stan::model::assign(pGo, - 
stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - current_statement_begin__ = 160; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - current_statement_begin__ = 161; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2),get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1)))), - "assigning variable log_lik"); - current_statement_begin__ = 164; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1), base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 167; - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 168; - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - 
get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - } else { - - current_statement_begin__ = 170; - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - } - current_statement_begin__ = 174; - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - current_statement_begin__ = 175; - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 176; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - current_statement_begin__ = 178; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } - } else { - - current_statement_begin__ = 181; - if 
(as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 182; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } else { - - current_statement_begin__ = 184; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - } - - // validate generated quantities - current_statement_begin__ = 113; - check_greater_or_equal(function__,"mu_xi",mu_xi,0); - check_less_or_equal(function__,"mu_xi",mu_xi,1); - current_statement_begin__ = 114; - check_greater_or_equal(function__,"mu_ep",mu_ep,0); - check_less_or_equal(function__,"mu_ep",mu_ep,1); - current_statement_begin__ = 115; - current_statement_begin__ = 116; - current_statement_begin__ = 117; - check_greater_or_equal(function__,"mu_rhoRew",mu_rhoRew,0); - current_statement_begin__ = 118; - check_greater_or_equal(function__,"mu_rhoPun",mu_rhoPun,0); - current_statement_begin__ = 120; - current_statement_begin__ = 123; - - // write generated quantities - vars__.push_back(mu_xi); - vars__.push_back(mu_ep); - vars__.push_back(mu_b); - vars__.push_back(mu_pi); - vars__.push_back(mu_rhoRew); - 
vars__.push_back(mu_rhoPun); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_gng_m4"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoRew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoPun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoRew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoPun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rhoRew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rhoPun"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoRew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoPun_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoRew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoPun" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rhoRew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rhoPun"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_gng_m4_reg_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_gng_m4_reg"); - reader.add_event(204, 202, "end", "model_gng_m4_reg"); - return reader; -} - -class model_gng_m4_reg : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > pressed; - vector > cue; - vector_d initV; -public: - model_gng_m4_reg(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_gng_m4_reg(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_gng_m4_reg_namespace::model_gng_m4_reg"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "N", 
"int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 4; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 5; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 6; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - context__.validate_dims("data initialization", "pressed", "int", context__.to_vec(N,T)); - validate_non_negative_index("pressed", "N", N); - validate_non_negative_index("pressed", "T", T); - pressed = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("pressed"); - pos__ = 0; - size_t pressed_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < pressed_limit_1__; ++i_1__) { - size_t pressed_limit_0__ = N; - for 
(size_t i_0__ = 0; i_0__ < pressed_limit_0__; ++i_0__) { - pressed[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 8; - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - context__.validate_dims("data initialization", "cue", "int", context__.to_vec(N,T)); - validate_non_negative_index("cue", "N", N); - validate_non_negative_index("cue", "T", T); - cue = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("cue"); - pos__ = 0; - size_t cue_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cue_limit_1__; ++i_1__) { - size_t cue_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cue_limit_0__; ++i_0__) { - cue[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 3; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 4; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 6; - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],-(1)); - check_less_or_equal(function__,"pressed[k0__][k1__]",pressed[k0__][k1__],1); - } - } - current_statement_begin__ = 8; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],1); - check_less_or_equal(function__,"cue[k0__][k1__]",cue[k0__][k1__],4); - } - } - // initialize data variables - current_statement_begin__ = 12; - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 13; - stan::math::assign(initV, rep_vector(0.0,4)); - - // 
validate transformed data - current_statement_begin__ = 12; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 18; - validate_non_negative_index("mu_p", "6", 6); - num_params_r__ += 6; - current_statement_begin__ = 19; - validate_non_negative_index("sigma", "6", 6); - num_params_r__ += 6; - current_statement_begin__ = 20; - validate_non_negative_index("xi_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 21; - validate_non_negative_index("ep_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 22; - validate_non_negative_index("b_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 23; - validate_non_negative_index("pi_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 24; - validate_non_negative_index("rhoRew_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 25; - validate_non_negative_index("rhoPun_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_gng_m4_reg() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "6", 6); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(6)); - vector_d mu_p(static_cast(6)); - for (int j1__ = 0U; j1__ < 6; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - 
writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "6", 6); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(6)); - vector_d sigma(static_cast(6)); - for (int j1__ = 0U; j1__ < 6; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("xi_pr"))) - throw std::runtime_error("variable xi_pr missing"); - vals_r__ = context__.vals_r("xi_pr"); - pos__ = 0U; - validate_non_negative_index("xi_pr", "N", N); - context__.validate_dims("initialization", "xi_pr", "vector_d", context__.to_vec(N)); - vector_d xi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - xi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(xi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable xi_pr: ") + e.what()); - } - - if (!(context__.contains_r("ep_pr"))) - throw std::runtime_error("variable ep_pr missing"); - vals_r__ = context__.vals_r("ep_pr"); - pos__ = 0U; - validate_non_negative_index("ep_pr", "N", N); - context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); - vector_d ep_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ep_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ep_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); - } - - if (!(context__.contains_r("b_pr"))) - throw std::runtime_error("variable b_pr missing"); - vals_r__ = 
context__.vals_r("b_pr"); - pos__ = 0U; - validate_non_negative_index("b_pr", "N", N); - context__.validate_dims("initialization", "b_pr", "vector_d", context__.to_vec(N)); - vector_d b_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - b_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(b_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable b_pr: ") + e.what()); - } - - if (!(context__.contains_r("pi_pr"))) - throw std::runtime_error("variable pi_pr missing"); - vals_r__ = context__.vals_r("pi_pr"); - pos__ = 0U; - validate_non_negative_index("pi_pr", "N", N); - context__.validate_dims("initialization", "pi_pr", "vector_d", context__.to_vec(N)); - vector_d pi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - pi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(pi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable pi_pr: ") + e.what()); - } - - if (!(context__.contains_r("rhoRew_pr"))) - throw std::runtime_error("variable rhoRew_pr missing"); - vals_r__ = context__.vals_r("rhoRew_pr"); - pos__ = 0U; - validate_non_negative_index("rhoRew_pr", "N", N); - context__.validate_dims("initialization", "rhoRew_pr", "vector_d", context__.to_vec(N)); - vector_d rhoRew_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rhoRew_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rhoRew_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rhoRew_pr: ") + e.what()); - } - - if (!(context__.contains_r("rhoPun_pr"))) - throw std::runtime_error("variable rhoPun_pr missing"); - vals_r__ = context__.vals_r("rhoPun_pr"); - pos__ = 0U; - validate_non_negative_index("rhoPun_pr", "N", N); - context__.validate_dims("initialization", "rhoPun_pr", "vector_d", context__.to_vec(N)); - vector_d rhoPun_pr(static_cast(N)); - for (int j1__ = 
0U; j1__ < N; ++j1__) - rhoPun_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rhoPun_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rhoPun_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(6,lp__); - else - mu_p = in__.vector_constrain(6); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,6,lp__); - else - sigma = in__.vector_lb_constrain(0,6); - - Eigen::Matrix xi_pr; - (void) xi_pr; // dummy to suppress unused var warning - if (jacobian__) - xi_pr = in__.vector_constrain(N,lp__); - else - xi_pr = in__.vector_constrain(N); - - Eigen::Matrix ep_pr; - (void) ep_pr; // dummy to suppress unused var warning - if (jacobian__) - ep_pr = in__.vector_constrain(N,lp__); - else - ep_pr = in__.vector_constrain(N); - - Eigen::Matrix b_pr; - (void) b_pr; // dummy to suppress unused var warning - if (jacobian__) - b_pr = in__.vector_constrain(N,lp__); - else - b_pr = 
in__.vector_constrain(N); - - Eigen::Matrix pi_pr; - (void) pi_pr; // dummy to suppress unused var warning - if (jacobian__) - pi_pr = in__.vector_constrain(N,lp__); - else - pi_pr = in__.vector_constrain(N); - - Eigen::Matrix rhoRew_pr; - (void) rhoRew_pr; // dummy to suppress unused var warning - if (jacobian__) - rhoRew_pr = in__.vector_constrain(N,lp__); - else - rhoRew_pr = in__.vector_constrain(N); - - Eigen::Matrix rhoPun_pr; - (void) rhoPun_pr; // dummy to suppress unused var warning - if (jacobian__) - rhoPun_pr = in__.vector_constrain(N,lp__); - else - rhoPun_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 29; - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - current_statement_begin__ = 32; - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - current_statement_begin__ = 33; - validate_non_negative_index("rhoRew", "N", N); - Eigen::Matrix rhoRew(static_cast(N)); - (void) rhoRew; // dummy to suppress unused var warning - - stan::math::initialize(rhoRew, DUMMY_VAR__); - stan::math::fill(rhoRew,DUMMY_VAR__); - current_statement_begin__ = 34; - validate_non_negative_index("rhoPun", "N", N); - Eigen::Matrix 
rhoPun(static_cast(N)); - (void) rhoPun; // dummy to suppress unused var warning - - stan::math::initialize(rhoPun, DUMMY_VAR__); - stan::math::fill(rhoPun,DUMMY_VAR__); - - - current_statement_begin__ = 36; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 37; - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - current_statement_begin__ = 38; - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - current_statement_begin__ = 40; - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - current_statement_begin__ = 41; - stan::math::assign(pi, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),pi_pr))); - current_statement_begin__ = 42; - stan::math::assign(rhoRew, stan::math::exp(add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),rhoRew_pr)))); - current_statement_begin__ = 43; - stan::math::assign(rhoPun, stan::math::exp(add(get_base1(mu_p,6,"mu_p",1),multiply(get_base1(sigma,6,"sigma",1),rhoPun_pr)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(xi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: xi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ep(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ep" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if 
(stan::math::is_uninitialized(b(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: b" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(pi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: pi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rhoRew(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rhoRew" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rhoPun(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rhoPun" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 29; - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - current_statement_begin__ = 30; - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - current_statement_begin__ = 31; - current_statement_begin__ = 32; - current_statement_begin__ = 33; - check_greater_or_equal(function__,"rhoRew",rhoRew,0); - current_statement_begin__ = 34; - check_greater_or_equal(function__,"rhoPun",rhoPun,0); - - // model body - - current_statement_begin__ = 49; - lp_accum__.add(normal_log(get_base1(mu_p,1,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 50; - lp_accum__.add(normal_log(get_base1(mu_p,2,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 51; - lp_accum__.add(normal_log(get_base1(mu_p,3,"mu_p",1), 0, 10.0)); - current_statement_begin__ = 52; - lp_accum__.add(normal_log(get_base1(mu_p,4,"mu_p",1), 0, 10.0)); - current_statement_begin__ = 53; - 
lp_accum__.add(normal_log(get_base1(mu_p,5,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 54; - lp_accum__.add(normal_log(get_base1(mu_p,6,"mu_p",1), 0, 1.0)); - current_statement_begin__ = 55; - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 2), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - current_statement_begin__ = 56; - lp_accum__.add(cauchy_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(3, 4), stan::model::nil_index_list()), "sigma"), 0, 1.0)); - current_statement_begin__ = 57; - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(5, 6), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - current_statement_begin__ = 60; - lp_accum__.add(normal_log(xi_pr, 0, 1.0)); - current_statement_begin__ = 61; - lp_accum__.add(normal_log(ep_pr, 0, 1.0)); - current_statement_begin__ = 62; - lp_accum__.add(normal_log(b_pr, 0, 1.0)); - current_statement_begin__ = 63; - lp_accum__.add(normal_log(pi_pr, 0, 1.0)); - current_statement_begin__ = 64; - lp_accum__.add(normal_log(rhoRew_pr, 0, 1.0)); - current_statement_begin__ = 65; - lp_accum__.add(normal_log(rhoPun_pr, 0, 1.0)); - current_statement_begin__ = 67; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 68; - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - current_statement_begin__ = 69; - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - current_statement_begin__ = 70; - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; 
// dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - current_statement_begin__ = 71; - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - current_statement_begin__ = 72; - validate_non_negative_index("sv", "4", 4); - Eigen::Matrix sv(static_cast(4)); - (void) sv; // dummy to suppress unused var warning - - stan::math::initialize(sv, DUMMY_VAR__); - stan::math::fill(sv,DUMMY_VAR__); - current_statement_begin__ = 73; - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - current_statement_begin__ = 75; - stan::math::assign(wv_g, initV); - current_statement_begin__ = 76; - stan::math::assign(wv_ng, initV); - current_statement_begin__ = 77; - stan::math::assign(qv_g, initV); - current_statement_begin__ = 78; - stan::math::assign(qv_ng, initV); - current_statement_begin__ = 79; - stan::math::assign(sv, initV); - current_statement_begin__ = 81; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 82; - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - ((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)) + (get_base1(pi,i,"pi",1) * get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))), - "assigning variable wv_g"); - current_statement_begin__ = 83; - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - 
get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - current_statement_begin__ = 84; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - current_statement_begin__ = 85; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - current_statement_begin__ = 86; - lp_accum__.add(bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2), get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1))); - current_statement_begin__ = 89; - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 90; - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - } else { - - current_statement_begin__ = 92; - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + 
(get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - } - current_statement_begin__ = 96; - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - current_statement_begin__ = 97; - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 98; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - current_statement_begin__ = 100; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } - } else { - - current_statement_begin__ = 103; - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 104; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * 
((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } else { - - current_statement_begin__ = 106; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("xi_pr"); - names__.push_back("ep_pr"); - names__.push_back("b_pr"); - names__.push_back("pi_pr"); - names__.push_back("rhoRew_pr"); - names__.push_back("rhoPun_pr"); - names__.push_back("xi"); - names__.push_back("ep"); - names__.push_back("b"); - names__.push_back("pi"); - names__.push_back("rhoRew"); - names__.push_back("rhoPun"); - names__.push_back("mu_xi"); - names__.push_back("mu_ep"); - 
names__.push_back("mu_b"); - names__.push_back("mu_pi"); - names__.push_back("mu_rhoRew"); - names__.push_back("mu_rhoPun"); - names__.push_back("log_lik"); - names__.push_back("Qgo"); - names__.push_back("Qnogo"); - names__.push_back("Wgo"); - names__.push_back("Wnogo"); - names__.push_back("SV"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(6); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(6); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - 
dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_gng_m4_reg_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(6); - vector_d sigma = in__.vector_lb_constrain(0,6); - vector_d xi_pr = in__.vector_constrain(N); - vector_d ep_pr = in__.vector_constrain(N); - vector_d b_pr = in__.vector_constrain(N); - vector_d pi_pr = in__.vector_constrain(N); - vector_d rhoRew_pr = in__.vector_constrain(N); - vector_d rhoPun_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 6; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 6; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rhoRew_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rhoPun_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - 
(void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 29; - validate_non_negative_index("xi", "N", N); - Eigen::Matrix xi(static_cast(N)); - (void) xi; // dummy to suppress unused var warning - - stan::math::initialize(xi, DUMMY_VAR__); - stan::math::fill(xi,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("ep", "N", N); - Eigen::Matrix ep(static_cast(N)); - (void) ep; // dummy to suppress unused var warning - - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("b", "N", N); - Eigen::Matrix b(static_cast(N)); - (void) b; // dummy to suppress unused var warning - - stan::math::initialize(b, DUMMY_VAR__); - stan::math::fill(b,DUMMY_VAR__); - current_statement_begin__ = 32; - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - current_statement_begin__ = 33; - validate_non_negative_index("rhoRew", "N", N); - Eigen::Matrix rhoRew(static_cast(N)); - (void) rhoRew; // dummy to suppress unused var warning - - stan::math::initialize(rhoRew, DUMMY_VAR__); - stan::math::fill(rhoRew,DUMMY_VAR__); - current_statement_begin__ = 34; - validate_non_negative_index("rhoPun", "N", N); - Eigen::Matrix rhoPun(static_cast(N)); - (void) rhoPun; // dummy to suppress unused var warning - - stan::math::initialize(rhoPun, DUMMY_VAR__); - stan::math::fill(rhoPun,DUMMY_VAR__); - - - current_statement_begin__ = 36; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 37; - stan::model::assign(xi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 
Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(xi_pr,i,"xi_pr",1)))), - "assigning variable xi"); - current_statement_begin__ = 38; - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - } - current_statement_begin__ = 40; - stan::math::assign(b, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),b_pr))); - current_statement_begin__ = 41; - stan::math::assign(pi, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),pi_pr))); - current_statement_begin__ = 42; - stan::math::assign(rhoRew, stan::math::exp(add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),rhoRew_pr)))); - current_statement_begin__ = 43; - stan::math::assign(rhoPun, stan::math::exp(add(get_base1(mu_p,6,"mu_p",1),multiply(get_base1(sigma,6,"sigma",1),rhoPun_pr)))); - - // validate transformed parameters - current_statement_begin__ = 29; - check_greater_or_equal(function__,"xi",xi,0); - check_less_or_equal(function__,"xi",xi,1); - current_statement_begin__ = 30; - check_greater_or_equal(function__,"ep",ep,0); - check_less_or_equal(function__,"ep",ep,1); - current_statement_begin__ = 31; - current_statement_begin__ = 32; - current_statement_begin__ = 33; - check_greater_or_equal(function__,"rhoRew",rhoRew,0); - current_statement_begin__ = 34; - check_greater_or_equal(function__,"rhoPun",rhoPun,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(xi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(b[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - 
vars__.push_back(rhoRew[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rhoPun[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 114; - local_scalar_t__ mu_xi; - (void) mu_xi; // dummy to suppress unused var warning - - stan::math::initialize(mu_xi, DUMMY_VAR__); - stan::math::fill(mu_xi,DUMMY_VAR__); - current_statement_begin__ = 115; - local_scalar_t__ mu_ep; - (void) mu_ep; // dummy to suppress unused var warning - - stan::math::initialize(mu_ep, DUMMY_VAR__); - stan::math::fill(mu_ep,DUMMY_VAR__); - current_statement_begin__ = 116; - local_scalar_t__ mu_b; - (void) mu_b; // dummy to suppress unused var warning - - stan::math::initialize(mu_b, DUMMY_VAR__); - stan::math::fill(mu_b,DUMMY_VAR__); - current_statement_begin__ = 117; - local_scalar_t__ mu_pi; - (void) mu_pi; // dummy to suppress unused var warning - - stan::math::initialize(mu_pi, DUMMY_VAR__); - stan::math::fill(mu_pi,DUMMY_VAR__); - current_statement_begin__ = 118; - local_scalar_t__ mu_rhoRew; - (void) mu_rhoRew; // dummy to suppress unused var warning - - stan::math::initialize(mu_rhoRew, DUMMY_VAR__); - stan::math::fill(mu_rhoRew,DUMMY_VAR__); - current_statement_begin__ = 119; - local_scalar_t__ mu_rhoPun; - (void) mu_rhoPun; // dummy to suppress unused var warning - - stan::math::initialize(mu_rhoPun, DUMMY_VAR__); - stan::math::fill(mu_rhoPun,DUMMY_VAR__); - current_statement_begin__ = 120; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 121; - validate_non_negative_index("Qgo", "N", N); - validate_non_negative_index("Qgo", "T", T); - vector > Qgo(N, (vector(T))); - stan::math::initialize(Qgo, DUMMY_VAR__); - stan::math::fill(Qgo,DUMMY_VAR__); - current_statement_begin__ = 122; - validate_non_negative_index("Qnogo", "N", N); - 
validate_non_negative_index("Qnogo", "T", T); - vector > Qnogo(N, (vector(T))); - stan::math::initialize(Qnogo, DUMMY_VAR__); - stan::math::fill(Qnogo,DUMMY_VAR__); - current_statement_begin__ = 123; - validate_non_negative_index("Wgo", "N", N); - validate_non_negative_index("Wgo", "T", T); - vector > Wgo(N, (vector(T))); - stan::math::initialize(Wgo, DUMMY_VAR__); - stan::math::fill(Wgo,DUMMY_VAR__); - current_statement_begin__ = 124; - validate_non_negative_index("Wnogo", "N", N); - validate_non_negative_index("Wnogo", "T", T); - vector > Wnogo(N, (vector(T))); - stan::math::initialize(Wnogo, DUMMY_VAR__); - stan::math::fill(Wnogo,DUMMY_VAR__); - current_statement_begin__ = 125; - validate_non_negative_index("SV", "N", N); - validate_non_negative_index("SV", "T", T); - vector > SV(N, (vector(T))); - stan::math::initialize(SV, DUMMY_VAR__); - stan::math::fill(SV,DUMMY_VAR__); - current_statement_begin__ = 128; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 131; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 132; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 133; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 137; - stan::math::assign(mu_xi, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 138; - stan::math::assign(mu_ep, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 139; - stan::math::assign(mu_b, get_base1(mu_p,3,"mu_p",1)); - current_statement_begin__ = 140; - stan::math::assign(mu_pi, get_base1(mu_p,4,"mu_p",1)); - current_statement_begin__ = 141; - stan::math::assign(mu_rhoRew, 
stan::math::exp(get_base1(mu_p,5,"mu_p",1))); - current_statement_begin__ = 142; - stan::math::assign(mu_rhoPun, stan::math::exp(get_base1(mu_p,6,"mu_p",1))); - - current_statement_begin__ = 145; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 146; - validate_non_negative_index("wv_g", "4", 4); - Eigen::Matrix wv_g(static_cast(4)); - (void) wv_g; // dummy to suppress unused var warning - - stan::math::initialize(wv_g, DUMMY_VAR__); - stan::math::fill(wv_g,DUMMY_VAR__); - current_statement_begin__ = 147; - validate_non_negative_index("wv_ng", "4", 4); - Eigen::Matrix wv_ng(static_cast(4)); - (void) wv_ng; // dummy to suppress unused var warning - - stan::math::initialize(wv_ng, DUMMY_VAR__); - stan::math::fill(wv_ng,DUMMY_VAR__); - current_statement_begin__ = 148; - validate_non_negative_index("qv_g", "4", 4); - Eigen::Matrix qv_g(static_cast(4)); - (void) qv_g; // dummy to suppress unused var warning - - stan::math::initialize(qv_g, DUMMY_VAR__); - stan::math::fill(qv_g,DUMMY_VAR__); - current_statement_begin__ = 149; - validate_non_negative_index("qv_ng", "4", 4); - Eigen::Matrix qv_ng(static_cast(4)); - (void) qv_ng; // dummy to suppress unused var warning - - stan::math::initialize(qv_ng, DUMMY_VAR__); - stan::math::fill(qv_ng,DUMMY_VAR__); - current_statement_begin__ = 150; - validate_non_negative_index("sv", "4", 4); - Eigen::Matrix sv(static_cast(4)); - (void) sv; // dummy to suppress unused var warning - - stan::math::initialize(sv, DUMMY_VAR__); - stan::math::fill(sv,DUMMY_VAR__); - current_statement_begin__ = 151; - validate_non_negative_index("pGo", "4", 4); - Eigen::Matrix pGo(static_cast(4)); - (void) pGo; // dummy to suppress unused var warning - - stan::math::initialize(pGo, DUMMY_VAR__); - stan::math::fill(pGo,DUMMY_VAR__); - - - current_statement_begin__ = 153; - stan::math::assign(wv_g, initV); - current_statement_begin__ = 154; - stan::math::assign(wv_ng, initV); - current_statement_begin__ = 155; - stan::math::assign(qv_g, 
initV); - current_statement_begin__ = 156; - stan::math::assign(qv_ng, initV); - current_statement_begin__ = 157; - stan::math::assign(sv, initV); - current_statement_begin__ = 159; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 161; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 162; - stan::model::assign(wv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - ((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + get_base1(b,i,"b",1)) + (get_base1(pi,i,"pi",1) * get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))), - "assigning variable wv_g"); - current_statement_begin__ = 163; - stan::model::assign(wv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable wv_ng"); - current_statement_begin__ = 164; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - inv_logit((get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1) - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1))), - "assigning variable pGo"); - current_statement_begin__ = 165; - stan::model::assign(pGo, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1) * (1 - get_base1(xi,i,"xi",1))) + (get_base1(xi,i,"xi",1) / 2))), - "assigning variable pGo"); - current_statement_begin__ = 166; - stan::model::assign(log_lik, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2),get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1)))), - "assigning variable log_lik"); - current_statement_begin__ = 169; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(get_base1(pGo,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"pGo",1), base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 172; - stan::model::assign(Qgo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1), - "assigning variable Qgo"); - current_statement_begin__ = 173; - stan::model::assign(Qnogo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1), - "assigning variable Qnogo"); - current_statement_begin__ = 174; - stan::model::assign(Wgo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(wv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_g",1), - "assigning variable Wgo"); - current_statement_begin__ = 175; - stan::model::assign(Wnogo, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(wv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"wv_ng",1), - "assigning variable Wnogo"); - current_statement_begin__ = 176; - stan::model::assign(SV, - stan::model::cons_list(stan::model::index_uni(i), 
stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1), - "assigning variable SV"); - current_statement_begin__ = 179; - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 180; - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - } else { - - current_statement_begin__ = 182; - stan::model::assign(sv, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(sv,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"sv",1))))), - "assigning variable sv"); - } - current_statement_begin__ = 186; - if (as_bool(get_base1(get_base1(pressed,i,"pressed",1),t,"pressed",2))) { - - current_statement_begin__ = 187; - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 188; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - 
get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } else { - - current_statement_begin__ = 190; - stan::model::assign(qv_g, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_g,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_g",1))))), - "assigning variable qv_g"); - } - } else { - - current_statement_begin__ = 193; - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 194; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoRew,i,"rhoRew",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } else { - - current_statement_begin__ = 196; - stan::model::assign(qv_ng, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(cue,i,"cue",1),t,"cue",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1) + (get_base1(ep,i,"ep",1) * ((get_base1(rhoPun,i,"rhoPun",1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(qv_ng,get_base1(get_base1(cue,i,"cue",1),t,"cue",2),"qv_ng",1))))), - "assigning variable qv_ng"); - } - } - } - } - } - - // validate generated quantities - current_statement_begin__ = 114; - check_greater_or_equal(function__,"mu_xi",mu_xi,0); - 
check_less_or_equal(function__,"mu_xi",mu_xi,1); - current_statement_begin__ = 115; - check_greater_or_equal(function__,"mu_ep",mu_ep,0); - check_less_or_equal(function__,"mu_ep",mu_ep,1); - current_statement_begin__ = 116; - current_statement_begin__ = 117; - current_statement_begin__ = 118; - check_greater_or_equal(function__,"mu_rhoRew",mu_rhoRew,0); - current_statement_begin__ = 119; - check_greater_or_equal(function__,"mu_rhoPun",mu_rhoPun,0); - current_statement_begin__ = 120; - current_statement_begin__ = 121; - current_statement_begin__ = 122; - current_statement_begin__ = 123; - current_statement_begin__ = 124; - current_statement_begin__ = 125; - current_statement_begin__ = 128; - - // write generated quantities - vars__.push_back(mu_xi); - vars__.push_back(mu_ep); - vars__.push_back(mu_b); - vars__.push_back(mu_pi); - vars__.push_back(mu_rhoRew); - vars__.push_back(mu_rhoPun); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Qgo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Qnogo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Wgo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Wnogo[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(SV[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw 
std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_gng_m4_reg"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoRew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoPun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoRew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoPun" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rhoRew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rhoPun"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wgo" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "SV" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoRew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoPun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "xi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "b" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoRew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rhoPun" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_xi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_b"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rhoRew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rhoPun"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qgo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Qnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wgo" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Wnogo" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "SV" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_igt_orl_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_igt_orl"); - reader.add_event(206, 204, "end", "model_igt_orl"); - return reader; -} - -class model_igt_orl : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > sign_out; - vector > choice; - vector_d initV; -public: - model_igt_orl(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_igt_orl(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int 
random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_igt_orl_namespace::model_igt_orl"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t 
i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("sign_out", "N", N); - validate_non_negative_index("sign_out", "T", T); - context__.validate_dims("data initialization", "sign_out", "double", context__.to_vec(N,T)); - validate_non_negative_index("sign_out", "N", N); - validate_non_negative_index("sign_out", "T", T); - sign_out = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("sign_out"); - pos__ = 0; - size_t sign_out_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < sign_out_limit_1__; ++i_1__) { - size_t sign_out_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < sign_out_limit_0__; ++i_0__) { - sign_out[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - 
current_statement_begin__ = 6; - current_statement_begin__ = 7; - // initialize data variables - current_statement_begin__ = 10; - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 11; - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - current_statement_begin__ = 10; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 16; - validate_non_negative_index("mu_p", "5", 5); - num_params_r__ += 5; - current_statement_begin__ = 17; - validate_non_negative_index("sigma", "5", 5); - num_params_r__ += 5; - current_statement_begin__ = 20; - validate_non_negative_index("Arew_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 21; - validate_non_negative_index("Apun_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 22; - validate_non_negative_index("K_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 23; - validate_non_negative_index("betaF_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 24; - validate_non_negative_index("betaP_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_igt_orl() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - 
validate_non_negative_index("mu_p", "5", 5); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(5)); - vector_d mu_p(static_cast(5)); - for (int j1__ = 0U; j1__ < 5; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "5", 5); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(5)); - vector_d sigma(static_cast(5)); - for (int j1__ = 0U; j1__ < 5; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("Arew_pr"))) - throw std::runtime_error("variable Arew_pr missing"); - vals_r__ = context__.vals_r("Arew_pr"); - pos__ = 0U; - validate_non_negative_index("Arew_pr", "N", N); - context__.validate_dims("initialization", "Arew_pr", "vector_d", context__.to_vec(N)); - vector_d Arew_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Arew_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Arew_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Arew_pr: ") + e.what()); - } - - if (!(context__.contains_r("Apun_pr"))) - throw std::runtime_error("variable Apun_pr missing"); - vals_r__ = context__.vals_r("Apun_pr"); - pos__ = 0U; - validate_non_negative_index("Apun_pr", "N", N); - context__.validate_dims("initialization", "Apun_pr", "vector_d", context__.to_vec(N)); - vector_d Apun_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Apun_pr(j1__) = vals_r__[pos__++]; - try { 
- writer__.vector_unconstrain(Apun_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Apun_pr: ") + e.what()); - } - - if (!(context__.contains_r("K_pr"))) - throw std::runtime_error("variable K_pr missing"); - vals_r__ = context__.vals_r("K_pr"); - pos__ = 0U; - validate_non_negative_index("K_pr", "N", N); - context__.validate_dims("initialization", "K_pr", "vector_d", context__.to_vec(N)); - vector_d K_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - K_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(K_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable K_pr: ") + e.what()); - } - - if (!(context__.contains_r("betaF_pr"))) - throw std::runtime_error("variable betaF_pr missing"); - vals_r__ = context__.vals_r("betaF_pr"); - pos__ = 0U; - validate_non_negative_index("betaF_pr", "N", N); - context__.validate_dims("initialization", "betaF_pr", "vector_d", context__.to_vec(N)); - vector_d betaF_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - betaF_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(betaF_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable betaF_pr: ") + e.what()); - } - - if (!(context__.contains_r("betaP_pr"))) - throw std::runtime_error("variable betaP_pr missing"); - vals_r__ = context__.vals_r("betaP_pr"); - pos__ = 0U; - validate_non_negative_index("betaP_pr", "N", N); - context__.validate_dims("initialization", "betaP_pr", "vector_d", context__.to_vec(N)); - vector_d betaP_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - betaP_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(betaP_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable betaP_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = 
writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(5,lp__); - else - mu_p = in__.vector_constrain(5); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,5,lp__); - else - sigma = in__.vector_lb_constrain(0,5); - - Eigen::Matrix Arew_pr; - (void) Arew_pr; // dummy to suppress unused var warning - if (jacobian__) - Arew_pr = in__.vector_constrain(N,lp__); - else - Arew_pr = in__.vector_constrain(N); - - Eigen::Matrix Apun_pr; - (void) Apun_pr; // dummy to suppress unused var warning - if (jacobian__) - Apun_pr = in__.vector_constrain(N,lp__); - else - Apun_pr = in__.vector_constrain(N); - - Eigen::Matrix K_pr; - (void) K_pr; // dummy to suppress unused var warning - if (jacobian__) - K_pr = in__.vector_constrain(N,lp__); - else - K_pr = in__.vector_constrain(N); - - Eigen::Matrix betaF_pr; - (void) betaF_pr; // dummy to suppress unused var warning - if (jacobian__) - betaF_pr = in__.vector_constrain(N,lp__); - else - betaF_pr = in__.vector_constrain(N); - - Eigen::Matrix betaP_pr; - (void) betaP_pr; // dummy to suppress unused 
var warning - if (jacobian__) - betaP_pr = in__.vector_constrain(N,lp__); - else - betaP_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 28; - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("K", "N", N); - Eigen::Matrix K(static_cast(N)); - (void) K; // dummy to suppress unused var warning - - stan::math::initialize(K, DUMMY_VAR__); - stan::math::fill(K,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("betaF", "N", N); - Eigen::Matrix betaF(static_cast(N)); - (void) betaF; // dummy to suppress unused var warning - - stan::math::initialize(betaF, DUMMY_VAR__); - stan::math::fill(betaF,DUMMY_VAR__); - current_statement_begin__ = 32; - validate_non_negative_index("betaP", "N", N); - Eigen::Matrix betaP(static_cast(N)); - (void) betaP; // dummy to suppress unused var warning - - stan::math::initialize(betaP, DUMMY_VAR__); - stan::math::fill(betaP,DUMMY_VAR__); - - - current_statement_begin__ = 34; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 35; - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - current_statement_begin__ = 36; - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + 
(get_base1(sigma,2,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - current_statement_begin__ = 37; - stan::model::assign(K, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx(((get_base1(mu_p,3,"mu_p",1) + get_base1(sigma,3,"sigma",1)) + get_base1(K_pr,i,"K_pr",1))) * 5), - "assigning variable K"); - } - current_statement_begin__ = 39; - stan::math::assign(betaF, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),betaF_pr))); - current_statement_begin__ = 40; - stan::math::assign(betaP, add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),betaP_pr))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Arew(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Arew" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Apun(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Apun" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(K(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: K" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(betaF(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: betaF" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(betaP(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: betaP" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // 
dummy to suppress unused var warning - current_statement_begin__ = 28; - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - current_statement_begin__ = 29; - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - current_statement_begin__ = 30; - check_greater_or_equal(function__,"K",K,0); - check_less_or_equal(function__,"K",K,5); - current_statement_begin__ = 31; - current_statement_begin__ = 32; - - // model body - - current_statement_begin__ = 44; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 45; - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 3), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - current_statement_begin__ = 46; - lp_accum__.add(cauchy_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(4, 5), stan::model::nil_index_list()), "sigma"), 0, 1.0)); - current_statement_begin__ = 49; - lp_accum__.add(normal_log(Arew_pr, 0, 1.0)); - current_statement_begin__ = 50; - lp_accum__.add(normal_log(Apun_pr, 0, 1.0)); - current_statement_begin__ = 51; - lp_accum__.add(normal_log(K_pr, 0, 1.0)); - current_statement_begin__ = 52; - lp_accum__.add(normal_log(betaF_pr, 0, 1.0)); - current_statement_begin__ = 53; - lp_accum__.add(normal_log(betaP_pr, 0, 1.0)); - current_statement_begin__ = 55; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 57; - validate_non_negative_index("ef", "4", 4); - Eigen::Matrix ef(static_cast(4)); - (void) ef; // dummy to suppress unused var warning - - stan::math::initialize(ef, DUMMY_VAR__); - stan::math::fill(ef,DUMMY_VAR__); - current_statement_begin__ = 58; - validate_non_negative_index("ev", "4", 4); - Eigen::Matrix ev(static_cast(4)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - 
current_statement_begin__ = 59; - validate_non_negative_index("PEfreq_fic", "4", 4); - Eigen::Matrix PEfreq_fic(static_cast(4)); - (void) PEfreq_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEfreq_fic, DUMMY_VAR__); - stan::math::fill(PEfreq_fic,DUMMY_VAR__); - current_statement_begin__ = 60; - validate_non_negative_index("PEval_fic", "4", 4); - Eigen::Matrix PEval_fic(static_cast(4)); - (void) PEval_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEval_fic, DUMMY_VAR__); - stan::math::fill(PEval_fic,DUMMY_VAR__); - current_statement_begin__ = 61; - validate_non_negative_index("pers", "4", 4); - Eigen::Matrix pers(static_cast(4)); - (void) pers; // dummy to suppress unused var warning - - stan::math::initialize(pers, DUMMY_VAR__); - stan::math::fill(pers,DUMMY_VAR__); - current_statement_begin__ = 62; - validate_non_negative_index("util", "4", 4); - Eigen::Matrix util(static_cast(4)); - (void) util; // dummy to suppress unused var warning - - stan::math::initialize(util, DUMMY_VAR__); - stan::math::fill(util,DUMMY_VAR__); - current_statement_begin__ = 64; - local_scalar_t__ PEval; - (void) PEval; // dummy to suppress unused var warning - - stan::math::initialize(PEval, DUMMY_VAR__); - stan::math::fill(PEval,DUMMY_VAR__); - current_statement_begin__ = 65; - local_scalar_t__ PEfreq; - (void) PEfreq; // dummy to suppress unused var warning - - stan::math::initialize(PEfreq, DUMMY_VAR__); - stan::math::fill(PEfreq,DUMMY_VAR__); - current_statement_begin__ = 66; - local_scalar_t__ efChosen; - (void) efChosen; // dummy to suppress unused var warning - - stan::math::initialize(efChosen, DUMMY_VAR__); - stan::math::fill(efChosen,DUMMY_VAR__); - current_statement_begin__ = 67; - local_scalar_t__ evChosen; - (void) evChosen; // dummy to suppress unused var warning - - stan::math::initialize(evChosen, DUMMY_VAR__); - stan::math::fill(evChosen,DUMMY_VAR__); - current_statement_begin__ = 68; - local_scalar_t__ K_tr; - (void) 
K_tr; // dummy to suppress unused var warning - - stan::math::initialize(K_tr, DUMMY_VAR__); - stan::math::fill(K_tr,DUMMY_VAR__); - - - current_statement_begin__ = 71; - stan::math::assign(ef, initV); - current_statement_begin__ = 72; - stan::math::assign(ev, initV); - current_statement_begin__ = 73; - stan::math::assign(pers, initV); - current_statement_begin__ = 74; - stan::math::assign(util, initV); - current_statement_begin__ = 75; - stan::math::assign(K_tr, (pow(3,get_base1(K,i,"K",1)) - 1)); - current_statement_begin__ = 77; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 79; - lp_accum__.add(categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), util)); - current_statement_begin__ = 82; - stan::math::assign(PEval, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - current_statement_begin__ = 83; - stan::math::assign(PEfreq, (get_base1(get_base1(sign_out,i,"sign_out",1),t,"sign_out",2) - get_base1(ef,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ef",1))); - current_statement_begin__ = 84; - stan::math::assign(PEfreq_fic, subtract((-(get_base1(get_base1(sign_out,i,"sign_out",1),t,"sign_out",2)) / 3),ef)); - current_statement_begin__ = 87; - stan::math::assign(efChosen, get_base1(ef,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ef",1)); - current_statement_begin__ = 88; - stan::math::assign(evChosen, get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1)); - current_statement_begin__ = 90; - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 92; - stan::math::assign(ef, stan::model::deep_copy(add(ef,multiply(get_base1(Apun,i,"Apun",1),PEfreq_fic)))); - current_statement_begin__ = 94; - stan::model::assign(ef, - 
stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (efChosen + (get_base1(Arew,i,"Arew",1) * PEfreq)), - "assigning variable ef"); - current_statement_begin__ = 95; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (evChosen + (get_base1(Arew,i,"Arew",1) * PEval)), - "assigning variable ev"); - } else { - - current_statement_begin__ = 98; - stan::math::assign(ef, stan::model::deep_copy(add(ef,multiply(get_base1(Arew,i,"Arew",1),PEfreq_fic)))); - current_statement_begin__ = 100; - stan::model::assign(ef, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (efChosen + (get_base1(Apun,i,"Apun",1) * PEfreq)), - "assigning variable ef"); - current_statement_begin__ = 101; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (evChosen + (get_base1(Apun,i,"Apun",1) * PEval)), - "assigning variable ev"); - } - current_statement_begin__ = 105; - stan::model::assign(pers, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - 1, - "assigning variable pers"); - current_statement_begin__ = 106; - stan::math::assign(pers, stan::model::deep_copy(divide(pers,(1 + K_tr)))); - current_statement_begin__ = 109; - stan::math::assign(util, add(add(ev,multiply(ef,get_base1(betaF,i,"betaF",1))),multiply(pers,get_base1(betaP,i,"betaP",1)))); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - 
lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("Arew_pr"); - names__.push_back("Apun_pr"); - names__.push_back("K_pr"); - names__.push_back("betaF_pr"); - names__.push_back("betaP_pr"); - names__.push_back("Arew"); - names__.push_back("Apun"); - names__.push_back("K"); - names__.push_back("betaF"); - names__.push_back("betaP"); - names__.push_back("mu_Arew"); - names__.push_back("mu_Apun"); - names__.push_back("mu_K"); - names__.push_back("mu_betaF"); - names__.push_back("mu_betaP"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(5); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(5); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - 
dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_igt_orl_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(5); - vector_d sigma = in__.vector_lb_constrain(0,5); - vector_d Arew_pr = in__.vector_constrain(N); - vector_d Apun_pr = in__.vector_constrain(N); - vector_d K_pr = in__.vector_constrain(N); - vector_d betaF_pr = in__.vector_constrain(N); - vector_d betaP_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 5; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 5; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Arew_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(K_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(betaF_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(betaP_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - 
stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 28; - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("K", "N", N); - Eigen::Matrix K(static_cast(N)); - (void) K; // dummy to suppress unused var warning - - stan::math::initialize(K, DUMMY_VAR__); - stan::math::fill(K,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("betaF", "N", N); - Eigen::Matrix betaF(static_cast(N)); - (void) betaF; // dummy to suppress unused var warning - - stan::math::initialize(betaF, DUMMY_VAR__); - stan::math::fill(betaF,DUMMY_VAR__); - current_statement_begin__ = 32; - validate_non_negative_index("betaP", "N", N); - Eigen::Matrix betaP(static_cast(N)); - (void) betaP; // dummy to suppress unused var warning - - stan::math::initialize(betaP, DUMMY_VAR__); - stan::math::fill(betaP,DUMMY_VAR__); - - - current_statement_begin__ = 34; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 35; - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - current_statement_begin__ = 36; - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 
Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - current_statement_begin__ = 37; - stan::model::assign(K, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx(((get_base1(mu_p,3,"mu_p",1) + get_base1(sigma,3,"sigma",1)) + get_base1(K_pr,i,"K_pr",1))) * 5), - "assigning variable K"); - } - current_statement_begin__ = 39; - stan::math::assign(betaF, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),betaF_pr))); - current_statement_begin__ = 40; - stan::math::assign(betaP, add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),betaP_pr))); - - // validate transformed parameters - current_statement_begin__ = 28; - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - current_statement_begin__ = 29; - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - current_statement_begin__ = 30; - check_greater_or_equal(function__,"K",K,0); - check_less_or_equal(function__,"K",K,5); - current_statement_begin__ = 31; - current_statement_begin__ = 32; - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Arew[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(K[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(betaF[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(betaP[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 116; - local_scalar_t__ mu_Arew; - (void) mu_Arew; // dummy to suppress unused var warning - - stan::math::initialize(mu_Arew, DUMMY_VAR__); - stan::math::fill(mu_Arew,DUMMY_VAR__); - current_statement_begin__ 
= 117; - local_scalar_t__ mu_Apun; - (void) mu_Apun; // dummy to suppress unused var warning - - stan::math::initialize(mu_Apun, DUMMY_VAR__); - stan::math::fill(mu_Apun,DUMMY_VAR__); - current_statement_begin__ = 118; - local_scalar_t__ mu_K; - (void) mu_K; // dummy to suppress unused var warning - - stan::math::initialize(mu_K, DUMMY_VAR__); - stan::math::fill(mu_K,DUMMY_VAR__); - current_statement_begin__ = 119; - local_scalar_t__ mu_betaF; - (void) mu_betaF; // dummy to suppress unused var warning - - stan::math::initialize(mu_betaF, DUMMY_VAR__); - stan::math::fill(mu_betaF,DUMMY_VAR__); - current_statement_begin__ = 120; - local_scalar_t__ mu_betaP; - (void) mu_betaP; // dummy to suppress unused var warning - - stan::math::initialize(mu_betaP, DUMMY_VAR__); - stan::math::fill(mu_betaP,DUMMY_VAR__); - current_statement_begin__ = 123; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 126; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 129; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 130; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 131; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 135; - stan::math::assign(mu_Arew, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 136; - stan::math::assign(mu_Apun, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 137; - stan::math::assign(mu_K, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 5)); - 
current_statement_begin__ = 138; - stan::math::assign(mu_betaF, get_base1(mu_p,4,"mu_p",1)); - current_statement_begin__ = 139; - stan::math::assign(mu_betaP, get_base1(mu_p,5,"mu_p",1)); - - current_statement_begin__ = 142; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 144; - validate_non_negative_index("ef", "4", 4); - Eigen::Matrix ef(static_cast(4)); - (void) ef; // dummy to suppress unused var warning - - stan::math::initialize(ef, DUMMY_VAR__); - stan::math::fill(ef,DUMMY_VAR__); - current_statement_begin__ = 145; - validate_non_negative_index("ev", "4", 4); - Eigen::Matrix ev(static_cast(4)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 146; - validate_non_negative_index("PEfreq_fic", "4", 4); - Eigen::Matrix PEfreq_fic(static_cast(4)); - (void) PEfreq_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEfreq_fic, DUMMY_VAR__); - stan::math::fill(PEfreq_fic,DUMMY_VAR__); - current_statement_begin__ = 147; - validate_non_negative_index("PEval_fic", "4", 4); - Eigen::Matrix PEval_fic(static_cast(4)); - (void) PEval_fic; // dummy to suppress unused var warning - - stan::math::initialize(PEval_fic, DUMMY_VAR__); - stan::math::fill(PEval_fic,DUMMY_VAR__); - current_statement_begin__ = 148; - validate_non_negative_index("pers", "4", 4); - Eigen::Matrix pers(static_cast(4)); - (void) pers; // dummy to suppress unused var warning - - stan::math::initialize(pers, DUMMY_VAR__); - stan::math::fill(pers,DUMMY_VAR__); - current_statement_begin__ = 149; - validate_non_negative_index("util", "4", 4); - Eigen::Matrix util(static_cast(4)); - (void) util; // dummy to suppress unused var warning - - stan::math::initialize(util, DUMMY_VAR__); - stan::math::fill(util,DUMMY_VAR__); - current_statement_begin__ = 151; - local_scalar_t__ PEval; - (void) PEval; // dummy to suppress unused var warning - - 
stan::math::initialize(PEval, DUMMY_VAR__); - stan::math::fill(PEval,DUMMY_VAR__); - current_statement_begin__ = 152; - local_scalar_t__ PEfreq; - (void) PEfreq; // dummy to suppress unused var warning - - stan::math::initialize(PEfreq, DUMMY_VAR__); - stan::math::fill(PEfreq,DUMMY_VAR__); - current_statement_begin__ = 153; - local_scalar_t__ efChosen; - (void) efChosen; // dummy to suppress unused var warning - - stan::math::initialize(efChosen, DUMMY_VAR__); - stan::math::fill(efChosen,DUMMY_VAR__); - current_statement_begin__ = 154; - local_scalar_t__ evChosen; - (void) evChosen; // dummy to suppress unused var warning - - stan::math::initialize(evChosen, DUMMY_VAR__); - stan::math::fill(evChosen,DUMMY_VAR__); - current_statement_begin__ = 155; - local_scalar_t__ K_tr; - (void) K_tr; // dummy to suppress unused var warning - - stan::math::initialize(K_tr, DUMMY_VAR__); - stan::math::fill(K_tr,DUMMY_VAR__); - - - current_statement_begin__ = 158; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 159; - stan::math::assign(ef, initV); - current_statement_begin__ = 160; - stan::math::assign(ev, initV); - current_statement_begin__ = 161; - stan::math::assign(pers, initV); - current_statement_begin__ = 162; - stan::math::assign(util, initV); - current_statement_begin__ = 163; - stan::math::assign(K_tr, (pow(3,get_base1(K,i,"K",1)) - 1)); - current_statement_begin__ = 165; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 167; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),util))), - "assigning variable log_lik"); - current_statement_begin__ = 170; - stan::model::assign(y_pred, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(softmax(util), base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 173; - stan::math::assign(PEval, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - current_statement_begin__ = 174; - stan::math::assign(PEfreq, (get_base1(get_base1(sign_out,i,"sign_out",1),t,"sign_out",2) - get_base1(ef,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ef",1))); - current_statement_begin__ = 175; - stan::math::assign(PEfreq_fic, subtract((-(get_base1(get_base1(sign_out,i,"sign_out",1),t,"sign_out",2)) / 3),ef)); - current_statement_begin__ = 178; - stan::math::assign(efChosen, get_base1(ef,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ef",1)); - current_statement_begin__ = 179; - stan::math::assign(evChosen, get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1)); - current_statement_begin__ = 181; - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 183; - stan::math::assign(ef, stan::model::deep_copy(add(ef,multiply(get_base1(Apun,i,"Apun",1),PEfreq_fic)))); - current_statement_begin__ = 185; - stan::model::assign(ef, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (efChosen + (get_base1(Arew,i,"Arew",1) * PEfreq)), - "assigning variable ef"); - current_statement_begin__ = 186; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (evChosen + (get_base1(Arew,i,"Arew",1) * PEval)), - "assigning variable ev"); - } else { - - current_statement_begin__ = 189; - stan::math::assign(ef, 
stan::model::deep_copy(add(ef,multiply(get_base1(Arew,i,"Arew",1),PEfreq_fic)))); - current_statement_begin__ = 191; - stan::model::assign(ef, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (efChosen + (get_base1(Apun,i,"Apun",1) * PEfreq)), - "assigning variable ef"); - current_statement_begin__ = 192; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (evChosen + (get_base1(Apun,i,"Apun",1) * PEval)), - "assigning variable ev"); - } - current_statement_begin__ = 196; - stan::model::assign(pers, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - 1, - "assigning variable pers"); - current_statement_begin__ = 197; - stan::math::assign(pers, stan::model::deep_copy(divide(pers,(1 + K_tr)))); - current_statement_begin__ = 200; - stan::math::assign(util, add(add(ev,multiply(ef,get_base1(betaF,i,"betaF",1))),multiply(pers,get_base1(betaP,i,"betaP",1)))); - } - } - } - - // validate generated quantities - current_statement_begin__ = 116; - check_greater_or_equal(function__,"mu_Arew",mu_Arew,0); - check_less_or_equal(function__,"mu_Arew",mu_Arew,1); - current_statement_begin__ = 117; - check_greater_or_equal(function__,"mu_Apun",mu_Apun,0); - check_less_or_equal(function__,"mu_Apun",mu_Apun,1); - current_statement_begin__ = 118; - check_greater_or_equal(function__,"mu_K",mu_K,0); - check_less_or_equal(function__,"mu_K",mu_K,5); - current_statement_begin__ = 119; - current_statement_begin__ = 120; - current_statement_begin__ = 123; - current_statement_begin__ = 126; - - // write generated quantities - vars__.push_back(mu_Arew); - vars__.push_back(mu_Apun); - vars__.push_back(mu_K); - vars__.push_back(mu_betaF); - vars__.push_back(mu_betaP); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - 
vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_igt_orl"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "K_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "betaF_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "betaP_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "K" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "betaF" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "betaP" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_K"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_betaF"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_betaP"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 5; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "K_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "betaF_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "betaP_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "K" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "betaF" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "betaP" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_K"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_betaF"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_betaP"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_igt_pvl_decay_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_igt_pvl_decay"); - reader.add_event(133, 131, "end", "model_igt_pvl_decay"); - return reader; -} - -class model_igt_pvl_decay : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > choice; - vector_d initV; -public: - model_igt_pvl_decay(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_igt_pvl_decay(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_igt_pvl_decay_namespace::model_igt_pvl_decay"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data 
initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = 
N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - current_statement_begin__ = 6; - // initialize data variables - current_statement_begin__ = 9; - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 10; - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - current_statement_begin__ = 9; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 15; - validate_non_negative_index("mu_p", "4", 4); - num_params_r__ += 4; - current_statement_begin__ = 16; - validate_non_negative_index("sigma", "4", 4); - num_params_r__ += 4; - current_statement_begin__ = 19; - validate_non_negative_index("A_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 20; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 21; - validate_non_negative_index("cons_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 22; - validate_non_negative_index("lambda_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_igt_pvl_decay() { } - - - void transform_inits(const stan::io::var_context& context__, - 
std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "4", 4); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(4)); - vector_d mu_p(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "4", 4); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(4)); - vector_d sigma(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("A_pr"))) - throw std::runtime_error("variable A_pr missing"); - vals_r__ = context__.vals_r("A_pr"); - pos__ = 0U; - validate_non_negative_index("A_pr", "N", N); - context__.validate_dims("initialization", "A_pr", "vector_d", context__.to_vec(N)); - vector_d A_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - A_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(A_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable A_pr: ") + e.what()); - } - - if 
(!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("cons_pr"))) - throw std::runtime_error("variable cons_pr missing"); - vals_r__ = context__.vals_r("cons_pr"); - pos__ = 0U; - validate_non_negative_index("cons_pr", "N", N); - context__.validate_dims("initialization", "cons_pr", "vector_d", context__.to_vec(N)); - vector_d cons_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - cons_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(cons_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable cons_pr: ") + e.what()); - } - - if (!(context__.contains_r("lambda_pr"))) - throw std::runtime_error("variable lambda_pr missing"); - vals_r__ = context__.vals_r("lambda_pr"); - pos__ = 0U; - validate_non_negative_index("lambda_pr", "N", N); - context__.validate_dims("initialization", "lambda_pr", "vector_d", context__.to_vec(N)); - vector_d lambda_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - lambda_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(lambda_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable lambda_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - 
std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(4,lp__); - else - mu_p = in__.vector_constrain(4); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,4,lp__); - else - sigma = in__.vector_lb_constrain(0,4); - - Eigen::Matrix A_pr; - (void) A_pr; // dummy to suppress unused var warning - if (jacobian__) - A_pr = in__.vector_constrain(N,lp__); - else - A_pr = in__.vector_constrain(N); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = in__.vector_constrain(N); - - Eigen::Matrix cons_pr; - (void) cons_pr; // dummy to suppress unused var warning - if (jacobian__) - cons_pr = in__.vector_constrain(N,lp__); - else - cons_pr = in__.vector_constrain(N); - - Eigen::Matrix lambda_pr; - (void) lambda_pr; // dummy to suppress unused var warning - if (jacobian__) - lambda_pr = in__.vector_constrain(N,lp__); - else - lambda_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 26; - validate_non_negative_index("A", "N", N); - Eigen::Matrix A(static_cast(N)); - (void) A; // dummy to suppress unused var warning - - 
stan::math::initialize(A, DUMMY_VAR__); - stan::math::fill(A,DUMMY_VAR__); - current_statement_begin__ = 27; - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("cons", "N", N); - Eigen::Matrix cons(static_cast(N)); - (void) cons; // dummy to suppress unused var warning - - stan::math::initialize(cons, DUMMY_VAR__); - stan::math::fill(cons,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - - - current_statement_begin__ = 31; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 32; - stan::model::assign(A, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(A_pr,i,"A_pr",1)))), - "assigning variable A"); - current_statement_begin__ = 33; - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 2), - "assigning variable alpha"); - current_statement_begin__ = 34; - stan::model::assign(cons, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(cons_pr,i,"cons_pr",1)))) * 5), - "assigning variable cons"); - current_statement_begin__ = 35; - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + 
(get_base1(sigma,4,"sigma",1) * get_base1(lambda_pr,i,"lambda_pr",1)))) * 10), - "assigning variable lambda"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(A(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: A" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(cons(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: cons" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(lambda(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: lambda" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 26; - check_greater_or_equal(function__,"A",A,0); - check_less_or_equal(function__,"A",A,1); - current_statement_begin__ = 27; - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - current_statement_begin__ = 28; - check_greater_or_equal(function__,"cons",cons,0); - check_less_or_equal(function__,"cons",cons,5); - current_statement_begin__ = 29; - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,10); - - // model body - - current_statement_begin__ = 40; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 41; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ 
= 44; - lp_accum__.add(normal_log(A_pr, 0, 1)); - current_statement_begin__ = 45; - lp_accum__.add(normal_log(alpha_pr, 0, 1)); - current_statement_begin__ = 46; - lp_accum__.add(normal_log(cons_pr, 0, 1)); - current_statement_begin__ = 47; - lp_accum__.add(normal_log(lambda_pr, 0, 1)); - current_statement_begin__ = 49; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 51; - validate_non_negative_index("ev", "4", 4); - Eigen::Matrix ev(static_cast(4)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 52; - local_scalar_t__ curUtil; - (void) curUtil; // dummy to suppress unused var warning - - stan::math::initialize(curUtil, DUMMY_VAR__); - stan::math::fill(curUtil,DUMMY_VAR__); - current_statement_begin__ = 53; - local_scalar_t__ theta; - (void) theta; // dummy to suppress unused var warning - - stan::math::initialize(theta, DUMMY_VAR__); - stan::math::fill(theta,DUMMY_VAR__); - - - current_statement_begin__ = 56; - stan::math::assign(theta, (pow(3,get_base1(cons,i,"cons",1)) - 1)); - current_statement_begin__ = 57; - stan::math::assign(ev, initV); - current_statement_begin__ = 59; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 61; - lp_accum__.add(categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), multiply(theta,ev))); - current_statement_begin__ = 63; - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 64; - stan::math::assign(curUtil, pow(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),get_base1(alpha,i,"alpha",1))); - } else { - - current_statement_begin__ = 66; - stan::math::assign(curUtil, ((-(1) * get_base1(lambda,i,"lambda",1)) * pow((-(1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)),get_base1(alpha,i,"alpha",1)))); - } - current_statement_begin__ = 70; - 
stan::math::assign(ev, stan::model::deep_copy(multiply(ev,get_base1(A,i,"A",1)))); - current_statement_begin__ = 71; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + curUtil)), - "assigning variable ev"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("A_pr"); - names__.push_back("alpha_pr"); - names__.push_back("cons_pr"); - names__.push_back("lambda_pr"); - names__.push_back("A"); - names__.push_back("alpha"); - names__.push_back("cons"); - names__.push_back("lambda"); - names__.push_back("mu_A"); - names__.push_back("mu_alpha"); - names__.push_back("mu_cons"); - names__.push_back("mu_lambda"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); 
- dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_igt_pvl_decay_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(4); - vector_d sigma = in__.vector_lb_constrain(0,4); - vector_d A_pr = in__.vector_constrain(N); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d cons_pr = in__.vector_constrain(N); - vector_d lambda_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(A_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; 
k_0__ < N; ++k_0__) { - vars__.push_back(cons_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 26; - validate_non_negative_index("A", "N", N); - Eigen::Matrix A(static_cast(N)); - (void) A; // dummy to suppress unused var warning - - stan::math::initialize(A, DUMMY_VAR__); - stan::math::fill(A,DUMMY_VAR__); - current_statement_begin__ = 27; - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("cons", "N", N); - Eigen::Matrix cons(static_cast(N)); - (void) cons; // dummy to suppress unused var warning - - stan::math::initialize(cons, DUMMY_VAR__); - stan::math::fill(cons,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - - - current_statement_begin__ = 31; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 32; - stan::model::assign(A, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(A_pr,i,"A_pr",1)))), - "assigning variable A"); - current_statement_begin__ = 33; - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 
(Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 2), - "assigning variable alpha"); - current_statement_begin__ = 34; - stan::model::assign(cons, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(cons_pr,i,"cons_pr",1)))) * 5), - "assigning variable cons"); - current_statement_begin__ = 35; - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(lambda_pr,i,"lambda_pr",1)))) * 10), - "assigning variable lambda"); - } - - // validate transformed parameters - current_statement_begin__ = 26; - check_greater_or_equal(function__,"A",A,0); - check_less_or_equal(function__,"A",A,1); - current_statement_begin__ = 27; - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - current_statement_begin__ = 28; - check_greater_or_equal(function__,"cons",cons,0); - check_less_or_equal(function__,"cons",cons,5); - current_statement_begin__ = 29; - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,10); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(A[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(cons[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 77; - local_scalar_t__ mu_A; - (void) mu_A; // dummy to suppress unused var warning - - stan::math::initialize(mu_A, DUMMY_VAR__); - stan::math::fill(mu_A,DUMMY_VAR__); - 
current_statement_begin__ = 78; - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - current_statement_begin__ = 79; - local_scalar_t__ mu_cons; - (void) mu_cons; // dummy to suppress unused var warning - - stan::math::initialize(mu_cons, DUMMY_VAR__); - stan::math::fill(mu_cons,DUMMY_VAR__); - current_statement_begin__ = 80; - local_scalar_t__ mu_lambda; - (void) mu_lambda; // dummy to suppress unused var warning - - stan::math::initialize(mu_lambda, DUMMY_VAR__); - stan::math::fill(mu_lambda,DUMMY_VAR__); - current_statement_begin__ = 83; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 86; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 89; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 90; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 91; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 95; - stan::math::assign(mu_A, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 96; - stan::math::assign(mu_alpha, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 2)); - current_statement_begin__ = 97; - stan::math::assign(mu_cons, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 5)); - current_statement_begin__ = 98; - stan::math::assign(mu_lambda, (Phi_approx(get_base1(mu_p,4,"mu_p",1)) * 10)); - - current_statement_begin__ = 101; - for (int i = 1; i <= N; ++i) { - { - 
current_statement_begin__ = 103; - validate_non_negative_index("ev", "4", 4); - Eigen::Matrix ev(static_cast(4)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 104; - local_scalar_t__ curUtil; - (void) curUtil; // dummy to suppress unused var warning - - stan::math::initialize(curUtil, DUMMY_VAR__); - stan::math::fill(curUtil,DUMMY_VAR__); - current_statement_begin__ = 105; - local_scalar_t__ theta; - (void) theta; // dummy to suppress unused var warning - - stan::math::initialize(theta, DUMMY_VAR__); - stan::math::fill(theta,DUMMY_VAR__); - - - current_statement_begin__ = 108; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 109; - stan::math::assign(theta, (pow(3,get_base1(cons,i,"cons",1)) - 1)); - current_statement_begin__ = 110; - stan::math::assign(ev, initV); - current_statement_begin__ = 112; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 114; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),multiply(theta,ev)))), - "assigning variable log_lik"); - current_statement_begin__ = 117; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(softmax(multiply(theta,ev)), base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 119; - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 120; - stan::math::assign(curUtil, 
pow(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),get_base1(alpha,i,"alpha",1))); - } else { - - current_statement_begin__ = 122; - stan::math::assign(curUtil, ((-(1) * get_base1(lambda,i,"lambda",1)) * pow((-(1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)),get_base1(alpha,i,"alpha",1)))); - } - current_statement_begin__ = 126; - stan::math::assign(ev, stan::model::deep_copy(multiply(ev,get_base1(A,i,"A",1)))); - current_statement_begin__ = 127; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + curUtil)), - "assigning variable ev"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 77; - check_greater_or_equal(function__,"mu_A",mu_A,0); - check_less_or_equal(function__,"mu_A",mu_A,1); - current_statement_begin__ = 78; - check_greater_or_equal(function__,"mu_alpha",mu_alpha,0); - check_less_or_equal(function__,"mu_alpha",mu_alpha,2); - current_statement_begin__ = 79; - check_greater_or_equal(function__,"mu_cons",mu_cons,0); - check_less_or_equal(function__,"mu_cons",mu_cons,5); - current_statement_begin__ = 80; - check_greater_or_equal(function__,"mu_lambda",mu_lambda,0); - check_less_or_equal(function__,"mu_lambda",mu_lambda,10); - current_statement_begin__ = 83; - current_statement_begin__ = 86; - - // write generated quantities - vars__.push_back(mu_A); - vars__.push_back(mu_alpha); - vars__.push_back(mu_cons); - vars__.push_back(mu_lambda); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line 
prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_igt_pvl_decay"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_cons"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_cons"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_igt_pvl_delta_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_igt_pvl_delta"); - reader.add_event(131, 129, "end", "model_igt_pvl_delta"); - return reader; -} - -class model_igt_pvl_delta : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > choice; - vector_d initV; -public: - model_igt_pvl_delta(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_igt_pvl_delta(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_igt_pvl_delta_namespace::model_igt_pvl_delta"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data 
initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = 
N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - current_statement_begin__ = 6; - // initialize data variables - current_statement_begin__ = 9; - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 10; - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - current_statement_begin__ = 9; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 15; - validate_non_negative_index("mu_p", "4", 4); - num_params_r__ += 4; - current_statement_begin__ = 16; - validate_non_negative_index("sigma", "4", 4); - num_params_r__ += 4; - current_statement_begin__ = 19; - validate_non_negative_index("A_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 20; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 21; - validate_non_negative_index("cons_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 22; - validate_non_negative_index("lambda_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_igt_pvl_delta() { } - - - void transform_inits(const stan::io::var_context& context__, - 
std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "4", 4); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(4)); - vector_d mu_p(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "4", 4); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(4)); - vector_d sigma(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("A_pr"))) - throw std::runtime_error("variable A_pr missing"); - vals_r__ = context__.vals_r("A_pr"); - pos__ = 0U; - validate_non_negative_index("A_pr", "N", N); - context__.validate_dims("initialization", "A_pr", "vector_d", context__.to_vec(N)); - vector_d A_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - A_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(A_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable A_pr: ") + e.what()); - } - - if 
(!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("cons_pr"))) - throw std::runtime_error("variable cons_pr missing"); - vals_r__ = context__.vals_r("cons_pr"); - pos__ = 0U; - validate_non_negative_index("cons_pr", "N", N); - context__.validate_dims("initialization", "cons_pr", "vector_d", context__.to_vec(N)); - vector_d cons_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - cons_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(cons_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable cons_pr: ") + e.what()); - } - - if (!(context__.contains_r("lambda_pr"))) - throw std::runtime_error("variable lambda_pr missing"); - vals_r__ = context__.vals_r("lambda_pr"); - pos__ = 0U; - validate_non_negative_index("lambda_pr", "N", N); - context__.validate_dims("initialization", "lambda_pr", "vector_d", context__.to_vec(N)); - vector_d lambda_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - lambda_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(lambda_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable lambda_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - 
std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(4,lp__); - else - mu_p = in__.vector_constrain(4); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,4,lp__); - else - sigma = in__.vector_lb_constrain(0,4); - - Eigen::Matrix A_pr; - (void) A_pr; // dummy to suppress unused var warning - if (jacobian__) - A_pr = in__.vector_constrain(N,lp__); - else - A_pr = in__.vector_constrain(N); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = in__.vector_constrain(N); - - Eigen::Matrix cons_pr; - (void) cons_pr; // dummy to suppress unused var warning - if (jacobian__) - cons_pr = in__.vector_constrain(N,lp__); - else - cons_pr = in__.vector_constrain(N); - - Eigen::Matrix lambda_pr; - (void) lambda_pr; // dummy to suppress unused var warning - if (jacobian__) - lambda_pr = in__.vector_constrain(N,lp__); - else - lambda_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 26; - validate_non_negative_index("A", "N", N); - Eigen::Matrix A(static_cast(N)); - (void) A; // dummy to suppress unused var warning - - 
stan::math::initialize(A, DUMMY_VAR__); - stan::math::fill(A,DUMMY_VAR__); - current_statement_begin__ = 27; - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("cons", "N", N); - Eigen::Matrix cons(static_cast(N)); - (void) cons; // dummy to suppress unused var warning - - stan::math::initialize(cons, DUMMY_VAR__); - stan::math::fill(cons,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - - - current_statement_begin__ = 31; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 32; - stan::model::assign(A, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(A_pr,i,"A_pr",1)))), - "assigning variable A"); - current_statement_begin__ = 33; - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 2), - "assigning variable alpha"); - current_statement_begin__ = 34; - stan::model::assign(cons, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(cons_pr,i,"cons_pr",1)))) * 5), - "assigning variable cons"); - current_statement_begin__ = 35; - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + 
(get_base1(sigma,4,"sigma",1) * get_base1(lambda_pr,i,"lambda_pr",1)))) * 10), - "assigning variable lambda"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(A(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: A" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(cons(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: cons" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(lambda(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: lambda" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 26; - check_greater_or_equal(function__,"A",A,0); - check_less_or_equal(function__,"A",A,1); - current_statement_begin__ = 27; - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - current_statement_begin__ = 28; - check_greater_or_equal(function__,"cons",cons,0); - check_less_or_equal(function__,"cons",cons,5); - current_statement_begin__ = 29; - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,10); - - // model body - - current_statement_begin__ = 40; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 41; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ 
= 44; - lp_accum__.add(normal_log(A_pr, 0, 1)); - current_statement_begin__ = 45; - lp_accum__.add(normal_log(alpha_pr, 0, 1)); - current_statement_begin__ = 46; - lp_accum__.add(normal_log(cons_pr, 0, 1)); - current_statement_begin__ = 47; - lp_accum__.add(normal_log(lambda_pr, 0, 1)); - current_statement_begin__ = 49; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 51; - validate_non_negative_index("ev", "4", 4); - Eigen::Matrix ev(static_cast(4)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 52; - local_scalar_t__ curUtil; - (void) curUtil; // dummy to suppress unused var warning - - stan::math::initialize(curUtil, DUMMY_VAR__); - stan::math::fill(curUtil,DUMMY_VAR__); - current_statement_begin__ = 53; - local_scalar_t__ theta; - (void) theta; // dummy to suppress unused var warning - - stan::math::initialize(theta, DUMMY_VAR__); - stan::math::fill(theta,DUMMY_VAR__); - - - current_statement_begin__ = 56; - stan::math::assign(theta, (pow(3,get_base1(cons,i,"cons",1)) - 1)); - current_statement_begin__ = 57; - stan::math::assign(ev, initV); - current_statement_begin__ = 59; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 61; - lp_accum__.add(categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), multiply(theta,ev))); - current_statement_begin__ = 63; - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 64; - stan::math::assign(curUtil, pow(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),get_base1(alpha,i,"alpha",1))); - } else { - - current_statement_begin__ = 66; - stan::math::assign(curUtil, ((-(1) * get_base1(lambda,i,"lambda",1)) * pow((-(1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)),get_base1(alpha,i,"alpha",1)))); - } - current_statement_begin__ = 70; - 
stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + (get_base1(A,i,"A",1) * (curUtil - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))))), - "assigning variable ev"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("A_pr"); - names__.push_back("alpha_pr"); - names__.push_back("cons_pr"); - names__.push_back("lambda_pr"); - names__.push_back("A"); - names__.push_back("alpha"); - names__.push_back("cons"); - names__.push_back("lambda"); - names__.push_back("mu_A"); - names__.push_back("mu_alpha"); - names__.push_back("mu_cons"); - names__.push_back("mu_lambda"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - 
dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_igt_pvl_delta_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(4); - vector_d sigma = in__.vector_lb_constrain(0,4); - vector_d A_pr = in__.vector_constrain(N); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d cons_pr = in__.vector_constrain(N); - vector_d lambda_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(A_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ 
< N; ++k_0__) { - vars__.push_back(cons_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 26; - validate_non_negative_index("A", "N", N); - Eigen::Matrix A(static_cast(N)); - (void) A; // dummy to suppress unused var warning - - stan::math::initialize(A, DUMMY_VAR__); - stan::math::fill(A,DUMMY_VAR__); - current_statement_begin__ = 27; - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("cons", "N", N); - Eigen::Matrix cons(static_cast(N)); - (void) cons; // dummy to suppress unused var warning - - stan::math::initialize(cons, DUMMY_VAR__); - stan::math::fill(cons,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - - - current_statement_begin__ = 31; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 32; - stan::model::assign(A, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(A_pr,i,"A_pr",1)))), - "assigning variable A"); - current_statement_begin__ = 33; - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 
(Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 2), - "assigning variable alpha"); - current_statement_begin__ = 34; - stan::model::assign(cons, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(cons_pr,i,"cons_pr",1)))) * 5), - "assigning variable cons"); - current_statement_begin__ = 35; - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(lambda_pr,i,"lambda_pr",1)))) * 10), - "assigning variable lambda"); - } - - // validate transformed parameters - current_statement_begin__ = 26; - check_greater_or_equal(function__,"A",A,0); - check_less_or_equal(function__,"A",A,1); - current_statement_begin__ = 27; - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - current_statement_begin__ = 28; - check_greater_or_equal(function__,"cons",cons,0); - check_less_or_equal(function__,"cons",cons,5); - current_statement_begin__ = 29; - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,10); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(A[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(cons[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 76; - local_scalar_t__ mu_A; - (void) mu_A; // dummy to suppress unused var warning - - stan::math::initialize(mu_A, DUMMY_VAR__); - stan::math::fill(mu_A,DUMMY_VAR__); - 
current_statement_begin__ = 77; - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - current_statement_begin__ = 78; - local_scalar_t__ mu_cons; - (void) mu_cons; // dummy to suppress unused var warning - - stan::math::initialize(mu_cons, DUMMY_VAR__); - stan::math::fill(mu_cons,DUMMY_VAR__); - current_statement_begin__ = 79; - local_scalar_t__ mu_lambda; - (void) mu_lambda; // dummy to suppress unused var warning - - stan::math::initialize(mu_lambda, DUMMY_VAR__); - stan::math::fill(mu_lambda,DUMMY_VAR__); - current_statement_begin__ = 82; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 85; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 88; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 89; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 90; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 94; - stan::math::assign(mu_A, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 95; - stan::math::assign(mu_alpha, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 2)); - current_statement_begin__ = 96; - stan::math::assign(mu_cons, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 5)); - current_statement_begin__ = 97; - stan::math::assign(mu_lambda, (Phi_approx(get_base1(mu_p,4,"mu_p",1)) * 10)); - - current_statement_begin__ = 100; - for (int i = 1; i <= N; ++i) { - { - 
current_statement_begin__ = 102; - validate_non_negative_index("ev", "4", 4); - Eigen::Matrix ev(static_cast(4)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 103; - local_scalar_t__ curUtil; - (void) curUtil; // dummy to suppress unused var warning - - stan::math::initialize(curUtil, DUMMY_VAR__); - stan::math::fill(curUtil,DUMMY_VAR__); - current_statement_begin__ = 104; - local_scalar_t__ theta; - (void) theta; // dummy to suppress unused var warning - - stan::math::initialize(theta, DUMMY_VAR__); - stan::math::fill(theta,DUMMY_VAR__); - - - current_statement_begin__ = 107; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 108; - stan::math::assign(theta, (pow(3,get_base1(cons,i,"cons",1)) - 1)); - current_statement_begin__ = 109; - stan::math::assign(ev, initV); - current_statement_begin__ = 111; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 113; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),multiply(theta,ev)))), - "assigning variable log_lik"); - current_statement_begin__ = 116; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(softmax(multiply(theta,ev)), base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 118; - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 119; - stan::math::assign(curUtil, 
pow(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),get_base1(alpha,i,"alpha",1))); - } else { - - current_statement_begin__ = 121; - stan::math::assign(curUtil, ((-(1) * get_base1(lambda,i,"lambda",1)) * pow((-(1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)),get_base1(alpha,i,"alpha",1)))); - } - current_statement_begin__ = 125; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + (get_base1(A,i,"A",1) * (curUtil - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))))), - "assigning variable ev"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 76; - check_greater_or_equal(function__,"mu_A",mu_A,0); - check_less_or_equal(function__,"mu_A",mu_A,1); - current_statement_begin__ = 77; - check_greater_or_equal(function__,"mu_alpha",mu_alpha,0); - check_less_or_equal(function__,"mu_alpha",mu_alpha,2); - current_statement_begin__ = 78; - check_greater_or_equal(function__,"mu_cons",mu_cons,0); - check_less_or_equal(function__,"mu_cons",mu_cons,5); - current_statement_begin__ = 79; - check_greater_or_equal(function__,"mu_lambda",mu_lambda,0); - check_less_or_equal(function__,"mu_lambda",mu_lambda,10); - current_statement_begin__ = 82; - current_statement_begin__ = 85; - - // write generated quantities - vars__.push_back(mu_A); - vars__.push_back(mu_alpha); - vars__.push_back(mu_cons); - vars__.push_back(mu_lambda); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler 
griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_igt_pvl_delta"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_cons"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_cons"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_igt_vpp_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_igt_vpp"); - reader.add_event(187, 185, "end", "model_igt_vpp"); - return reader; -} - -class model_igt_vpp : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > outcome; - vector > choice; - vector_d initV; -public: - model_igt_vpp(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_igt_vpp(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_igt_vpp_namespace::model_igt_vpp"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); 
- vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { 
- choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - current_statement_begin__ = 6; - // initialize data variables - current_statement_begin__ = 10; - validate_non_negative_index("initV", "4", 4); - initV = vector_d(static_cast(4)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 11; - stan::math::assign(initV, rep_vector(0.0,4)); - - // validate transformed data - current_statement_begin__ = 10; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 17; - validate_non_negative_index("mu_p", "8", 8); - num_params_r__ += 8; - current_statement_begin__ = 18; - validate_non_negative_index("sigma", "8", 8); - num_params_r__ += 8; - current_statement_begin__ = 21; - validate_non_negative_index("A_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 22; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 23; - validate_non_negative_index("cons_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 24; - validate_non_negative_index("lambda_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 25; - validate_non_negative_index("epP_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 26; - validate_non_negative_index("epN_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 27; - validate_non_negative_index("K_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 28; - validate_non_negative_index("w_pr", "N", N); - num_params_r__ += N; 
- } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_igt_vpp() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "8", 8); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(8)); - vector_d mu_p(static_cast(8)); - for (int j1__ = 0U; j1__ < 8; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "8", 8); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(8)); - vector_d sigma(static_cast(8)); - for (int j1__ = 0U; j1__ < 8; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("A_pr"))) - throw std::runtime_error("variable A_pr missing"); - vals_r__ = context__.vals_r("A_pr"); - pos__ = 0U; - validate_non_negative_index("A_pr", "N", N); - context__.validate_dims("initialization", "A_pr", 
"vector_d", context__.to_vec(N)); - vector_d A_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - A_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(A_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable A_pr: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("cons_pr"))) - throw std::runtime_error("variable cons_pr missing"); - vals_r__ = context__.vals_r("cons_pr"); - pos__ = 0U; - validate_non_negative_index("cons_pr", "N", N); - context__.validate_dims("initialization", "cons_pr", "vector_d", context__.to_vec(N)); - vector_d cons_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - cons_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(cons_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable cons_pr: ") + e.what()); - } - - if (!(context__.contains_r("lambda_pr"))) - throw std::runtime_error("variable lambda_pr missing"); - vals_r__ = context__.vals_r("lambda_pr"); - pos__ = 0U; - validate_non_negative_index("lambda_pr", "N", N); - context__.validate_dims("initialization", "lambda_pr", "vector_d", context__.to_vec(N)); - vector_d lambda_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - lambda_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(lambda_pr); - } catch (const 
std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable lambda_pr: ") + e.what()); - } - - if (!(context__.contains_r("epP_pr"))) - throw std::runtime_error("variable epP_pr missing"); - vals_r__ = context__.vals_r("epP_pr"); - pos__ = 0U; - validate_non_negative_index("epP_pr", "N", N); - context__.validate_dims("initialization", "epP_pr", "vector_d", context__.to_vec(N)); - vector_d epP_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - epP_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(epP_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable epP_pr: ") + e.what()); - } - - if (!(context__.contains_r("epN_pr"))) - throw std::runtime_error("variable epN_pr missing"); - vals_r__ = context__.vals_r("epN_pr"); - pos__ = 0U; - validate_non_negative_index("epN_pr", "N", N); - context__.validate_dims("initialization", "epN_pr", "vector_d", context__.to_vec(N)); - vector_d epN_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - epN_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(epN_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable epN_pr: ") + e.what()); - } - - if (!(context__.contains_r("K_pr"))) - throw std::runtime_error("variable K_pr missing"); - vals_r__ = context__.vals_r("K_pr"); - pos__ = 0U; - validate_non_negative_index("K_pr", "N", N); - context__.validate_dims("initialization", "K_pr", "vector_d", context__.to_vec(N)); - vector_d K_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - K_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(K_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable K_pr: ") + e.what()); - } - - if (!(context__.contains_r("w_pr"))) - throw std::runtime_error("variable w_pr missing"); - vals_r__ = context__.vals_r("w_pr"); - pos__ = 0U; - 
validate_non_negative_index("w_pr", "N", N); - context__.validate_dims("initialization", "w_pr", "vector_d", context__.to_vec(N)); - vector_d w_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - w_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(w_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable w_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(8,lp__); - else - mu_p = in__.vector_constrain(8); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,8,lp__); - else - sigma = in__.vector_lb_constrain(0,8); - - Eigen::Matrix A_pr; - (void) A_pr; // dummy to suppress unused var warning - if (jacobian__) - A_pr = in__.vector_constrain(N,lp__); - else - A_pr = in__.vector_constrain(N); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = 
in__.vector_constrain(N); - - Eigen::Matrix cons_pr; - (void) cons_pr; // dummy to suppress unused var warning - if (jacobian__) - cons_pr = in__.vector_constrain(N,lp__); - else - cons_pr = in__.vector_constrain(N); - - Eigen::Matrix lambda_pr; - (void) lambda_pr; // dummy to suppress unused var warning - if (jacobian__) - lambda_pr = in__.vector_constrain(N,lp__); - else - lambda_pr = in__.vector_constrain(N); - - Eigen::Matrix epP_pr; - (void) epP_pr; // dummy to suppress unused var warning - if (jacobian__) - epP_pr = in__.vector_constrain(N,lp__); - else - epP_pr = in__.vector_constrain(N); - - Eigen::Matrix epN_pr; - (void) epN_pr; // dummy to suppress unused var warning - if (jacobian__) - epN_pr = in__.vector_constrain(N,lp__); - else - epN_pr = in__.vector_constrain(N); - - Eigen::Matrix K_pr; - (void) K_pr; // dummy to suppress unused var warning - if (jacobian__) - K_pr = in__.vector_constrain(N,lp__); - else - K_pr = in__.vector_constrain(N); - - Eigen::Matrix w_pr; - (void) w_pr; // dummy to suppress unused var warning - if (jacobian__) - w_pr = in__.vector_constrain(N,lp__); - else - w_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 33; - validate_non_negative_index("A", "N", N); - Eigen::Matrix A(static_cast(N)); - (void) A; // dummy to suppress unused var warning - - stan::math::initialize(A, DUMMY_VAR__); - stan::math::fill(A,DUMMY_VAR__); - current_statement_begin__ = 34; - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 35; - validate_non_negative_index("cons", "N", N); - Eigen::Matrix cons(static_cast(N)); - (void) cons; // dummy to suppress unused var warning - - stan::math::initialize(cons, DUMMY_VAR__); - stan::math::fill(cons,DUMMY_VAR__); - current_statement_begin__ = 36; - 
validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - current_statement_begin__ = 37; - validate_non_negative_index("epP", "N", N); - Eigen::Matrix epP(static_cast(N)); - (void) epP; // dummy to suppress unused var warning - - stan::math::initialize(epP, DUMMY_VAR__); - stan::math::fill(epP,DUMMY_VAR__); - current_statement_begin__ = 38; - validate_non_negative_index("epN", "N", N); - Eigen::Matrix epN(static_cast(N)); - (void) epN; // dummy to suppress unused var warning - - stan::math::initialize(epN, DUMMY_VAR__); - stan::math::fill(epN,DUMMY_VAR__); - current_statement_begin__ = 39; - validate_non_negative_index("K", "N", N); - Eigen::Matrix K(static_cast(N)); - (void) K; // dummy to suppress unused var warning - - stan::math::initialize(K, DUMMY_VAR__); - stan::math::fill(K,DUMMY_VAR__); - current_statement_begin__ = 40; - validate_non_negative_index("w", "N", N); - Eigen::Matrix w(static_cast(N)); - (void) w; // dummy to suppress unused var warning - - stan::math::initialize(w, DUMMY_VAR__); - stan::math::fill(w,DUMMY_VAR__); - - - current_statement_begin__ = 42; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 43; - stan::model::assign(A, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(A_pr,i,"A_pr",1)))), - "assigning variable A"); - current_statement_begin__ = 44; - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 2), - "assigning variable alpha"); - current_statement_begin__ = 45; - stan::model::assign(cons, - stan::model::cons_list(stan::model::index_uni(i), 
stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(cons_pr,i,"cons_pr",1)))) * 5), - "assigning variable cons"); - current_statement_begin__ = 46; - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(lambda_pr,i,"lambda_pr",1)))) * 10), - "assigning variable lambda"); - current_statement_begin__ = 47; - stan::model::assign(K, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,7,"mu_p",1) + (get_base1(sigma,7,"sigma",1) * get_base1(K_pr,i,"K_pr",1)))), - "assigning variable K"); - current_statement_begin__ = 48; - stan::model::assign(w, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,8,"mu_p",1) + (get_base1(sigma,8,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), - "assigning variable w"); - } - current_statement_begin__ = 50; - stan::math::assign(epP, add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),epP_pr))); - current_statement_begin__ = 51; - stan::math::assign(epN, add(get_base1(mu_p,6,"mu_p",1),multiply(get_base1(sigma,6,"sigma",1),epN_pr))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(A(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: A" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(cons(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: 
cons" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(lambda(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: lambda" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(epP(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: epP" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(epN(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: epN" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(K(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: K" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(w(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: w" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 33; - check_greater_or_equal(function__,"A",A,0); - check_less_or_equal(function__,"A",A,1); - current_statement_begin__ = 34; - check_greater_or_equal(function__,"alpha",alpha,0); - check_less_or_equal(function__,"alpha",alpha,2); - current_statement_begin__ = 35; - check_greater_or_equal(function__,"cons",cons,0); - check_less_or_equal(function__,"cons",cons,5); - current_statement_begin__ = 36; - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,10); - current_statement_begin__ = 37; - current_statement_begin__ = 
38; - current_statement_begin__ = 39; - check_greater_or_equal(function__,"K",K,0); - check_less_or_equal(function__,"K",K,1); - current_statement_begin__ = 40; - check_greater_or_equal(function__,"w",w,0); - check_less_or_equal(function__,"w",w,1); - - // model body - - current_statement_begin__ = 56; - lp_accum__.add(normal_log(mu_p, 0, 1.0)); - current_statement_begin__ = 57; - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 4), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - current_statement_begin__ = 58; - lp_accum__.add(cauchy_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(5, 6), stan::model::nil_index_list()), "sigma"), 0, 1.0)); - current_statement_begin__ = 59; - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(7, 8), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - current_statement_begin__ = 62; - lp_accum__.add(normal_log(A_pr, 0, 1.0)); - current_statement_begin__ = 63; - lp_accum__.add(normal_log(alpha_pr, 0, 1.0)); - current_statement_begin__ = 64; - lp_accum__.add(normal_log(cons_pr, 0, 1.0)); - current_statement_begin__ = 65; - lp_accum__.add(normal_log(lambda_pr, 0, 1.0)); - current_statement_begin__ = 66; - lp_accum__.add(normal_log(epP_pr, 0, 1.0)); - current_statement_begin__ = 67; - lp_accum__.add(normal_log(epN_pr, 0, 1.0)); - current_statement_begin__ = 68; - lp_accum__.add(normal_log(K_pr, 0, 1.0)); - current_statement_begin__ = 69; - lp_accum__.add(normal_log(w_pr, 0, 1.0)); - current_statement_begin__ = 71; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 73; - validate_non_negative_index("ev", "4", 4); - Eigen::Matrix ev(static_cast(4)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 74; - 
validate_non_negative_index("p_next", "4", 4); - Eigen::Matrix p_next(static_cast(4)); - (void) p_next; // dummy to suppress unused var warning - - stan::math::initialize(p_next, DUMMY_VAR__); - stan::math::fill(p_next,DUMMY_VAR__); - current_statement_begin__ = 75; - validate_non_negative_index("str", "4", 4); - Eigen::Matrix str(static_cast(4)); - (void) str; // dummy to suppress unused var warning - - stan::math::initialize(str, DUMMY_VAR__); - stan::math::fill(str,DUMMY_VAR__); - current_statement_begin__ = 76; - validate_non_negative_index("pers", "4", 4); - Eigen::Matrix pers(static_cast(4)); - (void) pers; // dummy to suppress unused var warning - - stan::math::initialize(pers, DUMMY_VAR__); - stan::math::fill(pers,DUMMY_VAR__); - current_statement_begin__ = 77; - validate_non_negative_index("V", "4", 4); - Eigen::Matrix V(static_cast(4)); - (void) V; // dummy to suppress unused var warning - - stan::math::initialize(V, DUMMY_VAR__); - stan::math::fill(V,DUMMY_VAR__); - current_statement_begin__ = 79; - local_scalar_t__ curUtil; - (void) curUtil; // dummy to suppress unused var warning - - stan::math::initialize(curUtil, DUMMY_VAR__); - stan::math::fill(curUtil,DUMMY_VAR__); - current_statement_begin__ = 80; - local_scalar_t__ theta; - (void) theta; // dummy to suppress unused var warning - - stan::math::initialize(theta, DUMMY_VAR__); - stan::math::fill(theta,DUMMY_VAR__); - - - current_statement_begin__ = 83; - stan::math::assign(theta, (pow(3,get_base1(cons,i,"cons",1)) - 1)); - current_statement_begin__ = 84; - stan::math::assign(ev, initV); - current_statement_begin__ = 85; - stan::math::assign(pers, initV); - current_statement_begin__ = 86; - stan::math::assign(V, initV); - current_statement_begin__ = 88; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 90; - lp_accum__.add(categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), multiply(theta,V))); - current_statement_begin__ = 93; - 
stan::math::assign(pers, stan::model::deep_copy(multiply(pers,get_base1(K,i,"K",1)))); - current_statement_begin__ = 95; - if (as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 96; - stan::math::assign(curUtil, pow(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),get_base1(alpha,i,"alpha",1))); - current_statement_begin__ = 97; - stan::model::assign(pers, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(pers,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"pers",1) + get_base1(epP,i,"epP",1))), - "assigning variable pers"); - } else { - - current_statement_begin__ = 99; - stan::math::assign(curUtil, ((-(1) * get_base1(lambda,i,"lambda",1)) * pow((-(1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)),get_base1(alpha,i,"alpha",1)))); - current_statement_begin__ = 100; - stan::model::assign(pers, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(pers,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"pers",1) + get_base1(epN,i,"epN",1))), - "assigning variable pers"); - } - current_statement_begin__ = 103; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + (get_base1(A,i,"A",1) * (curUtil - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))))), - "assigning variable ev"); - current_statement_begin__ = 105; - stan::math::assign(V, add(multiply(get_base1(w,i,"w",1),ev),multiply((1 - get_base1(w,i,"w",1)),pers))); - } - } - } - - } catch (const std::exception& e) { - 
stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("A_pr"); - names__.push_back("alpha_pr"); - names__.push_back("cons_pr"); - names__.push_back("lambda_pr"); - names__.push_back("epP_pr"); - names__.push_back("epN_pr"); - names__.push_back("K_pr"); - names__.push_back("w_pr"); - names__.push_back("A"); - names__.push_back("alpha"); - names__.push_back("cons"); - names__.push_back("lambda"); - names__.push_back("epP"); - names__.push_back("epN"); - names__.push_back("K"); - names__.push_back("w"); - names__.push_back("mu_A"); - names__.push_back("mu_alpha"); - names__.push_back("mu_cons"); - names__.push_back("mu_lambda"); - names__.push_back("mu_epP"); - names__.push_back("mu_epN"); - names__.push_back("mu_K"); - names__.push_back("mu_w"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(8); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(8); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - 
dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_igt_vpp_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write 
parameters - vector_d mu_p = in__.vector_constrain(8); - vector_d sigma = in__.vector_lb_constrain(0,8); - vector_d A_pr = in__.vector_constrain(N); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d cons_pr = in__.vector_constrain(N); - vector_d lambda_pr = in__.vector_constrain(N); - vector_d epP_pr = in__.vector_constrain(N); - vector_d epN_pr = in__.vector_constrain(N); - vector_d K_pr = in__.vector_constrain(N); - vector_d w_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 8; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 8; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(A_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(cons_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(epP_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(epN_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(K_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 33; - validate_non_negative_index("A", "N", N); - Eigen::Matrix A(static_cast(N)); - (void) A; // dummy to suppress unused var warning - - stan::math::initialize(A, DUMMY_VAR__); - stan::math::fill(A,DUMMY_VAR__); - current_statement_begin__ = 34; - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress 
unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 35; - validate_non_negative_index("cons", "N", N); - Eigen::Matrix cons(static_cast(N)); - (void) cons; // dummy to suppress unused var warning - - stan::math::initialize(cons, DUMMY_VAR__); - stan::math::fill(cons,DUMMY_VAR__); - current_statement_begin__ = 36; - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - current_statement_begin__ = 37; - validate_non_negative_index("epP", "N", N); - Eigen::Matrix epP(static_cast(N)); - (void) epP; // dummy to suppress unused var warning - - stan::math::initialize(epP, DUMMY_VAR__); - stan::math::fill(epP,DUMMY_VAR__); - current_statement_begin__ = 38; - validate_non_negative_index("epN", "N", N); - Eigen::Matrix epN(static_cast(N)); - (void) epN; // dummy to suppress unused var warning - - stan::math::initialize(epN, DUMMY_VAR__); - stan::math::fill(epN,DUMMY_VAR__); - current_statement_begin__ = 39; - validate_non_negative_index("K", "N", N); - Eigen::Matrix K(static_cast(N)); - (void) K; // dummy to suppress unused var warning - - stan::math::initialize(K, DUMMY_VAR__); - stan::math::fill(K,DUMMY_VAR__); - current_statement_begin__ = 40; - validate_non_negative_index("w", "N", N); - Eigen::Matrix w(static_cast(N)); - (void) w; // dummy to suppress unused var warning - - stan::math::initialize(w, DUMMY_VAR__); - stan::math::fill(w,DUMMY_VAR__); - - - current_statement_begin__ = 42; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 43; - stan::model::assign(A, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(A_pr,i,"A_pr",1)))), - "assigning variable A"); - 
current_statement_begin__ = 44; - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 2), - "assigning variable alpha"); - current_statement_begin__ = 45; - stan::model::assign(cons, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(cons_pr,i,"cons_pr",1)))) * 5), - "assigning variable cons"); - current_statement_begin__ = 46; - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(lambda_pr,i,"lambda_pr",1)))) * 10), - "assigning variable lambda"); - current_statement_begin__ = 47; - stan::model::assign(K, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,7,"mu_p",1) + (get_base1(sigma,7,"sigma",1) * get_base1(K_pr,i,"K_pr",1)))), - "assigning variable K"); - current_statement_begin__ = 48; - stan::model::assign(w, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,8,"mu_p",1) + (get_base1(sigma,8,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), - "assigning variable w"); - } - current_statement_begin__ = 50; - stan::math::assign(epP, add(get_base1(mu_p,5,"mu_p",1),multiply(get_base1(sigma,5,"sigma",1),epP_pr))); - current_statement_begin__ = 51; - stan::math::assign(epN, add(get_base1(mu_p,6,"mu_p",1),multiply(get_base1(sigma,6,"sigma",1),epN_pr))); - - // validate transformed parameters - current_statement_begin__ = 33; - check_greater_or_equal(function__,"A",A,0); - check_less_or_equal(function__,"A",A,1); - current_statement_begin__ = 34; - check_greater_or_equal(function__,"alpha",alpha,0); - 
check_less_or_equal(function__,"alpha",alpha,2); - current_statement_begin__ = 35; - check_greater_or_equal(function__,"cons",cons,0); - check_less_or_equal(function__,"cons",cons,5); - current_statement_begin__ = 36; - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,10); - current_statement_begin__ = 37; - current_statement_begin__ = 38; - current_statement_begin__ = 39; - check_greater_or_equal(function__,"K",K,0); - check_less_or_equal(function__,"K",K,1); - current_statement_begin__ = 40; - check_greater_or_equal(function__,"w",w,0); - check_less_or_equal(function__,"w",w,1); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(A[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(cons[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(epP[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(epN[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(K[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 111; - local_scalar_t__ mu_A; - (void) mu_A; // dummy to suppress unused var warning - - stan::math::initialize(mu_A, DUMMY_VAR__); - stan::math::fill(mu_A,DUMMY_VAR__); - current_statement_begin__ = 112; - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - current_statement_begin__ = 113; - local_scalar_t__ mu_cons; - (void) mu_cons; // dummy to suppress unused var warning - - stan::math::initialize(mu_cons, 
DUMMY_VAR__); - stan::math::fill(mu_cons,DUMMY_VAR__); - current_statement_begin__ = 114; - local_scalar_t__ mu_lambda; - (void) mu_lambda; // dummy to suppress unused var warning - - stan::math::initialize(mu_lambda, DUMMY_VAR__); - stan::math::fill(mu_lambda,DUMMY_VAR__); - current_statement_begin__ = 115; - local_scalar_t__ mu_epP; - (void) mu_epP; // dummy to suppress unused var warning - - stan::math::initialize(mu_epP, DUMMY_VAR__); - stan::math::fill(mu_epP,DUMMY_VAR__); - current_statement_begin__ = 116; - local_scalar_t__ mu_epN; - (void) mu_epN; // dummy to suppress unused var warning - - stan::math::initialize(mu_epN, DUMMY_VAR__); - stan::math::fill(mu_epN,DUMMY_VAR__); - current_statement_begin__ = 117; - local_scalar_t__ mu_K; - (void) mu_K; // dummy to suppress unused var warning - - stan::math::initialize(mu_K, DUMMY_VAR__); - stan::math::fill(mu_K,DUMMY_VAR__); - current_statement_begin__ = 118; - local_scalar_t__ mu_w; - (void) mu_w; // dummy to suppress unused var warning - - stan::math::initialize(mu_w, DUMMY_VAR__); - stan::math::fill(mu_w,DUMMY_VAR__); - current_statement_begin__ = 121; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 124; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 127; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 128; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 129; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 133; - stan::math::assign(mu_A, 
Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 134; - stan::math::assign(mu_alpha, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 2)); - current_statement_begin__ = 135; - stan::math::assign(mu_cons, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 5)); - current_statement_begin__ = 136; - stan::math::assign(mu_lambda, (Phi_approx(get_base1(mu_p,4,"mu_p",1)) * 10)); - current_statement_begin__ = 137; - stan::math::assign(mu_epP, get_base1(mu_p,5,"mu_p",1)); - current_statement_begin__ = 138; - stan::math::assign(mu_epN, get_base1(mu_p,6,"mu_p",1)); - current_statement_begin__ = 139; - stan::math::assign(mu_K, Phi_approx(get_base1(mu_p,7,"mu_p",1))); - current_statement_begin__ = 140; - stan::math::assign(mu_w, Phi_approx(get_base1(mu_p,8,"mu_p",1))); - - current_statement_begin__ = 143; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 145; - validate_non_negative_index("ev", "4", 4); - Eigen::Matrix ev(static_cast(4)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 146; - validate_non_negative_index("p_next", "4", 4); - Eigen::Matrix p_next(static_cast(4)); - (void) p_next; // dummy to suppress unused var warning - - stan::math::initialize(p_next, DUMMY_VAR__); - stan::math::fill(p_next,DUMMY_VAR__); - current_statement_begin__ = 147; - validate_non_negative_index("str", "4", 4); - Eigen::Matrix str(static_cast(4)); - (void) str; // dummy to suppress unused var warning - - stan::math::initialize(str, DUMMY_VAR__); - stan::math::fill(str,DUMMY_VAR__); - current_statement_begin__ = 148; - validate_non_negative_index("pers", "4", 4); - Eigen::Matrix pers(static_cast(4)); - (void) pers; // dummy to suppress unused var warning - - stan::math::initialize(pers, DUMMY_VAR__); - stan::math::fill(pers,DUMMY_VAR__); - current_statement_begin__ = 149; - validate_non_negative_index("V", "4", 4); - Eigen::Matrix V(static_cast(4)); - 
(void) V; // dummy to suppress unused var warning - - stan::math::initialize(V, DUMMY_VAR__); - stan::math::fill(V,DUMMY_VAR__); - current_statement_begin__ = 151; - local_scalar_t__ curUtil; - (void) curUtil; // dummy to suppress unused var warning - - stan::math::initialize(curUtil, DUMMY_VAR__); - stan::math::fill(curUtil,DUMMY_VAR__); - current_statement_begin__ = 152; - local_scalar_t__ theta; - (void) theta; // dummy to suppress unused var warning - - stan::math::initialize(theta, DUMMY_VAR__); - stan::math::fill(theta,DUMMY_VAR__); - - - current_statement_begin__ = 155; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 156; - stan::math::assign(theta, (pow(3,get_base1(cons,i,"cons",1)) - 1)); - current_statement_begin__ = 157; - stan::math::assign(ev, initV); - current_statement_begin__ = 158; - stan::math::assign(pers, initV); - current_statement_begin__ = 159; - stan::math::assign(V, initV); - current_statement_begin__ = 161; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 163; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),multiply(theta,V)))), - "assigning variable log_lik"); - current_statement_begin__ = 166; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(softmax(multiply(theta,V)), base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 169; - stan::math::assign(pers, stan::model::deep_copy(multiply(pers,get_base1(K,i,"K",1)))); - current_statement_begin__ = 171; - if 
(as_bool(logical_gte(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - - current_statement_begin__ = 172; - stan::math::assign(curUtil, pow(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),get_base1(alpha,i,"alpha",1))); - current_statement_begin__ = 173; - stan::model::assign(pers, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(pers,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"pers",1) + get_base1(epP,i,"epP",1))), - "assigning variable pers"); - } else { - - current_statement_begin__ = 175; - stan::math::assign(curUtil, ((-(1) * get_base1(lambda,i,"lambda",1)) * pow((-(1) * get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)),get_base1(alpha,i,"alpha",1)))); - current_statement_begin__ = 176; - stan::model::assign(pers, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(pers,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"pers",1) + get_base1(epN,i,"epN",1))), - "assigning variable pers"); - } - current_statement_begin__ = 179; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + (get_base1(A,i,"A",1) * (curUtil - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))))), - "assigning variable ev"); - current_statement_begin__ = 181; - stan::math::assign(V, add(multiply(get_base1(w,i,"w",1),ev),multiply((1 - get_base1(w,i,"w",1)),pers))); - } - } - } - - // validate generated quantities - current_statement_begin__ = 111; - check_greater_or_equal(function__,"mu_A",mu_A,0); - check_less_or_equal(function__,"mu_A",mu_A,1); - 
current_statement_begin__ = 112; - check_greater_or_equal(function__,"mu_alpha",mu_alpha,0); - check_less_or_equal(function__,"mu_alpha",mu_alpha,2); - current_statement_begin__ = 113; - check_greater_or_equal(function__,"mu_cons",mu_cons,0); - check_less_or_equal(function__,"mu_cons",mu_cons,5); - current_statement_begin__ = 114; - check_greater_or_equal(function__,"mu_lambda",mu_lambda,0); - check_less_or_equal(function__,"mu_lambda",mu_lambda,10); - current_statement_begin__ = 115; - current_statement_begin__ = 116; - current_statement_begin__ = 117; - check_greater_or_equal(function__,"mu_K",mu_K,0); - check_less_or_equal(function__,"mu_K",mu_K,1); - current_statement_begin__ = 118; - check_greater_or_equal(function__,"mu_w",mu_w,0); - check_less_or_equal(function__,"mu_w",mu_w,1); - current_statement_begin__ = 121; - current_statement_begin__ = 124; - - // write generated quantities - vars__.push_back(mu_A); - vars__.push_back(mu_alpha); - vars__.push_back(mu_cons); - vars__.push_back(mu_lambda); - vars__.push_back(mu_epP); - vars__.push_back(mu_epN); - vars__.push_back(mu_K); - vars__.push_back(mu_w); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - 
write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_igt_vpp"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 8; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 8; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "epP_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "epN_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "K_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "epP" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "epN" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "K" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_cons"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_epP"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_epN"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_K"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 8; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 8; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "epP_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "epN_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "K_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "A" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "cons" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "epP" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "epN" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "K" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_A"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_cons"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_epP"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_epN"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_K"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_peer_ocu_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_peer_ocu"); - reader.add_event(114, 112, "end", "model_peer_ocu"); - return reader; -} - -class model_peer_ocu : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > condition; - vector > safe_Hpayoff; - vector > safe_Lpayoff; - vector > risky_Hpayoff; - vector > risky_Lpayoff; - vector > p_gamble; -public: - model_peer_ocu(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_peer_ocu(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_peer_ocu_namespace::model_peer_ocu"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - 
current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("condition", "N", N); - validate_non_negative_index("condition", "T", T); - context__.validate_dims("data initialization", "condition", "int", context__.to_vec(N,T)); - validate_non_negative_index("condition", "N", N); - validate_non_negative_index("condition", "T", T); - condition = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("condition"); - pos__ = 0; - size_t condition_limit_1__ = T; - for (size_t i_1__ = 
0; i_1__ < condition_limit_1__; ++i_1__) { - size_t condition_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < condition_limit_0__; ++i_0__) { - condition[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("safe_Hpayoff", "N", N); - validate_non_negative_index("safe_Hpayoff", "T", T); - context__.validate_dims("data initialization", "safe_Hpayoff", "double", context__.to_vec(N,T)); - validate_non_negative_index("safe_Hpayoff", "N", N); - validate_non_negative_index("safe_Hpayoff", "T", T); - safe_Hpayoff = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("safe_Hpayoff"); - pos__ = 0; - size_t safe_Hpayoff_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < safe_Hpayoff_limit_1__; ++i_1__) { - size_t safe_Hpayoff_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < safe_Hpayoff_limit_0__; ++i_0__) { - safe_Hpayoff[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 8; - validate_non_negative_index("safe_Lpayoff", "N", N); - validate_non_negative_index("safe_Lpayoff", "T", T); - context__.validate_dims("data initialization", "safe_Lpayoff", "double", context__.to_vec(N,T)); - validate_non_negative_index("safe_Lpayoff", "N", N); - validate_non_negative_index("safe_Lpayoff", "T", T); - safe_Lpayoff = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("safe_Lpayoff"); - pos__ = 0; - size_t safe_Lpayoff_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < safe_Lpayoff_limit_1__; ++i_1__) { - size_t safe_Lpayoff_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < safe_Lpayoff_limit_0__; ++i_0__) { - safe_Lpayoff[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 9; - validate_non_negative_index("risky_Hpayoff", "N", N); - validate_non_negative_index("risky_Hpayoff", "T", T); - context__.validate_dims("data initialization", "risky_Hpayoff", "double", context__.to_vec(N,T)); - validate_non_negative_index("risky_Hpayoff", "N", N); - 
validate_non_negative_index("risky_Hpayoff", "T", T); - risky_Hpayoff = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("risky_Hpayoff"); - pos__ = 0; - size_t risky_Hpayoff_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < risky_Hpayoff_limit_1__; ++i_1__) { - size_t risky_Hpayoff_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < risky_Hpayoff_limit_0__; ++i_0__) { - risky_Hpayoff[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 10; - validate_non_negative_index("risky_Lpayoff", "N", N); - validate_non_negative_index("risky_Lpayoff", "T", T); - context__.validate_dims("data initialization", "risky_Lpayoff", "double", context__.to_vec(N,T)); - validate_non_negative_index("risky_Lpayoff", "N", N); - validate_non_negative_index("risky_Lpayoff", "T", T); - risky_Lpayoff = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("risky_Lpayoff"); - pos__ = 0; - size_t risky_Lpayoff_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < risky_Lpayoff_limit_1__; ++i_1__) { - size_t risky_Lpayoff_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < risky_Lpayoff_limit_0__; ++i_0__) { - risky_Lpayoff[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 11; - validate_non_negative_index("p_gamble", "N", N); - validate_non_negative_index("p_gamble", "T", T); - context__.validate_dims("data initialization", "p_gamble", "double", context__.to_vec(N,T)); - validate_non_negative_index("p_gamble", "N", N); - validate_non_negative_index("p_gamble", "T", T); - p_gamble = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("p_gamble"); - pos__ = 0; - size_t p_gamble_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < p_gamble_limit_1__; ++i_1__) { - size_t p_gamble_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < p_gamble_limit_0__; ++i_0__) { - p_gamble[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - 
check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],1); - } - } - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"condition[k0__][k1__]",condition[k0__][k1__],0); - check_less_or_equal(function__,"condition[k0__][k1__]",condition[k0__][k1__],3); - } - } - current_statement_begin__ = 7; - current_statement_begin__ = 8; - current_statement_begin__ = 9; - current_statement_begin__ = 10; - current_statement_begin__ = 11; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"p_gamble[k0__][k1__]",p_gamble[k0__][k1__],0); - check_less_or_equal(function__,"p_gamble[k0__][k1__]",p_gamble[k0__][k1__],1); - } - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 18; - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 19; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 20; - validate_non_negative_index("rho_p", "N", N); - num_params_r__ += N; - current_statement_begin__ = 21; - validate_non_negative_index("tau_p", "N", N); - num_params_r__ += N; - current_statement_begin__ = 22; - validate_non_negative_index("ocu_p", "N", N); - num_params_r__ 
+= N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_peer_ocu() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("rho_p"))) - throw std::runtime_error("variable rho_p missing"); - vals_r__ = context__.vals_r("rho_p"); - pos__ = 0U; - validate_non_negative_index("rho_p", "N", N); - context__.validate_dims("initialization", 
"rho_p", "vector_d", context__.to_vec(N)); - vector_d rho_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_p: ") + e.what()); - } - - if (!(context__.contains_r("tau_p"))) - throw std::runtime_error("variable tau_p missing"); - vals_r__ = context__.vals_r("tau_p"); - pos__ = 0U; - validate_non_negative_index("tau_p", "N", N); - context__.validate_dims("initialization", "tau_p", "vector_d", context__.to_vec(N)); - vector_d tau_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - tau_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(tau_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau_p: ") + e.what()); - } - - if (!(context__.contains_r("ocu_p"))) - throw std::runtime_error("variable ocu_p missing"); - vals_r__ = context__.vals_r("ocu_p"); - pos__ = 0U; - validate_non_negative_index("ocu_p", "N", N); - context__.validate_dims("initialization", "ocu_p", "vector_d", context__.to_vec(N)); - vector_d ocu_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ocu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ocu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ocu_p: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - 
std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix rho_p; - (void) rho_p; // dummy to suppress unused var warning - if (jacobian__) - rho_p = in__.vector_constrain(N,lp__); - else - rho_p = in__.vector_constrain(N); - - Eigen::Matrix tau_p; - (void) tau_p; // dummy to suppress unused var warning - if (jacobian__) - tau_p = in__.vector_constrain(N,lp__); - else - tau_p = in__.vector_constrain(N); - - Eigen::Matrix ocu_p; - (void) ocu_p; // dummy to suppress unused var warning - if (jacobian__) - ocu_p = in__.vector_constrain(N,lp__); - else - ocu_p = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 26; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - current_statement_begin__ = 27; - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("ocu", "N", N); - Eigen::Matrix ocu(static_cast(N)); - (void) ocu; // dummy to suppress unused var warning - - stan::math::initialize(ocu, 
DUMMY_VAR__); - stan::math::fill(ocu,DUMMY_VAR__); - - - current_statement_begin__ = 30; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 31; - stan::model::assign(rho, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(rho_p,i,"rho_p",1)))) * 2), - "assigning variable rho"); - } - current_statement_begin__ = 33; - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),tau_p)))); - current_statement_begin__ = 34; - stan::math::assign(ocu, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),ocu_p))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(tau(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ocu(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ocu" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 26; - check_greater_or_equal(function__,"rho",rho,0); - check_less_or_equal(function__,"rho",rho,2); - current_statement_begin__ = 27; - check_greater_or_equal(function__,"tau",tau,0); - current_statement_begin__ = 28; - - // model body - - current_statement_begin__ = 40; - lp_accum__.add(normal_log(mu_p, 0, 1.0)); - current_statement_begin__ = 41; - 
lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 2), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - current_statement_begin__ = 42; - lp_accum__.add(cauchy_log(get_base1(sigma,3,"sigma",1), 0, 1.0)); - current_statement_begin__ = 45; - lp_accum__.add(normal_log(rho_p, 0, 1.0)); - current_statement_begin__ = 46; - lp_accum__.add(normal_log(tau_p, 0, 1.0)); - current_statement_begin__ = 47; - lp_accum__.add(normal_log(ocu_p, 0, 1.0)); - current_statement_begin__ = 49; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 50; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - current_statement_begin__ = 51; - local_scalar_t__ U_safe; - (void) U_safe; // dummy to suppress unused var warning - - stan::math::initialize(U_safe, DUMMY_VAR__); - stan::math::fill(U_safe,DUMMY_VAR__); - current_statement_begin__ = 52; - local_scalar_t__ U_risky; - (void) U_risky; // dummy to suppress unused var warning - - stan::math::initialize(U_risky, DUMMY_VAR__); - stan::math::fill(U_risky,DUMMY_VAR__); - - - current_statement_begin__ = 54; - stan::math::assign(U_safe, ((get_base1(get_base1(p_gamble,i,"p_gamble",1),t,"p_gamble",2) * pow(get_base1(get_base1(safe_Hpayoff,i,"safe_Hpayoff",1),t,"safe_Hpayoff",2),get_base1(rho,i,"rho",1))) + ((1 - get_base1(get_base1(p_gamble,i,"p_gamble",1),t,"p_gamble",2)) * pow(get_base1(get_base1(safe_Lpayoff,i,"safe_Lpayoff",1),t,"safe_Lpayoff",2),get_base1(rho,i,"rho",1))))); - current_statement_begin__ = 55; - stan::math::assign(U_risky, ((get_base1(get_base1(p_gamble,i,"p_gamble",1),t,"p_gamble",2) * pow(get_base1(get_base1(risky_Hpayoff,i,"risky_Hpayoff",1),t,"risky_Hpayoff",2),get_base1(rho,i,"rho",1))) + ((1 - get_base1(get_base1(p_gamble,i,"p_gamble",1),t,"p_gamble",2)) * pow(get_base1(get_base1(risky_Lpayoff,i,"risky_Lpayoff",1),t,"risky_Lpayoff",2),get_base1(rho,i,"rho",1))))); - current_statement_begin__ = 56; - if 
(as_bool(logical_eq(get_base1(get_base1(condition,i,"condition",1),t,"condition",2),1))) { - - current_statement_begin__ = 57; - stan::math::assign(U_safe, stan::model::deep_copy((U_safe + get_base1(ocu,i,"ocu",1)))); - } - current_statement_begin__ = 59; - if (as_bool(logical_eq(get_base1(get_base1(condition,i,"condition",1),t,"condition",2),3))) { - - current_statement_begin__ = 60; - stan::math::assign(U_risky, stan::model::deep_copy((U_risky + get_base1(ocu,i,"ocu",1)))); - } - current_statement_begin__ = 62; - lp_accum__.add(bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), (get_base1(tau,i,"tau",1) * (U_risky - U_safe)))); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("rho_p"); - names__.push_back("tau_p"); - names__.push_back("ocu_p"); - names__.push_back("rho"); - names__.push_back("tau"); - names__.push_back("ocu"); - names__.push_back("mu_rho"); - names__.push_back("mu_tau"); - names__.push_back("mu_ocu"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - 
dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_peer_ocu_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d rho_p = in__.vector_constrain(N); - vector_d tau_p = in__.vector_constrain(N); - vector_d ocu_p = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ocu_p[k_0__]); - } - - // declare and define transformed parameters - 
double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 26; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - current_statement_begin__ = 27; - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("ocu", "N", N); - Eigen::Matrix ocu(static_cast(N)); - (void) ocu; // dummy to suppress unused var warning - - stan::math::initialize(ocu, DUMMY_VAR__); - stan::math::fill(ocu,DUMMY_VAR__); - - - current_statement_begin__ = 30; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 31; - stan::model::assign(rho, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(rho_p,i,"rho_p",1)))) * 2), - "assigning variable rho"); - } - current_statement_begin__ = 33; - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),tau_p)))); - current_statement_begin__ = 34; - stan::math::assign(ocu, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),ocu_p))); - - // validate transformed parameters - current_statement_begin__ = 26; - check_greater_or_equal(function__,"rho",rho,0); - check_less_or_equal(function__,"rho",rho,2); - current_statement_begin__ = 27; - check_greater_or_equal(function__,"tau",tau,0); - current_statement_begin__ = 28; - - // write transformed parameters - if 
(include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ocu[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 67; - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - stan::math::fill(mu_rho,DUMMY_VAR__); - current_statement_begin__ = 68; - local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to suppress unused var warning - - stan::math::initialize(mu_tau, DUMMY_VAR__); - stan::math::fill(mu_tau,DUMMY_VAR__); - current_statement_begin__ = 69; - local_scalar_t__ mu_ocu; - (void) mu_ocu; // dummy to suppress unused var warning - - stan::math::initialize(mu_ocu, DUMMY_VAR__); - stan::math::fill(mu_ocu,DUMMY_VAR__); - current_statement_begin__ = 72; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 75; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 78; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 79; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 80; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 84; - stan::math::assign(mu_rho, (Phi_approx(get_base1(mu_p,1,"mu_p",1)) * 2)); - current_statement_begin__ = 85; - stan::math::assign(mu_tau, 
stan::math::exp(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 86; - stan::math::assign(mu_ocu, get_base1(mu_p,3,"mu_p",1)); - - current_statement_begin__ = 89; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 92; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0.0, - "assigning variable log_lik"); - current_statement_begin__ = 94; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - current_statement_begin__ = 95; - local_scalar_t__ U_safe; - (void) U_safe; // dummy to suppress unused var warning - - stan::math::initialize(U_safe, DUMMY_VAR__); - stan::math::fill(U_safe,DUMMY_VAR__); - current_statement_begin__ = 96; - local_scalar_t__ U_risky; - (void) U_risky; // dummy to suppress unused var warning - - stan::math::initialize(U_risky, DUMMY_VAR__); - stan::math::fill(U_risky,DUMMY_VAR__); - - - current_statement_begin__ = 98; - stan::math::assign(U_safe, ((get_base1(get_base1(p_gamble,i,"p_gamble",1),t,"p_gamble",2) * pow(get_base1(get_base1(safe_Hpayoff,i,"safe_Hpayoff",1),t,"safe_Hpayoff",2),get_base1(rho,i,"rho",1))) + ((1 - get_base1(get_base1(p_gamble,i,"p_gamble",1),t,"p_gamble",2)) * pow(get_base1(get_base1(safe_Lpayoff,i,"safe_Lpayoff",1),t,"safe_Lpayoff",2),get_base1(rho,i,"rho",1))))); - current_statement_begin__ = 99; - stan::math::assign(U_risky, ((get_base1(get_base1(p_gamble,i,"p_gamble",1),t,"p_gamble",2) * pow(get_base1(get_base1(risky_Hpayoff,i,"risky_Hpayoff",1),t,"risky_Hpayoff",2),get_base1(rho,i,"rho",1))) + ((1 - get_base1(get_base1(p_gamble,i,"p_gamble",1),t,"p_gamble",2)) * pow(get_base1(get_base1(risky_Lpayoff,i,"risky_Lpayoff",1),t,"risky_Lpayoff",2),get_base1(rho,i,"rho",1))))); - current_statement_begin__ = 100; - if (as_bool(logical_eq(get_base1(get_base1(condition,i,"condition",1),t,"condition",2),1))) { - - current_statement_begin__ = 101; - stan::math::assign(U_safe, stan::model::deep_copy((U_safe + 
get_base1(ocu,i,"ocu",1)))); - } - current_statement_begin__ = 103; - if (as_bool(logical_eq(get_base1(get_base1(condition,i,"condition",1),t,"condition",2),3))) { - - current_statement_begin__ = 104; - stan::math::assign(U_risky, stan::model::deep_copy((U_risky + get_base1(ocu,i,"ocu",1)))); - } - current_statement_begin__ = 106; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),(get_base1(tau,i,"tau",1) * (U_risky - U_safe))))), - "assigning variable log_lik"); - current_statement_begin__ = 108; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(inv_logit((get_base1(tau,i,"tau",1) * (U_risky - U_safe))), base_rng__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 67; - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - check_less_or_equal(function__,"mu_rho",mu_rho,2); - current_statement_begin__ = 68; - check_greater_or_equal(function__,"mu_tau",mu_tau,0); - current_statement_begin__ = 69; - current_statement_begin__ = 72; - current_statement_begin__ = 75; - - // write generated quantities - vars__.push_back(mu_rho); - vars__.push_back(mu_tau); - vars__.push_back(mu_ocu); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void 
write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_peer_ocu"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ocu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ocu" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ocu"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ocu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ocu" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ocu"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_prl_ewa_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_prl_ewa"); - reader.add_event(164, 162, "end", "model_prl_ewa"); - return reader; -} - -class model_prl_ewa : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > outcome; - vector_d initV; -public: - model_prl_ewa(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_prl_ewa(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_prl_ewa_namespace::model_prl_ewa"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ 
DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 8; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 9; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 10; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 12; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 13; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector 
>(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 8; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 9; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 10; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 12; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],2); - } - } - current_statement_begin__ = 13; - // initialize data variables - current_statement_begin__ = 18; - validate_non_negative_index("initV", "2", 2); - initV = vector_d(static_cast(2)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 19; - stan::math::assign(initV, rep_vector(0.0,2)); - - // validate transformed data - current_statement_begin__ = 18; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 25; - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 26; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 29; - validate_non_negative_index("phi_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 30; - validate_non_negative_index("rho_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 31; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += 
N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_prl_ewa() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("phi_pr"))) - throw std::runtime_error("variable phi_pr missing"); - vals_r__ = context__.vals_r("phi_pr"); - pos__ = 0U; - validate_non_negative_index("phi_pr", "N", N); - context__.validate_dims("initialization", 
"phi_pr", "vector_d", context__.to_vec(N)); - vector_d phi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - phi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(phi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable phi_pr: ") + e.what()); - } - - if (!(context__.contains_r("rho_pr"))) - throw std::runtime_error("variable rho_pr missing"); - vals_r__ = context__.vals_r("rho_pr"); - pos__ = 0U; - validate_non_negative_index("rho_pr", "N", N); - context__.validate_dims("initialization", "rho_pr", "vector_d", context__.to_vec(N)); - vector_d rho_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& 
params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix phi_pr; - (void) phi_pr; // dummy to suppress unused var warning - if (jacobian__) - phi_pr = in__.vector_constrain(N,lp__); - else - phi_pr = in__.vector_constrain(N); - - Eigen::Matrix rho_pr; - (void) rho_pr; // dummy to suppress unused var warning - if (jacobian__) - rho_pr = in__.vector_constrain(N,lp__); - else - rho_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 36; - validate_non_negative_index("phi", "N", N); - Eigen::Matrix phi(static_cast(N)); - (void) phi; // dummy to suppress unused var warning - - stan::math::initialize(phi, DUMMY_VAR__); - stan::math::fill(phi,DUMMY_VAR__); - current_statement_begin__ = 37; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - current_statement_begin__ = 38; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress 
unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 40; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 41; - stan::model::assign(phi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(phi_pr,i,"phi_pr",1)))), - "assigning variable phi"); - current_statement_begin__ = 42; - stan::model::assign(rho, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(rho_pr,i,"rho_pr",1)))), - "assigning variable rho"); - current_statement_begin__ = 43; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 10), - "assigning variable beta"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(phi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: phi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 36; - 
check_greater_or_equal(function__,"phi",phi,0); - check_less_or_equal(function__,"phi",phi,1); - current_statement_begin__ = 37; - check_greater_or_equal(function__,"rho",rho,0); - check_less_or_equal(function__,"rho",rho,1); - current_statement_begin__ = 38; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // model body - - current_statement_begin__ = 49; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 50; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 53; - lp_accum__.add(normal_log(phi_pr, 0, 1)); - current_statement_begin__ = 54; - lp_accum__.add(normal_log(rho_pr, 0, 1)); - current_statement_begin__ = 55; - lp_accum__.add(normal_log(beta_pr, 0, 1)); - current_statement_begin__ = 57; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 59; - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 60; - validate_non_negative_index("ew", "2", 2); - Eigen::Matrix ew(static_cast(2)); - (void) ew; // dummy to suppress unused var warning - - stan::math::initialize(ew, DUMMY_VAR__); - stan::math::fill(ew,DUMMY_VAR__); - current_statement_begin__ = 62; - local_scalar_t__ ewt1; - (void) ewt1; // dummy to suppress unused var warning - - stan::math::initialize(ewt1, DUMMY_VAR__); - stan::math::fill(ewt1,DUMMY_VAR__); - - - current_statement_begin__ = 65; - stan::math::assign(ev, initV); - current_statement_begin__ = 66; - stan::math::assign(ew, initV); - current_statement_begin__ = 68; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 70; - lp_accum__.add(categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), multiply(ev,get_base1(beta,i,"beta",1)))); - current_statement_begin__ = 73; - 
stan::math::assign(ewt1, get_base1(ew,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ew",1)); - current_statement_begin__ = 76; - stan::model::assign(ew, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(ew,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ew",1) * get_base1(rho,i,"rho",1)) + 1)), - "assigning variable ew"); - current_statement_begin__ = 79; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) * get_base1(phi,i,"phi",1)) * ewt1) + get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) / get_base1(ew,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ew",1))), - "assigning variable ev"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("phi_pr"); - names__.push_back("rho_pr"); - names__.push_back("beta_pr"); - names__.push_back("phi"); - names__.push_back("rho"); - names__.push_back("beta"); - names__.push_back("mu_phi"); - 
names__.push_back("mu_rho"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("mr_ev_c"); - names__.push_back("mr_ev_nc"); - names__.push_back("mr_ew_c"); - names__.push_back("mr_ew_nc"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = 
"model_prl_ewa_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d phi_pr = in__.vector_constrain(N); - vector_d rho_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(phi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 36; - validate_non_negative_index("phi", "N", N); - Eigen::Matrix phi(static_cast(N)); - (void) phi; // dummy to suppress unused var warning - - stan::math::initialize(phi, DUMMY_VAR__); - stan::math::fill(phi,DUMMY_VAR__); - current_statement_begin__ = 37; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - current_statement_begin__ = 38; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 40; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 41; - stan::model::assign(phi, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(phi_pr,i,"phi_pr",1)))), - "assigning variable phi"); - current_statement_begin__ = 42; - stan::model::assign(rho, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(rho_pr,i,"rho_pr",1)))), - "assigning variable rho"); - current_statement_begin__ = 43; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 10), - "assigning variable beta"); - } - - // validate transformed parameters - current_statement_begin__ = 36; - check_greater_or_equal(function__,"phi",phi,0); - check_less_or_equal(function__,"phi",phi,1); - current_statement_begin__ = 37; - check_greater_or_equal(function__,"rho",rho,0); - check_less_or_equal(function__,"rho",rho,1); - current_statement_begin__ = 38; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(phi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 86; - local_scalar_t__ mu_phi; - (void) mu_phi; // dummy to suppress unused var warning - - stan::math::initialize(mu_phi, DUMMY_VAR__); - stan::math::fill(mu_phi,DUMMY_VAR__); - current_statement_begin__ = 87; - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - 
stan::math::fill(mu_rho,DUMMY_VAR__); - current_statement_begin__ = 88; - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - current_statement_begin__ = 91; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 95; - validate_non_negative_index("mr_ev_c", "N", N); - validate_non_negative_index("mr_ev_c", "T", T); - vector > mr_ev_c(N, (vector(T))); - stan::math::initialize(mr_ev_c, DUMMY_VAR__); - stan::math::fill(mr_ev_c,DUMMY_VAR__); - current_statement_begin__ = 96; - validate_non_negative_index("mr_ev_nc", "N", N); - validate_non_negative_index("mr_ev_nc", "T", T); - vector > mr_ev_nc(N, (vector(T))); - stan::math::initialize(mr_ev_nc, DUMMY_VAR__); - stan::math::fill(mr_ev_nc,DUMMY_VAR__); - current_statement_begin__ = 99; - validate_non_negative_index("mr_ew_c", "N", N); - validate_non_negative_index("mr_ew_c", "T", T); - vector > mr_ew_c(N, (vector(T))); - stan::math::initialize(mr_ew_c, DUMMY_VAR__); - stan::math::fill(mr_ew_c,DUMMY_VAR__); - current_statement_begin__ = 100; - validate_non_negative_index("mr_ew_nc", "N", N); - validate_non_negative_index("mr_ew_nc", "T", T); - vector > mr_ew_nc(N, (vector(T))); - stan::math::initialize(mr_ew_nc, DUMMY_VAR__); - stan::math::fill(mr_ew_nc,DUMMY_VAR__); - current_statement_begin__ = 103; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 106; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 107; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 108; - stan::model::assign(mr_ev_c, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_c"); - current_statement_begin__ = 109; - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_nc"); - current_statement_begin__ = 110; - stan::model::assign(mr_ew_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ew_c"); - current_statement_begin__ = 111; - stan::model::assign(mr_ew_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ew_nc"); - current_statement_begin__ = 113; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 117; - stan::math::assign(mu_phi, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 118; - stan::math::assign(mu_rho, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 119; - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 10)); - - current_statement_begin__ = 122; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 124; - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 125; - validate_non_negative_index("ew", "2", 2); - Eigen::Matrix ew(static_cast(2)); - (void) ew; // dummy to suppress unused var warning - - stan::math::initialize(ew, 
DUMMY_VAR__); - stan::math::fill(ew,DUMMY_VAR__); - current_statement_begin__ = 127; - local_scalar_t__ ewt1; - (void) ewt1; // dummy to suppress unused var warning - - stan::math::initialize(ewt1, DUMMY_VAR__); - stan::math::fill(ewt1,DUMMY_VAR__); - - - current_statement_begin__ = 130; - stan::math::assign(ev, initV); - current_statement_begin__ = 131; - stan::math::assign(ew, initV); - current_statement_begin__ = 133; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 135; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 137; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),multiply(ev,get_base1(beta,i,"beta",1))))), - "assigning variable log_lik"); - current_statement_begin__ = 140; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(softmax(multiply(ev,get_base1(beta,i,"beta",1))), base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 144; - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1), - "assigning variable mr_ev_c"); - current_statement_begin__ = 145; - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1), - "assigning variable mr_ev_nc"); - 
current_statement_begin__ = 148; - stan::model::assign(mr_ew_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ew,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ew",1), - "assigning variable mr_ew_c"); - current_statement_begin__ = 149; - stan::model::assign(mr_ew_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ew,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ew",1), - "assigning variable mr_ew_nc"); - current_statement_begin__ = 152; - stan::math::assign(ewt1, get_base1(ew,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ew",1)); - current_statement_begin__ = 155; - stan::model::assign(ew, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((get_base1(ew,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ew",1) * get_base1(rho,i,"rho",1)) + 1)), - "assigning variable ew"); - current_statement_begin__ = 158; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy(((((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) * get_base1(phi,i,"phi",1)) * ewt1) + get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) / get_base1(ew,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ew",1))), - "assigning variable ev"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 86; - check_greater_or_equal(function__,"mu_phi",mu_phi,0); - check_less_or_equal(function__,"mu_phi",mu_phi,1); - current_statement_begin__ = 87; - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - check_less_or_equal(function__,"mu_rho",mu_rho,1); - 
current_statement_begin__ = 88; - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,10); - current_statement_begin__ = 91; - current_statement_begin__ = 95; - current_statement_begin__ = 96; - current_statement_begin__ = 99; - current_statement_begin__ = 100; - current_statement_begin__ = 103; - - // write generated quantities - vars__.push_back(mu_phi); - vars__.push_back(mu_rho); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ew_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ew_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - 
vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_prl_ewa"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "phi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "phi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_phi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ew_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ew_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "phi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "phi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_phi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ew_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ew_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_prl_fictitious_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_prl_fictitious"); - reader.add_event(168, 166, "end", "model_prl_fictitious"); - return reader; -} - -class model_prl_fictitious : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > outcome; - vector_d initV; -public: - model_prl_fictitious(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_prl_fictitious(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_prl_fictitious_namespace::model_prl_fictitious"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 8; - context__.validate_dims("data 
initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 9; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 10; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 11; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 12; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = 
N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 8; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 9; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 10; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 11; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],2); - } - } - current_statement_begin__ = 12; - // initialize data variables - current_statement_begin__ = 17; - validate_non_negative_index("initV", "2", 2); - initV = vector_d(static_cast(2)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 18; - stan::math::assign(initV, rep_vector(0.0,2)); - - // validate transformed data - current_statement_begin__ = 17; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 24; - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 25; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 28; - validate_non_negative_index("eta_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 29; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 30; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** 
IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_prl_fictitious() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("eta_pr"))) - throw std::runtime_error("variable eta_pr missing"); - vals_r__ = context__.vals_r("eta_pr"); - pos__ = 0U; - validate_non_negative_index("eta_pr", "N", N); - context__.validate_dims("initialization", "eta_pr", "vector_d", context__.to_vec(N)); - vector_d eta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - eta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(eta_pr); - } 
catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable eta_pr: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; 
// suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix eta_pr; - (void) eta_pr; // dummy to suppress unused var warning - if (jacobian__) - eta_pr = in__.vector_constrain(N,lp__); - else - eta_pr = in__.vector_constrain(N); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 35; - validate_non_negative_index("eta", "N", N); - Eigen::Matrix eta(static_cast(N)); - (void) eta; // dummy to suppress unused var warning - - stan::math::initialize(eta, DUMMY_VAR__); - stan::math::fill(eta,DUMMY_VAR__); - current_statement_begin__ = 36; - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 37; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 39; - for (int i = 1; i <= N; ++i) { - - 
current_statement_begin__ = 40; - stan::model::assign(eta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pr,i,"eta_pr",1)))), - "assigning variable eta"); - current_statement_begin__ = 41; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - current_statement_begin__ = 43; - stan::math::assign(alpha, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),alpha_pr))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(eta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: eta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 35; - check_greater_or_equal(function__,"eta",eta,0); - check_less_or_equal(function__,"eta",eta,1); - current_statement_begin__ = 36; - current_statement_begin__ = 37; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // model body - - current_statement_begin__ = 48; - lp_accum__.add(normal_log(mu_p, 0, 
1)); - current_statement_begin__ = 49; - lp_accum__.add(normal_log(get_base1(sigma,1,"sigma",1), 0, 0.20000000000000001)); - current_statement_begin__ = 50; - lp_accum__.add(cauchy_log(get_base1(sigma,2,"sigma",1), 0, 1.0)); - current_statement_begin__ = 51; - lp_accum__.add(normal_log(get_base1(sigma,3,"sigma",1), 0, 0.20000000000000001)); - current_statement_begin__ = 54; - lp_accum__.add(normal_log(eta_pr, 0, 1)); - current_statement_begin__ = 55; - lp_accum__.add(normal_log(alpha_pr, 0, 1)); - current_statement_begin__ = 56; - lp_accum__.add(normal_log(beta_pr, 0, 1)); - current_statement_begin__ = 58; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 60; - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 61; - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - current_statement_begin__ = 63; - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - current_statement_begin__ = 64; - local_scalar_t__ PEnc; - (void) PEnc; // dummy to suppress unused var warning - - stan::math::initialize(PEnc, DUMMY_VAR__); - stan::math::fill(PEnc,DUMMY_VAR__); - - - current_statement_begin__ = 67; - stan::math::assign(ev, initV); - current_statement_begin__ = 69; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 71; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(alpha,i,"alpha",1) - (get_base1(ev,1,"ev",1) - 
get_base1(ev,2,"ev",1))))))), - "assigning variable prob"); - current_statement_begin__ = 72; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - current_statement_begin__ = 73; - lp_accum__.add(categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), prob)); - current_statement_begin__ = 76; - stan::math::assign(PE, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - current_statement_begin__ = 77; - stan::math::assign(PEnc, (-(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1))); - current_statement_begin__ = 80; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + (get_base1(eta,i,"eta",1) * PE))), - "assigning variable ev"); - current_statement_begin__ = 81; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1) + (get_base1(eta,i,"eta",1) * PEnc))), - "assigning variable ev"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) 
const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("eta_pr"); - names__.push_back("alpha_pr"); - names__.push_back("beta_pr"); - names__.push_back("eta"); - names__.push_back("alpha"); - names__.push_back("beta"); - names__.push_back("mu_eta"); - names__.push_back("mu_alpha"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("mr_ev_c"); - names__.push_back("mr_ev_nc"); - names__.push_back("mr_pe_c"); - names__.push_back("mr_pe_nc"); - names__.push_back("mr_dv"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - 
dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_prl_fictitious_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d eta_pr = in__.vector_constrain(N); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 35; - validate_non_negative_index("eta", "N", N); - Eigen::Matrix eta(static_cast(N)); - (void) eta; // dummy to suppress unused var warning - - 
stan::math::initialize(eta, DUMMY_VAR__); - stan::math::fill(eta,DUMMY_VAR__); - current_statement_begin__ = 36; - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 37; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 39; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 40; - stan::model::assign(eta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pr,i,"eta_pr",1)))), - "assigning variable eta"); - current_statement_begin__ = 41; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - current_statement_begin__ = 43; - stan::math::assign(alpha, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),alpha_pr))); - - // validate transformed parameters - current_statement_begin__ = 35; - check_greater_or_equal(function__,"eta",eta,0); - check_less_or_equal(function__,"eta",eta,1); - current_statement_begin__ = 36; - current_statement_begin__ = 37; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; 
++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 88; - local_scalar_t__ mu_eta; - (void) mu_eta; // dummy to suppress unused var warning - - stan::math::initialize(mu_eta, DUMMY_VAR__); - stan::math::fill(mu_eta,DUMMY_VAR__); - current_statement_begin__ = 89; - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - current_statement_begin__ = 90; - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - current_statement_begin__ = 93; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 96; - validate_non_negative_index("mr_ev_c", "N", N); - validate_non_negative_index("mr_ev_c", "T", T); - vector > mr_ev_c(N, (vector(T))); - stan::math::initialize(mr_ev_c, DUMMY_VAR__); - stan::math::fill(mr_ev_c,DUMMY_VAR__); - current_statement_begin__ = 97; - validate_non_negative_index("mr_ev_nc", "N", N); - validate_non_negative_index("mr_ev_nc", "T", T); - vector > mr_ev_nc(N, (vector(T))); - stan::math::initialize(mr_ev_nc, DUMMY_VAR__); - stan::math::fill(mr_ev_nc,DUMMY_VAR__); - current_statement_begin__ = 99; - validate_non_negative_index("mr_pe_c", "N", N); - validate_non_negative_index("mr_pe_c", "T", T); - vector > mr_pe_c(N, (vector(T))); - stan::math::initialize(mr_pe_c, DUMMY_VAR__); - stan::math::fill(mr_pe_c,DUMMY_VAR__); - current_statement_begin__ = 100; - validate_non_negative_index("mr_pe_nc", "N", N); - validate_non_negative_index("mr_pe_nc", "T", T); - vector > mr_pe_nc(N, (vector(T))); - stan::math::initialize(mr_pe_nc, DUMMY_VAR__); - 
stan::math::fill(mr_pe_nc,DUMMY_VAR__); - current_statement_begin__ = 101; - validate_non_negative_index("mr_dv", "N", N); - validate_non_negative_index("mr_dv", "T", T); - vector > mr_dv(N, (vector(T))); - stan::math::initialize(mr_dv, DUMMY_VAR__); - stan::math::fill(mr_dv,DUMMY_VAR__); - current_statement_begin__ = 104; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 107; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 108; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 109; - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_c"); - current_statement_begin__ = 110; - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_nc"); - current_statement_begin__ = 112; - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_pe_c"); - current_statement_begin__ = 113; - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_pe_nc"); - current_statement_begin__ = 114; - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_dv"); - current_statement_begin__ = 116; - stan::model::assign(y_pred, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 120; - stan::math::assign(mu_eta, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 121; - stan::math::assign(mu_alpha, get_base1(mu_p,2,"mu_p",1)); - current_statement_begin__ = 122; - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 5)); - - current_statement_begin__ = 125; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 127; - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 128; - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - current_statement_begin__ = 130; - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - current_statement_begin__ = 131; - local_scalar_t__ PEnc; - (void) PEnc; // dummy to suppress unused var warning - - stan::math::initialize(PEnc, DUMMY_VAR__); - stan::math::fill(PEnc,DUMMY_VAR__); - - - current_statement_begin__ = 134; - stan::math::assign(ev, initV); - current_statement_begin__ = 136; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 138; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 140; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (1 / (1 + 
stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(alpha,i,"alpha",1) - (get_base1(ev,1,"ev",1) - get_base1(ev,2,"ev",1))))))), - "assigning variable prob"); - current_statement_begin__ = 141; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - current_statement_begin__ = 143; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),prob))), - "assigning variable log_lik"); - current_statement_begin__ = 146; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(prob, base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 149; - stan::math::assign(PE, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - current_statement_begin__ = 150; - stan::math::assign(PEnc, (-(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1))); - current_statement_begin__ = 153; - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1), - "assigning variable mr_ev_c"); - current_statement_begin__ = 154; - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,(3 - 
get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1), - "assigning variable mr_ev_nc"); - current_statement_begin__ = 156; - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - PE, - "assigning variable mr_pe_c"); - current_statement_begin__ = 157; - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - PEnc, - "assigning variable mr_pe_nc"); - current_statement_begin__ = 158; - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - (PE - PEnc), - "assigning variable mr_dv"); - current_statement_begin__ = 161; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta,i,"eta",1) * PE)), - "assigning variable ev"); - current_statement_begin__ = 162; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta,i,"eta",1) * PEnc)), - "assigning variable ev"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 88; - check_greater_or_equal(function__,"mu_eta",mu_eta,0); - check_less_or_equal(function__,"mu_eta",mu_eta,1); - current_statement_begin__ = 89; - current_statement_begin__ = 90; - 
check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,5); - current_statement_begin__ = 93; - current_statement_begin__ = 96; - current_statement_begin__ = 97; - current_statement_begin__ = 99; - current_statement_begin__ = 100; - current_statement_begin__ = 101; - current_statement_begin__ = 104; - - // write generated quantities - vars__.push_back(mu_eta); - vars__.push_back(mu_alpha); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_dv[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - 
std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_prl_fictitious"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_prl_fictitious_multipleB_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_prl_fictitious_multipleB"); - reader.add_event(178, 176, "end", "model_prl_fictitious_multipleB"); - return reader; -} - -class model_prl_fictitious_multipleB : public prob_grad { -private: - int N; - int T; - int maxB; - vector B; - vector > Tsubj; - vector > > choice; - vector > > outcome; - vector_d initV; -public: - model_prl_fictitious_multipleB(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_prl_fictitious_multipleB(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - 
ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_prl_fictitious_multipleB_namespace::model_prl_fictitious_multipleB"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 8; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 9; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 10; - context__.validate_dims("data initialization", "maxB", "int", context__.to_vec()); - maxB = int(0); - vals_i__ = context__.vals_i("maxB"); - pos__ = 0; - maxB = vals_i__[pos__++]; - current_statement_begin__ = 11; - validate_non_negative_index("B", "N", N); - context__.validate_dims("data initialization", "B", "int", context__.to_vec(N)); - validate_non_negative_index("B", "N", N); - B = std::vector(N,int(0)); - vals_i__ = context__.vals_i("B"); - pos__ = 0; - size_t B_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < B_limit_0__; ++i_0__) { - B[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 12; - validate_non_negative_index("Tsubj", "N", N); - validate_non_negative_index("Tsubj", "maxB", maxB); - 
context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N,maxB)); - validate_non_negative_index("Tsubj", "N", N); - validate_non_negative_index("Tsubj", "maxB", maxB); - Tsubj = std::vector >(N,std::vector(maxB,int(0))); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_1__ = maxB; - for (size_t i_1__ = 0; i_1__ < Tsubj_limit_1__; ++i_1__) { - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 14; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "maxB", maxB); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,maxB,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "maxB", maxB); - validate_non_negative_index("choice", "T", T); - choice = std::vector > >(N,std::vector >(maxB,std::vector(T,int(0)))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_2__ = T; - for (size_t i_2__ = 0; i_2__ < choice_limit_2__; ++i_2__) { - size_t choice_limit_1__ = maxB; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__][i_2__] = vals_i__[pos__++]; - } - } - } - current_statement_begin__ = 15; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "maxB", maxB); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,maxB,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "maxB", maxB); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector > >(N,std::vector >(maxB,std::vector(T,double(0)))); - vals_r__ = 
context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_2__ = T; - for (size_t i_2__ = 0; i_2__ < outcome_limit_2__; ++i_2__) { - size_t outcome_limit_1__ = maxB; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__][i_2__] = vals_r__[pos__++]; - } - } - } - - // validate, data variables - current_statement_begin__ = 8; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 9; - check_greater_or_equal(function__,"T",T,0); - current_statement_begin__ = 10; - check_greater_or_equal(function__,"maxB",maxB,1); - current_statement_begin__ = 11; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"B[k0__]",B[k0__],1); - } - current_statement_begin__ = 12; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < maxB; ++k1__) { - check_greater_or_equal(function__,"Tsubj[k0__][k1__]",Tsubj[k0__][k1__],0); - check_less_or_equal(function__,"Tsubj[k0__][k1__]",Tsubj[k0__][k1__],T); - } - } - current_statement_begin__ = 14; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < maxB; ++k1__) { - for (int k2__ = 0; k2__ < T; ++k2__) { - check_greater_or_equal(function__,"choice[k0__][k1__][k2__]",choice[k0__][k1__][k2__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__][k2__]",choice[k0__][k1__][k2__],2); - } - } - } - current_statement_begin__ = 15; - // initialize data variables - current_statement_begin__ = 20; - validate_non_negative_index("initV", "2", 2); - initV = vector_d(static_cast(2)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 21; - stan::math::assign(initV, rep_vector(0.0,2)); - - // validate transformed data - current_statement_begin__ = 20; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 27; - validate_non_negative_index("mu_p", "3", 3); - 
num_params_r__ += 3; - current_statement_begin__ = 28; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 31; - validate_non_negative_index("eta_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 32; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 33; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_prl_fictitious_multipleB() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - 
sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("eta_pr"))) - throw std::runtime_error("variable eta_pr missing"); - vals_r__ = context__.vals_r("eta_pr"); - pos__ = 0U; - validate_non_negative_index("eta_pr", "N", N); - context__.validate_dims("initialization", "eta_pr", "vector_d", context__.to_vec(N)); - vector_d eta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - eta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(eta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable eta_pr: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - 
params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix eta_pr; - (void) eta_pr; // dummy to suppress unused var warning - if (jacobian__) - eta_pr = in__.vector_constrain(N,lp__); - else - eta_pr = in__.vector_constrain(N); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 38; - validate_non_negative_index("eta", "N", N); - Eigen::Matrix eta(static_cast(N)); - (void) eta; // dummy to suppress unused var warning - - 
stan::math::initialize(eta, DUMMY_VAR__); - stan::math::fill(eta,DUMMY_VAR__); - current_statement_begin__ = 39; - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 40; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 42; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 43; - stan::model::assign(eta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pr,i,"eta_pr",1)))), - "assigning variable eta"); - current_statement_begin__ = 44; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 10), - "assigning variable beta"); - } - current_statement_begin__ = 46; - stan::math::assign(alpha, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),alpha_pr))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(eta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: eta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if 
(stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 38; - check_greater_or_equal(function__,"eta",eta,0); - check_less_or_equal(function__,"eta",eta,1); - current_statement_begin__ = 39; - current_statement_begin__ = 40; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // model body - - current_statement_begin__ = 50; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 51; - lp_accum__.add(normal_log(get_base1(sigma,1,"sigma",1), 0, 0.20000000000000001)); - current_statement_begin__ = 52; - lp_accum__.add(cauchy_log(get_base1(sigma,2,"sigma",1), 0, 1.0)); - current_statement_begin__ = 53; - lp_accum__.add(normal_log(get_base1(sigma,3,"sigma",1), 0, 0.20000000000000001)); - current_statement_begin__ = 56; - lp_accum__.add(normal_log(eta_pr, 0, 1)); - current_statement_begin__ = 57; - lp_accum__.add(normal_log(alpha_pr, 0, 1)); - current_statement_begin__ = 58; - lp_accum__.add(normal_log(beta_pr, 0, 1)); - current_statement_begin__ = 60; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 61; - for (int bIdx = 1; bIdx <= get_base1(B,i,"B",1); ++bIdx) { - { - current_statement_begin__ = 63; - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 64; - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - current_statement_begin__ = 66; - 
local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - current_statement_begin__ = 67; - local_scalar_t__ PEnc; - (void) PEnc; // dummy to suppress unused var warning - - stan::math::initialize(PEnc, DUMMY_VAR__); - stan::math::fill(PEnc,DUMMY_VAR__); - - - current_statement_begin__ = 70; - stan::math::assign(ev, initV); - current_statement_begin__ = 72; - for (int t = 1; t <= get_base1(get_base1(Tsubj,i,"Tsubj",1),bIdx,"Tsubj",2); ++t) { - - current_statement_begin__ = 74; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(alpha,i,"alpha",1) - (get_base1(ev,1,"ev",1) - get_base1(ev,2,"ev",1))))))), - "assigning variable prob"); - current_statement_begin__ = 75; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - current_statement_begin__ = 76; - lp_accum__.add(categorical_log(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3), prob)); - current_statement_begin__ = 80; - stan::math::assign(PE, (get_base1(get_base1(get_base1(outcome,i,"outcome",1),bIdx,"outcome",2),t,"outcome",3) - get_base1(ev,get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),"ev",1))); - current_statement_begin__ = 81; - stan::math::assign(PEnc, (-(get_base1(get_base1(get_base1(outcome,i,"outcome",1),bIdx,"outcome",2),t,"outcome",3)) - get_base1(ev,(3 - get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)),"ev",1))); - current_statement_begin__ = 84; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), 
stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),"ev",1) + (get_base1(eta,i,"eta",1) * PE))), - "assigning variable ev"); - current_statement_begin__ = 85; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,(3 - get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)),"ev",1) + (get_base1(eta,i,"eta",1) * PEnc))), - "assigning variable ev"); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("eta_pr"); - names__.push_back("alpha_pr"); - names__.push_back("beta_pr"); - names__.push_back("eta"); - names__.push_back("alpha"); - names__.push_back("beta"); - names__.push_back("mu_eta"); - names__.push_back("mu_alpha"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("mr_ev_c"); - names__.push_back("mr_ev_nc"); - names__.push_back("mr_pe_c"); - names__.push_back("mr_pe_nc"); - names__.push_back("mr_dv"); - names__.push_back("y_pred"); - } - - - void 
get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = 
"model_prl_fictitious_multipleB_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d eta_pr = in__.vector_constrain(N); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 38; - validate_non_negative_index("eta", "N", N); - Eigen::Matrix eta(static_cast(N)); - (void) eta; // dummy to suppress unused var warning - - stan::math::initialize(eta, DUMMY_VAR__); - stan::math::fill(eta,DUMMY_VAR__); - current_statement_begin__ = 39; - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 40; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 42; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 43; - 
stan::model::assign(eta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pr,i,"eta_pr",1)))), - "assigning variable eta"); - current_statement_begin__ = 44; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 10), - "assigning variable beta"); - } - current_statement_begin__ = 46; - stan::math::assign(alpha, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),alpha_pr))); - - // validate transformed parameters - current_statement_begin__ = 38; - check_greater_or_equal(function__,"eta",eta,0); - check_less_or_equal(function__,"eta",eta,1); - current_statement_begin__ = 39; - current_statement_begin__ = 40; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 93; - local_scalar_t__ mu_eta; - (void) mu_eta; // dummy to suppress unused var warning - - stan::math::initialize(mu_eta, DUMMY_VAR__); - stan::math::fill(mu_eta,DUMMY_VAR__); - current_statement_begin__ = 94; - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - current_statement_begin__ = 95; - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, 
DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - current_statement_begin__ = 98; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 101; - validate_non_negative_index("mr_ev_c", "N", N); - validate_non_negative_index("mr_ev_c", "maxB", maxB); - validate_non_negative_index("mr_ev_c", "T", T); - vector > > mr_ev_c(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(mr_ev_c, DUMMY_VAR__); - stan::math::fill(mr_ev_c,DUMMY_VAR__); - current_statement_begin__ = 102; - validate_non_negative_index("mr_ev_nc", "N", N); - validate_non_negative_index("mr_ev_nc", "maxB", maxB); - validate_non_negative_index("mr_ev_nc", "T", T); - vector > > mr_ev_nc(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(mr_ev_nc, DUMMY_VAR__); - stan::math::fill(mr_ev_nc,DUMMY_VAR__); - current_statement_begin__ = 104; - validate_non_negative_index("mr_pe_c", "N", N); - validate_non_negative_index("mr_pe_c", "maxB", maxB); - validate_non_negative_index("mr_pe_c", "T", T); - vector > > mr_pe_c(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(mr_pe_c, DUMMY_VAR__); - stan::math::fill(mr_pe_c,DUMMY_VAR__); - current_statement_begin__ = 105; - validate_non_negative_index("mr_pe_nc", "N", N); - validate_non_negative_index("mr_pe_nc", "maxB", maxB); - validate_non_negative_index("mr_pe_nc", "T", T); - vector > > mr_pe_nc(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(mr_pe_nc, DUMMY_VAR__); - stan::math::fill(mr_pe_nc,DUMMY_VAR__); - current_statement_begin__ = 106; - validate_non_negative_index("mr_dv", "N", N); - validate_non_negative_index("mr_dv", "maxB", maxB); - validate_non_negative_index("mr_dv", "T", T); - vector > > mr_dv(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(mr_dv, DUMMY_VAR__); - stan::math::fill(mr_dv,DUMMY_VAR__); - current_statement_begin__ = 109; - 
validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "maxB", maxB); - validate_non_negative_index("y_pred", "T", T); - vector > > y_pred(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 112; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 113; - for (int b = 1; b <= maxB; ++b) { - - current_statement_begin__ = 114; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 115; - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - 0, - "assigning variable mr_ev_c"); - current_statement_begin__ = 116; - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - 0, - "assigning variable mr_ev_nc"); - current_statement_begin__ = 118; - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - 0, - "assigning variable mr_pe_c"); - current_statement_begin__ = 119; - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - 0, - "assigning variable mr_pe_nc"); - current_statement_begin__ = 120; - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - 0, - "assigning variable mr_dv"); - current_statement_begin__ = 122; - 
stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - -(1), - "assigning variable y_pred"); - } - } - } - current_statement_begin__ = 127; - stan::math::assign(mu_eta, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 128; - stan::math::assign(mu_alpha, get_base1(mu_p,2,"mu_p",1)); - current_statement_begin__ = 129; - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 10)); - - current_statement_begin__ = 132; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 134; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 136; - for (int bIdx = 1; bIdx <= get_base1(B,i,"B",1); ++bIdx) { - { - current_statement_begin__ = 138; - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 139; - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - current_statement_begin__ = 141; - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - current_statement_begin__ = 142; - local_scalar_t__ PEnc; - (void) PEnc; // dummy to suppress unused var warning - - stan::math::initialize(PEnc, DUMMY_VAR__); - stan::math::fill(PEnc,DUMMY_VAR__); - - - current_statement_begin__ = 145; - stan::math::assign(ev, initV); - current_statement_begin__ = 147; - for (int t = 1; t <= 
get_base1(get_base1(Tsubj,i,"Tsubj",1),bIdx,"Tsubj",2); ++t) { - - current_statement_begin__ = 149; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(alpha,i,"alpha",1) - (get_base1(ev,1,"ev",1) - get_base1(ev,2,"ev",1))))))), - "assigning variable prob"); - current_statement_begin__ = 150; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - current_statement_begin__ = 152; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_log(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),prob))), - "assigning variable log_lik"); - current_statement_begin__ = 155; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - categorical_rng(prob, base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 158; - stan::math::assign(PE, (get_base1(get_base1(get_base1(outcome,i,"outcome",1),bIdx,"outcome",2),t,"outcome",3) - get_base1(ev,get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),"ev",1))); - current_statement_begin__ = 159; - stan::math::assign(PEnc, (-(get_base1(get_base1(get_base1(outcome,i,"outcome",1),bIdx,"outcome",2),t,"outcome",3)) - get_base1(ev,(3 - get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)),"ev",1))); - current_statement_begin__ = 162; - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), 
stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - get_base1(ev,get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),"ev",1), - "assigning variable mr_ev_c"); - current_statement_begin__ = 163; - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - get_base1(ev,(3 - get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)),"ev",1), - "assigning variable mr_ev_nc"); - current_statement_begin__ = 165; - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - PE, - "assigning variable mr_pe_c"); - current_statement_begin__ = 166; - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - PEnc, - "assigning variable mr_pe_nc"); - current_statement_begin__ = 167; - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - (PE - PEnc), - "assigning variable mr_dv"); - current_statement_begin__ = 170; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),"ev",1) + (get_base1(eta,i,"eta",1) * PE))), - "assigning variable ev"); - current_statement_begin__ = 171; - stan::model::assign(ev, - 
stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,(3 - get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)),"ev",1) + (get_base1(eta,i,"eta",1) * PEnc))), - "assigning variable ev"); - } - } - } - } - - // validate generated quantities - current_statement_begin__ = 93; - check_greater_or_equal(function__,"mu_eta",mu_eta,0); - check_less_or_equal(function__,"mu_eta",mu_eta,1); - current_statement_begin__ = 94; - current_statement_begin__ = 95; - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,10); - current_statement_begin__ = 98; - current_statement_begin__ = 101; - current_statement_begin__ = 102; - current_statement_begin__ = 104; - current_statement_begin__ = 105; - current_statement_begin__ = 106; - current_statement_begin__ = 109; - - // write generated quantities - vars__.push_back(mu_eta); - vars__.push_back(mu_alpha); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_c[k_0__][k_1__][k_2__]); - } - } - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_nc[k_0__][k_1__][k_2__]); - } - } - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe_c[k_0__][k_1__][k_2__]); - } - } - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe_nc[k_0__][k_1__][k_2__]); - } - } - } - for (int 
k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_dv[k_0__][k_1__][k_2__]); - } - } - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__][k_2__]); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_prl_fictitious_multipleB"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' 
<< k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_prl_fictitious_rp_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_prl_fictitious_rp"); - reader.add_event(183, 181, "end", "model_prl_fictitious_rp"); - return reader; -} - -class model_prl_fictitious_rp : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > outcome; - vector_d initV; -public: - model_prl_fictitious_rp(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_prl_fictitious_rp(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - 
boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_prl_fictitious_rp_namespace::model_prl_fictitious_rp"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 8; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 9; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 10; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 11; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = 
N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 12; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 8; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 9; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 10; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 11; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],2); - } - } - current_statement_begin__ = 12; - // initialize data variables - current_statement_begin__ = 17; - validate_non_negative_index("initV", "2", 2); - initV = vector_d(static_cast(2)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 18; - stan::math::assign(initV, rep_vector(0.0,2)); - - // validate transformed data - current_statement_begin__ = 17; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 24; - 
validate_non_negative_index("mu_p", "4", 4); - num_params_r__ += 4; - current_statement_begin__ = 25; - validate_non_negative_index("sigma", "4", 4); - num_params_r__ += 4; - current_statement_begin__ = 28; - validate_non_negative_index("eta_pos_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 29; - validate_non_negative_index("eta_neg_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 30; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 31; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_prl_fictitious_rp() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "4", 4); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(4)); - vector_d mu_p(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "4", 4); - 
context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(4)); - vector_d sigma(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("eta_pos_pr"))) - throw std::runtime_error("variable eta_pos_pr missing"); - vals_r__ = context__.vals_r("eta_pos_pr"); - pos__ = 0U; - validate_non_negative_index("eta_pos_pr", "N", N); - context__.validate_dims("initialization", "eta_pos_pr", "vector_d", context__.to_vec(N)); - vector_d eta_pos_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - eta_pos_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(eta_pos_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable eta_pos_pr: ") + e.what()); - } - - if (!(context__.contains_r("eta_neg_pr"))) - throw std::runtime_error("variable eta_neg_pr missing"); - vals_r__ = context__.vals_r("eta_neg_pr"); - pos__ = 0U; - validate_non_negative_index("eta_neg_pr", "N", N); - context__.validate_dims("initialization", "eta_neg_pr", "vector_d", context__.to_vec(N)); - vector_d eta_neg_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - eta_neg_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(eta_neg_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable eta_neg_pr: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - 
alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(4,lp__); - else - mu_p = in__.vector_constrain(4); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,4,lp__); - else - sigma = 
in__.vector_lb_constrain(0,4); - - Eigen::Matrix eta_pos_pr; - (void) eta_pos_pr; // dummy to suppress unused var warning - if (jacobian__) - eta_pos_pr = in__.vector_constrain(N,lp__); - else - eta_pos_pr = in__.vector_constrain(N); - - Eigen::Matrix eta_neg_pr; - (void) eta_neg_pr; // dummy to suppress unused var warning - if (jacobian__) - eta_neg_pr = in__.vector_constrain(N,lp__); - else - eta_neg_pr = in__.vector_constrain(N); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 36; - validate_non_negative_index("eta_pos", "N", N); - Eigen::Matrix eta_pos(static_cast(N)); - (void) eta_pos; // dummy to suppress unused var warning - - stan::math::initialize(eta_pos, DUMMY_VAR__); - stan::math::fill(eta_pos,DUMMY_VAR__); - current_statement_begin__ = 37; - validate_non_negative_index("eta_neg", "N", N); - Eigen::Matrix eta_neg(static_cast(N)); - (void) eta_neg; // dummy to suppress unused var warning - - stan::math::initialize(eta_neg, DUMMY_VAR__); - stan::math::fill(eta_neg,DUMMY_VAR__); - current_statement_begin__ = 38; - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 39; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 41; - for (int i = 1; i <= N; ++i) { - 
- current_statement_begin__ = 42; - stan::model::assign(eta_pos, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pos_pr,i,"eta_pos_pr",1)))), - "assigning variable eta_pos"); - current_statement_begin__ = 43; - stan::model::assign(eta_neg, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(eta_neg_pr,i,"eta_neg_pr",1)))), - "assigning variable eta_neg"); - current_statement_begin__ = 44; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - current_statement_begin__ = 46; - stan::math::assign(alpha, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),alpha_pr))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(eta_pos(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: eta_pos" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(eta_neg(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: eta_neg" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << 
i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 36; - check_greater_or_equal(function__,"eta_pos",eta_pos,0); - check_less_or_equal(function__,"eta_pos",eta_pos,1); - current_statement_begin__ = 37; - check_greater_or_equal(function__,"eta_neg",eta_neg,0); - check_less_or_equal(function__,"eta_neg",eta_neg,1); - current_statement_begin__ = 38; - current_statement_begin__ = 39; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // model body - - current_statement_begin__ = 51; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 52; - lp_accum__.add(normal_log(stan::model::rvalue(sigma, stan::model::cons_list(stan::model::index_min_max(1, 2), stan::model::nil_index_list()), "sigma"), 0, 0.20000000000000001)); - current_statement_begin__ = 53; - lp_accum__.add(cauchy_log(get_base1(sigma,3,"sigma",1), 0, 1.0)); - current_statement_begin__ = 54; - lp_accum__.add(normal_log(get_base1(sigma,4,"sigma",1), 0, 0.20000000000000001)); - current_statement_begin__ = 57; - lp_accum__.add(normal_log(eta_pos_pr, 0, 1)); - current_statement_begin__ = 58; - lp_accum__.add(normal_log(eta_neg_pr, 0, 1)); - current_statement_begin__ = 59; - lp_accum__.add(normal_log(alpha_pr, 0, 1)); - current_statement_begin__ = 60; - lp_accum__.add(normal_log(beta_pr, 0, 1)); - current_statement_begin__ = 62; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 64; - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 65; - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - 
stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - current_statement_begin__ = 67; - local_scalar_t__ pe_c; - (void) pe_c; // dummy to suppress unused var warning - - stan::math::initialize(pe_c, DUMMY_VAR__); - stan::math::fill(pe_c,DUMMY_VAR__); - current_statement_begin__ = 68; - local_scalar_t__ pe_nc; - (void) pe_nc; // dummy to suppress unused var warning - - stan::math::initialize(pe_nc, DUMMY_VAR__); - stan::math::fill(pe_nc,DUMMY_VAR__); - - - current_statement_begin__ = 71; - stan::math::assign(ev, initV); - current_statement_begin__ = 73; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 75; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(alpha,i,"alpha",1) - (get_base1(ev,1,"ev",1) - get_base1(ev,2,"ev",1))))))), - "assigning variable prob"); - current_statement_begin__ = 76; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - current_statement_begin__ = 77; - lp_accum__.add(categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), prob)); - current_statement_begin__ = 80; - stan::math::assign(pe_c, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - current_statement_begin__ = 81; - stan::math::assign(pe_nc, (-(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1))); - current_statement_begin__ = 84; - if (as_bool(logical_gte(pe_c,0))) { - - current_statement_begin__ = 85; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), 
stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta_pos,i,"eta_pos",1) * pe_c)), - "assigning variable ev"); - current_statement_begin__ = 86; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta_pos,i,"eta_pos",1) * pe_nc)), - "assigning variable ev"); - } else { - - current_statement_begin__ = 88; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta_neg,i,"eta_neg",1) * pe_c)), - "assigning variable ev"); - current_statement_begin__ = 89; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta_neg,i,"eta_neg",1) * pe_nc)), - "assigning variable ev"); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ 
log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("eta_pos_pr"); - names__.push_back("eta_neg_pr"); - names__.push_back("alpha_pr"); - names__.push_back("beta_pr"); - names__.push_back("eta_pos"); - names__.push_back("eta_neg"); - names__.push_back("alpha"); - names__.push_back("beta"); - names__.push_back("mu_eta_pos"); - names__.push_back("mu_eta_neg"); - names__.push_back("mu_alpha"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("mr_ev_c"); - names__.push_back("mr_ev_nc"); - names__.push_back("mr_pe_c"); - names__.push_back("mr_pe_nc"); - names__.push_back("mr_dv"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - 
dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_prl_fictitious_rp_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(4); - vector_d sigma = in__.vector_lb_constrain(0,4); - vector_d eta_pos_pr = in__.vector_constrain(N); - vector_d eta_neg_pr = in__.vector_constrain(N); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_pos_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_neg_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < 
N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 36; - validate_non_negative_index("eta_pos", "N", N); - Eigen::Matrix eta_pos(static_cast(N)); - (void) eta_pos; // dummy to suppress unused var warning - - stan::math::initialize(eta_pos, DUMMY_VAR__); - stan::math::fill(eta_pos,DUMMY_VAR__); - current_statement_begin__ = 37; - validate_non_negative_index("eta_neg", "N", N); - Eigen::Matrix eta_neg(static_cast(N)); - (void) eta_neg; // dummy to suppress unused var warning - - stan::math::initialize(eta_neg, DUMMY_VAR__); - stan::math::fill(eta_neg,DUMMY_VAR__); - current_statement_begin__ = 38; - validate_non_negative_index("alpha", "N", N); - Eigen::Matrix alpha(static_cast(N)); - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 39; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 41; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 42; - stan::model::assign(eta_pos, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pos_pr,i,"eta_pos_pr",1)))), - "assigning variable eta_pos"); - current_statement_begin__ = 43; - stan::model::assign(eta_neg, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + 
(get_base1(sigma,2,"sigma",1) * get_base1(eta_neg_pr,i,"eta_neg_pr",1)))), - "assigning variable eta_neg"); - current_statement_begin__ = 44; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - current_statement_begin__ = 46; - stan::math::assign(alpha, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),alpha_pr))); - - // validate transformed parameters - current_statement_begin__ = 36; - check_greater_or_equal(function__,"eta_pos",eta_pos,0); - check_less_or_equal(function__,"eta_pos",eta_pos,1); - current_statement_begin__ = 37; - check_greater_or_equal(function__,"eta_neg",eta_neg,0); - check_less_or_equal(function__,"eta_neg",eta_neg,1); - current_statement_begin__ = 38; - current_statement_begin__ = 39; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_pos[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_neg[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 97; - local_scalar_t__ mu_eta_pos; - (void) mu_eta_pos; // dummy to suppress unused var warning - - stan::math::initialize(mu_eta_pos, DUMMY_VAR__); - stan::math::fill(mu_eta_pos,DUMMY_VAR__); - current_statement_begin__ = 98; - local_scalar_t__ mu_eta_neg; - (void) mu_eta_neg; // dummy to suppress unused var warning - - stan::math::initialize(mu_eta_neg, DUMMY_VAR__); - stan::math::fill(mu_eta_neg,DUMMY_VAR__); - current_statement_begin__ 
= 99; - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - current_statement_begin__ = 100; - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - current_statement_begin__ = 103; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 106; - validate_non_negative_index("mr_ev_c", "N", N); - validate_non_negative_index("mr_ev_c", "T", T); - vector > mr_ev_c(N, (vector(T))); - stan::math::initialize(mr_ev_c, DUMMY_VAR__); - stan::math::fill(mr_ev_c,DUMMY_VAR__); - current_statement_begin__ = 107; - validate_non_negative_index("mr_ev_nc", "N", N); - validate_non_negative_index("mr_ev_nc", "T", T); - vector > mr_ev_nc(N, (vector(T))); - stan::math::initialize(mr_ev_nc, DUMMY_VAR__); - stan::math::fill(mr_ev_nc,DUMMY_VAR__); - current_statement_begin__ = 109; - validate_non_negative_index("mr_pe_c", "N", N); - validate_non_negative_index("mr_pe_c", "T", T); - vector > mr_pe_c(N, (vector(T))); - stan::math::initialize(mr_pe_c, DUMMY_VAR__); - stan::math::fill(mr_pe_c,DUMMY_VAR__); - current_statement_begin__ = 110; - validate_non_negative_index("mr_pe_nc", "N", N); - validate_non_negative_index("mr_pe_nc", "T", T); - vector > mr_pe_nc(N, (vector(T))); - stan::math::initialize(mr_pe_nc, DUMMY_VAR__); - stan::math::fill(mr_pe_nc,DUMMY_VAR__); - current_statement_begin__ = 112; - validate_non_negative_index("mr_dv", "N", N); - validate_non_negative_index("mr_dv", "T", T); - vector > mr_dv(N, (vector(T))); - stan::math::initialize(mr_dv, DUMMY_VAR__); - stan::math::fill(mr_dv,DUMMY_VAR__); - current_statement_begin__ = 115; - validate_non_negative_index("y_pred", "N", N); - 
validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 118; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 119; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 120; - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_c"); - current_statement_begin__ = 121; - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_nc"); - current_statement_begin__ = 122; - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_pe_c"); - current_statement_begin__ = 123; - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_pe_nc"); - current_statement_begin__ = 124; - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_dv"); - current_statement_begin__ = 126; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 130; - stan::math::assign(mu_eta_pos, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 131; - stan::math::assign(mu_eta_neg, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - 
current_statement_begin__ = 132; - stan::math::assign(mu_alpha, get_base1(mu_p,3,"mu_p",1)); - current_statement_begin__ = 133; - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,4,"mu_p",1)) * 5)); - - current_statement_begin__ = 136; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 138; - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 139; - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - current_statement_begin__ = 141; - local_scalar_t__ pe_c; - (void) pe_c; // dummy to suppress unused var warning - - stan::math::initialize(pe_c, DUMMY_VAR__); - stan::math::fill(pe_c,DUMMY_VAR__); - current_statement_begin__ = 142; - local_scalar_t__ pe_nc; - (void) pe_nc; // dummy to suppress unused var warning - - stan::math::initialize(pe_nc, DUMMY_VAR__); - stan::math::fill(pe_nc,DUMMY_VAR__); - - - current_statement_begin__ = 145; - stan::math::assign(ev, initV); - current_statement_begin__ = 147; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 149; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 151; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(alpha,i,"alpha",1) - (get_base1(ev,1,"ev",1) - get_base1(ev,2,"ev",1))))))), - "assigning variable prob"); - current_statement_begin__ = 152; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), 
stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - current_statement_begin__ = 154; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),prob)), - "assigning variable log_lik"); - current_statement_begin__ = 157; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(prob, base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 160; - stan::math::assign(pe_c, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - current_statement_begin__ = 161; - stan::math::assign(pe_nc, (-(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1))); - current_statement_begin__ = 164; - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1), - "assigning variable mr_ev_c"); - current_statement_begin__ = 165; - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1), - "assigning variable mr_ev_nc"); - current_statement_begin__ = 166; - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), 
stan::model::nil_index_list())), - pe_c, - "assigning variable mr_pe_c"); - current_statement_begin__ = 167; - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - pe_nc, - "assigning variable mr_pe_nc"); - current_statement_begin__ = 168; - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - (pe_c - pe_nc), - "assigning variable mr_dv"); - current_statement_begin__ = 171; - if (as_bool(logical_gte(pe_c,0))) { - - current_statement_begin__ = 172; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta_pos,i,"eta_pos",1) * pe_c)), - "assigning variable ev"); - current_statement_begin__ = 173; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta_pos,i,"eta_pos",1) * pe_nc)), - "assigning variable ev"); - } else { - - current_statement_begin__ = 175; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta_neg,i,"eta_neg",1) * pe_c)), - "assigning variable ev"); - 
current_statement_begin__ = 176; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta_neg,i,"eta_neg",1) * pe_nc)), - "assigning variable ev"); - } - } - } - } - - // validate generated quantities - current_statement_begin__ = 97; - check_greater_or_equal(function__,"mu_eta_pos",mu_eta_pos,0); - check_less_or_equal(function__,"mu_eta_pos",mu_eta_pos,1); - current_statement_begin__ = 98; - check_greater_or_equal(function__,"mu_eta_neg",mu_eta_neg,0); - check_less_or_equal(function__,"mu_eta_neg",mu_eta_neg,1); - current_statement_begin__ = 99; - current_statement_begin__ = 100; - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,5); - current_statement_begin__ = 103; - current_statement_begin__ = 106; - current_statement_begin__ = 107; - current_statement_begin__ = 109; - current_statement_begin__ = 110; - current_statement_begin__ = 112; - current_statement_begin__ = 115; - - // write generated quantities - vars__.push_back(mu_eta_pos); - vars__.push_back(mu_eta_neg); - vars__.push_back(mu_alpha); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - 
vars__.push_back(mr_pe_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_dv[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_prl_fictitious_rp"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pos_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_neg_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pos" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_neg" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta_pos"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta_neg"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pos_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_neg_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pos" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_neg" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta_pos"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta_neg"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_prl_fictitious_rp_woa_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_prl_fictitious_rp_woa"); - reader.add_event(175, 173, "end", "model_prl_fictitious_rp_woa"); - return reader; -} - -class model_prl_fictitious_rp_woa : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > outcome; - vector_d initV; -public: - model_prl_fictitious_rp_woa(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_prl_fictitious_rp_woa(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_prl_fictitious_rp_woa_namespace::model_prl_fictitious_rp_woa"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - 
current_statement_begin__ = 8; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 9; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 10; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 11; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 12; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; 
i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 8; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 9; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 10; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 11; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],2); - } - } - current_statement_begin__ = 12; - // initialize data variables - current_statement_begin__ = 17; - validate_non_negative_index("initV", "2", 2); - initV = vector_d(static_cast(2)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 18; - stan::math::assign(initV, rep_vector(0.0,2)); - - // validate transformed data - current_statement_begin__ = 17; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 24; - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 25; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 28; - validate_non_negative_index("eta_pos_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 29; - validate_non_negative_index("eta_neg_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 30; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line 
prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_prl_fictitious_rp_woa() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("eta_pos_pr"))) - throw std::runtime_error("variable eta_pos_pr missing"); - vals_r__ = context__.vals_r("eta_pos_pr"); - pos__ = 0U; - validate_non_negative_index("eta_pos_pr", "N", N); - context__.validate_dims("initialization", "eta_pos_pr", "vector_d", context__.to_vec(N)); - vector_d eta_pos_pr(static_cast(N)); - for (int j1__ = 0U; 
j1__ < N; ++j1__) - eta_pos_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(eta_pos_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable eta_pos_pr: ") + e.what()); - } - - if (!(context__.contains_r("eta_neg_pr"))) - throw std::runtime_error("variable eta_neg_pr missing"); - vals_r__ = context__.vals_r("eta_neg_pr"); - pos__ = 0U; - validate_non_negative_index("eta_neg_pr", "N", N); - context__.validate_dims("initialization", "eta_neg_pr", "vector_d", context__.to_vec(N)); - vector_d eta_neg_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - eta_neg_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(eta_neg_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable eta_neg_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 
0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix eta_pos_pr; - (void) eta_pos_pr; // dummy to suppress unused var warning - if (jacobian__) - eta_pos_pr = in__.vector_constrain(N,lp__); - else - eta_pos_pr = in__.vector_constrain(N); - - Eigen::Matrix eta_neg_pr; - (void) eta_neg_pr; // dummy to suppress unused var warning - if (jacobian__) - eta_neg_pr = in__.vector_constrain(N,lp__); - else - eta_neg_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 35; - validate_non_negative_index("eta_pos", "N", N); - Eigen::Matrix eta_pos(static_cast(N)); - (void) eta_pos; // dummy to suppress unused var warning - - stan::math::initialize(eta_pos, DUMMY_VAR__); - stan::math::fill(eta_pos,DUMMY_VAR__); - current_statement_begin__ = 36; - validate_non_negative_index("eta_neg", "N", N); - Eigen::Matrix eta_neg(static_cast(N)); - (void) eta_neg; // dummy to suppress unused var warning - - stan::math::initialize(eta_neg, DUMMY_VAR__); - stan::math::fill(eta_neg,DUMMY_VAR__); - current_statement_begin__ = 37; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to 
suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 39; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 40; - stan::model::assign(eta_pos, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pos_pr,i,"eta_pos_pr",1)))), - "assigning variable eta_pos"); - current_statement_begin__ = 41; - stan::model::assign(eta_neg, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(eta_neg_pr,i,"eta_neg_pr",1)))), - "assigning variable eta_neg"); - current_statement_begin__ = 42; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(eta_pos(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: eta_pos" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(eta_neg(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: eta_neg" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - 
current_statement_begin__ = 35; - check_greater_or_equal(function__,"eta_pos",eta_pos,0); - check_less_or_equal(function__,"eta_pos",eta_pos,1); - current_statement_begin__ = 36; - check_greater_or_equal(function__,"eta_neg",eta_neg,0); - check_less_or_equal(function__,"eta_neg",eta_neg,1); - current_statement_begin__ = 37; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // model body - - current_statement_begin__ = 48; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 49; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 52; - lp_accum__.add(normal_log(eta_pos_pr, 0, 1)); - current_statement_begin__ = 53; - lp_accum__.add(normal_log(eta_neg_pr, 0, 1)); - current_statement_begin__ = 54; - lp_accum__.add(normal_log(beta_pr, 0, 1)); - current_statement_begin__ = 56; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 58; - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 59; - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - current_statement_begin__ = 61; - local_scalar_t__ pe_c; - (void) pe_c; // dummy to suppress unused var warning - - stan::math::initialize(pe_c, DUMMY_VAR__); - stan::math::fill(pe_c,DUMMY_VAR__); - current_statement_begin__ = 62; - local_scalar_t__ pe_nc; - (void) pe_nc; // dummy to suppress unused var warning - - stan::math::initialize(pe_nc, DUMMY_VAR__); - stan::math::fill(pe_nc,DUMMY_VAR__); - - - current_statement_begin__ = 65; - stan::math::assign(ev, initV); - current_statement_begin__ = 67; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); 
++t) { - - current_statement_begin__ = 69; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(ev,2,"ev",1) - get_base1(ev,1,"ev",1)))))), - "assigning variable prob"); - current_statement_begin__ = 70; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - current_statement_begin__ = 71; - lp_accum__.add(categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), prob)); - current_statement_begin__ = 74; - stan::math::assign(pe_c, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - current_statement_begin__ = 75; - stan::math::assign(pe_nc, (-(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1))); - current_statement_begin__ = 78; - if (as_bool(logical_gte(pe_c,0))) { - - current_statement_begin__ = 79; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta_pos,i,"eta_pos",1) * pe_c)), - "assigning variable ev"); - current_statement_begin__ = 80; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + 
(get_base1(eta_pos,i,"eta_pos",1) * pe_nc)), - "assigning variable ev"); - } else { - - current_statement_begin__ = 82; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta_neg,i,"eta_neg",1) * pe_c)), - "assigning variable ev"); - current_statement_begin__ = 83; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta_neg,i,"eta_neg",1) * pe_nc)), - "assigning variable ev"); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("eta_pos_pr"); - names__.push_back("eta_neg_pr"); - names__.push_back("beta_pr"); - names__.push_back("eta_pos"); - names__.push_back("eta_neg"); - names__.push_back("beta"); - 
names__.push_back("mu_eta_pos"); - names__.push_back("mu_eta_neg"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("mr_ev_c"); - names__.push_back("mr_ev_nc"); - names__.push_back("mr_pe_c"); - names__.push_back("mr_pe_nc"); - names__.push_back("mr_dv"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* 
pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_prl_fictitious_rp_woa_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d eta_pos_pr = in__.vector_constrain(N); - vector_d eta_neg_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_pos_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_neg_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 35; - validate_non_negative_index("eta_pos", "N", N); - Eigen::Matrix eta_pos(static_cast(N)); - (void) eta_pos; // dummy to suppress unused var warning - - stan::math::initialize(eta_pos, DUMMY_VAR__); - stan::math::fill(eta_pos,DUMMY_VAR__); - current_statement_begin__ = 36; - validate_non_negative_index("eta_neg", "N", N); - Eigen::Matrix eta_neg(static_cast(N)); - (void) eta_neg; // dummy to suppress unused var warning - - stan::math::initialize(eta_neg, DUMMY_VAR__); - stan::math::fill(eta_neg,DUMMY_VAR__); - current_statement_begin__ = 37; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - 
stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 39; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 40; - stan::model::assign(eta_pos, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pos_pr,i,"eta_pos_pr",1)))), - "assigning variable eta_pos"); - current_statement_begin__ = 41; - stan::model::assign(eta_neg, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(eta_neg_pr,i,"eta_neg_pr",1)))), - "assigning variable eta_neg"); - current_statement_begin__ = 42; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - current_statement_begin__ = 35; - check_greater_or_equal(function__,"eta_pos",eta_pos,0); - check_less_or_equal(function__,"eta_pos",eta_pos,1); - current_statement_begin__ = 36; - check_greater_or_equal(function__,"eta_neg",eta_neg,0); - check_less_or_equal(function__,"eta_neg",eta_neg,1); - current_statement_begin__ = 37; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_pos[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_neg[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 91; - local_scalar_t__ mu_eta_pos; - (void) mu_eta_pos; // 
dummy to suppress unused var warning - - stan::math::initialize(mu_eta_pos, DUMMY_VAR__); - stan::math::fill(mu_eta_pos,DUMMY_VAR__); - current_statement_begin__ = 92; - local_scalar_t__ mu_eta_neg; - (void) mu_eta_neg; // dummy to suppress unused var warning - - stan::math::initialize(mu_eta_neg, DUMMY_VAR__); - stan::math::fill(mu_eta_neg,DUMMY_VAR__); - current_statement_begin__ = 93; - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - current_statement_begin__ = 96; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 99; - validate_non_negative_index("mr_ev_c", "N", N); - validate_non_negative_index("mr_ev_c", "T", T); - vector > mr_ev_c(N, (vector(T))); - stan::math::initialize(mr_ev_c, DUMMY_VAR__); - stan::math::fill(mr_ev_c,DUMMY_VAR__); - current_statement_begin__ = 100; - validate_non_negative_index("mr_ev_nc", "N", N); - validate_non_negative_index("mr_ev_nc", "T", T); - vector > mr_ev_nc(N, (vector(T))); - stan::math::initialize(mr_ev_nc, DUMMY_VAR__); - stan::math::fill(mr_ev_nc,DUMMY_VAR__); - current_statement_begin__ = 102; - validate_non_negative_index("mr_pe_c", "N", N); - validate_non_negative_index("mr_pe_c", "T", T); - vector > mr_pe_c(N, (vector(T))); - stan::math::initialize(mr_pe_c, DUMMY_VAR__); - stan::math::fill(mr_pe_c,DUMMY_VAR__); - current_statement_begin__ = 103; - validate_non_negative_index("mr_pe_nc", "N", N); - validate_non_negative_index("mr_pe_nc", "T", T); - vector > mr_pe_nc(N, (vector(T))); - stan::math::initialize(mr_pe_nc, DUMMY_VAR__); - stan::math::fill(mr_pe_nc,DUMMY_VAR__); - current_statement_begin__ = 105; - validate_non_negative_index("mr_dv", "N", N); - validate_non_negative_index("mr_dv", "T", T); - vector > mr_dv(N, (vector(T))); - 
stan::math::initialize(mr_dv, DUMMY_VAR__); - stan::math::fill(mr_dv,DUMMY_VAR__); - current_statement_begin__ = 108; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 111; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 112; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 113; - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_c"); - current_statement_begin__ = 114; - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_nc"); - current_statement_begin__ = 115; - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_pe_c"); - current_statement_begin__ = 116; - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_pe_nc"); - current_statement_begin__ = 117; - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_dv"); - current_statement_begin__ = 119; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 123; - stan::math::assign(mu_eta_pos, 
Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 124; - stan::math::assign(mu_eta_neg, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 125; - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 5)); - - current_statement_begin__ = 128; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 130; - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 131; - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - current_statement_begin__ = 133; - local_scalar_t__ pe_c; - (void) pe_c; // dummy to suppress unused var warning - - stan::math::initialize(pe_c, DUMMY_VAR__); - stan::math::fill(pe_c,DUMMY_VAR__); - current_statement_begin__ = 134; - local_scalar_t__ pe_nc; - (void) pe_nc; // dummy to suppress unused var warning - - stan::math::initialize(pe_nc, DUMMY_VAR__); - stan::math::fill(pe_nc,DUMMY_VAR__); - - - current_statement_begin__ = 137; - stan::math::assign(ev, initV); - current_statement_begin__ = 139; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 141; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 143; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(ev,2,"ev",1) - get_base1(ev,1,"ev",1)))))), - "assigning variable prob"); - current_statement_begin__ = 144; - stan::model::assign(prob, - 
stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - current_statement_begin__ = 146; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),prob)), - "assigning variable log_lik"); - current_statement_begin__ = 149; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(prob, base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 152; - stan::math::assign(pe_c, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - current_statement_begin__ = 153; - stan::math::assign(pe_nc, (-(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1))); - current_statement_begin__ = 156; - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1), - "assigning variable mr_ev_c"); - current_statement_begin__ = 157; - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1), - "assigning variable mr_ev_nc"); - current_statement_begin__ = 158; - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), 
stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - pe_c, - "assigning variable mr_pe_c"); - current_statement_begin__ = 159; - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - pe_nc, - "assigning variable mr_pe_nc"); - current_statement_begin__ = 160; - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - (pe_c - pe_nc), - "assigning variable mr_dv"); - current_statement_begin__ = 163; - if (as_bool(logical_gte(pe_c,0))) { - - current_statement_begin__ = 164; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta_pos,i,"eta_pos",1) * pe_c)), - "assigning variable ev"); - current_statement_begin__ = 165; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta_pos,i,"eta_pos",1) * pe_nc)), - "assigning variable ev"); - } else { - - current_statement_begin__ = 167; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta_neg,i,"eta_neg",1) 
* pe_c)), - "assigning variable ev"); - current_statement_begin__ = 168; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta_neg,i,"eta_neg",1) * pe_nc)), - "assigning variable ev"); - } - } - } - } - - // validate generated quantities - current_statement_begin__ = 91; - check_greater_or_equal(function__,"mu_eta_pos",mu_eta_pos,0); - check_less_or_equal(function__,"mu_eta_pos",mu_eta_pos,1); - current_statement_begin__ = 92; - check_greater_or_equal(function__,"mu_eta_neg",mu_eta_neg,0); - check_less_or_equal(function__,"mu_eta_neg",mu_eta_neg,1); - current_statement_begin__ = 93; - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,5); - current_statement_begin__ = 96; - current_statement_begin__ = 99; - current_statement_begin__ = 100; - current_statement_begin__ = 102; - current_statement_begin__ = 103; - current_statement_begin__ = 105; - current_statement_begin__ = 108; - - // write generated quantities - vars__.push_back(mu_eta_pos); - vars__.push_back(mu_eta_neg); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - 
vars__.push_back(mr_pe_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_dv[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_prl_fictitious_rp_woa"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pos_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_neg_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pos" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_neg" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta_pos"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta_neg"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pos_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_neg_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pos" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_neg" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta_pos"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta_neg"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_prl_fictitious_woa_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_prl_fictitious_woa"); - reader.add_event(160, 158, "end", "model_prl_fictitious_woa"); - return reader; -} - -class model_prl_fictitious_woa : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > outcome; - vector_d initV; -public: - model_prl_fictitious_woa(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_prl_fictitious_woa(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_prl_fictitious_woa_namespace::model_prl_fictitious_woa"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 8; - 
context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 9; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 10; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 11; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 12; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) 
{ - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 8; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 9; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 10; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 11; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],2); - } - } - current_statement_begin__ = 12; - // initialize data variables - current_statement_begin__ = 17; - validate_non_negative_index("initV", "2", 2); - initV = vector_d(static_cast(2)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 18; - stan::math::assign(initV, rep_vector(0.0,2)); - - // validate transformed data - current_statement_begin__ = 17; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 24; - validate_non_negative_index("mu_p", "2", 2); - num_params_r__ += 2; - current_statement_begin__ = 25; - validate_non_negative_index("sigma", "2", 2); - num_params_r__ += 2; - current_statement_begin__ = 28; - validate_non_negative_index("eta_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 29; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - 
~model_prl_fictitious_woa() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "2", 2); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(2)); - vector_d mu_p(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "2", 2); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(2)); - vector_d sigma(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("eta_pr"))) - throw std::runtime_error("variable eta_pr missing"); - vals_r__ = context__.vals_r("eta_pr"); - pos__ = 0U; - validate_non_negative_index("eta_pr", "N", N); - context__.validate_dims("initialization", "eta_pr", "vector_d", context__.to_vec(N)); - vector_d eta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - eta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(eta_pr); - } catch (const std::exception& e) { - throw 
std::runtime_error(std::string("Error transforming variable eta_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(2,lp__); - else - mu_p = in__.vector_constrain(2); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,2,lp__); - else - sigma = in__.vector_lb_constrain(0,2); - - Eigen::Matrix eta_pr; - (void) eta_pr; // dummy to suppress unused var warning - if (jacobian__) - 
eta_pr = in__.vector_constrain(N,lp__); - else - eta_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 34; - validate_non_negative_index("eta", "N", N); - Eigen::Matrix eta(static_cast(N)); - (void) eta; // dummy to suppress unused var warning - - stan::math::initialize(eta, DUMMY_VAR__); - stan::math::fill(eta,DUMMY_VAR__); - current_statement_begin__ = 35; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 37; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 38; - stan::model::assign(eta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pr,i,"eta_pr",1)))), - "assigning variable eta"); - current_statement_begin__ = 39; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(eta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: eta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - 
const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 34; - check_greater_or_equal(function__,"eta",eta,0); - check_less_or_equal(function__,"eta",eta,1); - current_statement_begin__ = 35; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // model body - - current_statement_begin__ = 45; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 46; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 49; - lp_accum__.add(normal_log(eta_pr, 0, 1)); - current_statement_begin__ = 50; - lp_accum__.add(normal_log(beta_pr, 0, 1)); - current_statement_begin__ = 52; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 54; - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 55; - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - current_statement_begin__ = 57; - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - current_statement_begin__ = 58; - local_scalar_t__ PEnc; - (void) PEnc; // dummy to suppress unused var warning - - stan::math::initialize(PEnc, DUMMY_VAR__); - stan::math::fill(PEnc,DUMMY_VAR__); - - - current_statement_begin__ = 61; - stan::math::assign(ev, initV); - current_statement_begin__ = 63; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 65; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), 
stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(ev,2,"ev",1) - get_base1(ev,1,"ev",1)))))), - "assigning variable prob"); - current_statement_begin__ = 66; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - current_statement_begin__ = 67; - lp_accum__.add(categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), prob)); - current_statement_begin__ = 70; - stan::math::assign(PE, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - current_statement_begin__ = 71; - stan::math::assign(PEnc, (-(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1))); - current_statement_begin__ = 74; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1) + (get_base1(eta,i,"eta",1) * PE))), - "assigning variable ev"); - current_statement_begin__ = 75; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1) + (get_base1(eta,i,"eta",1) * PEnc))), - "assigning variable ev"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return 
lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("eta_pr"); - names__.push_back("beta_pr"); - names__.push_back("eta"); - names__.push_back("beta"); - names__.push_back("mu_eta"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("mr_ev_c"); - names__.push_back("mr_ev_nc"); - names__.push_back("mr_pe_c"); - names__.push_back("mr_pe_nc"); - names__.push_back("mr_dv"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - 
dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_prl_fictitious_woa_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(2); - vector_d sigma = in__.vector_lb_constrain(0,2); - vector_d eta_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 34; - validate_non_negative_index("eta", "N", N); - Eigen::Matrix eta(static_cast(N)); - (void) eta; // dummy to suppress unused var warning - - stan::math::initialize(eta, DUMMY_VAR__); - stan::math::fill(eta,DUMMY_VAR__); - current_statement_begin__ = 35; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - 
stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 37; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 38; - stan::model::assign(eta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(eta_pr,i,"eta_pr",1)))), - "assigning variable eta"); - current_statement_begin__ = 39; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 5), - "assigning variable beta"); - } - - // validate transformed parameters - current_statement_begin__ = 34; - check_greater_or_equal(function__,"eta",eta,0); - check_less_or_equal(function__,"eta",eta,1); - current_statement_begin__ = 35; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,5); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(eta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 82; - local_scalar_t__ mu_eta; - (void) mu_eta; // dummy to suppress unused var warning - - stan::math::initialize(mu_eta, DUMMY_VAR__); - stan::math::fill(mu_eta,DUMMY_VAR__); - current_statement_begin__ = 83; - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - current_statement_begin__ = 86; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 89; - validate_non_negative_index("mr_ev_c", "N", N); - 
validate_non_negative_index("mr_ev_c", "T", T); - vector > mr_ev_c(N, (vector(T))); - stan::math::initialize(mr_ev_c, DUMMY_VAR__); - stan::math::fill(mr_ev_c,DUMMY_VAR__); - current_statement_begin__ = 90; - validate_non_negative_index("mr_ev_nc", "N", N); - validate_non_negative_index("mr_ev_nc", "T", T); - vector > mr_ev_nc(N, (vector(T))); - stan::math::initialize(mr_ev_nc, DUMMY_VAR__); - stan::math::fill(mr_ev_nc,DUMMY_VAR__); - current_statement_begin__ = 92; - validate_non_negative_index("mr_pe_c", "N", N); - validate_non_negative_index("mr_pe_c", "T", T); - vector > mr_pe_c(N, (vector(T))); - stan::math::initialize(mr_pe_c, DUMMY_VAR__); - stan::math::fill(mr_pe_c,DUMMY_VAR__); - current_statement_begin__ = 93; - validate_non_negative_index("mr_pe_nc", "N", N); - validate_non_negative_index("mr_pe_nc", "T", T); - vector > mr_pe_nc(N, (vector(T))); - stan::math::initialize(mr_pe_nc, DUMMY_VAR__); - stan::math::fill(mr_pe_nc,DUMMY_VAR__); - current_statement_begin__ = 94; - validate_non_negative_index("mr_dv", "N", N); - validate_non_negative_index("mr_dv", "T", T); - vector > mr_dv(N, (vector(T))); - stan::math::initialize(mr_dv, DUMMY_VAR__); - stan::math::fill(mr_dv,DUMMY_VAR__); - current_statement_begin__ = 97; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 100; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 101; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 102; - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_c"); - current_statement_begin__ = 103; - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), 
stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_nc"); - current_statement_begin__ = 105; - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_pe_c"); - current_statement_begin__ = 106; - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_pe_nc"); - current_statement_begin__ = 107; - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_dv"); - current_statement_begin__ = 109; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 113; - stan::math::assign(mu_eta, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 114; - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 5)); - - current_statement_begin__ = 117; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 119; - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 120; - validate_non_negative_index("prob", "2", 2); - Eigen::Matrix prob(static_cast(2)); - (void) prob; // dummy to suppress unused var warning - - stan::math::initialize(prob, DUMMY_VAR__); - stan::math::fill(prob,DUMMY_VAR__); - current_statement_begin__ = 122; - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - 
- stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - current_statement_begin__ = 123; - local_scalar_t__ PEnc; - (void) PEnc; // dummy to suppress unused var warning - - stan::math::initialize(PEnc, DUMMY_VAR__); - stan::math::fill(PEnc,DUMMY_VAR__); - - - current_statement_begin__ = 126; - stan::math::assign(ev, initV); - current_statement_begin__ = 128; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 130; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 132; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - (1 / (1 + stan::math::exp((get_base1(beta,i,"beta",1) * (get_base1(ev,2,"ev",1) - get_base1(ev,1,"ev",1)))))), - "assigning variable prob"); - current_statement_begin__ = 133; - stan::model::assign(prob, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - stan::model::deep_copy((1 - get_base1(prob,1,"prob",1))), - "assigning variable prob"); - current_statement_begin__ = 135; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),prob))), - "assigning variable log_lik"); - current_statement_begin__ = 138; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(prob, base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 141; - stan::math::assign(PE, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - current_statement_begin__ = 142; - 
stan::math::assign(PEnc, (-(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1))); - current_statement_begin__ = 145; - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1), - "assigning variable mr_ev_c"); - current_statement_begin__ = 146; - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1), - "assigning variable mr_ev_nc"); - current_statement_begin__ = 148; - stan::model::assign(mr_pe_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - PE, - "assigning variable mr_pe_c"); - current_statement_begin__ = 149; - stan::model::assign(mr_pe_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - PEnc, - "assigning variable mr_pe_nc"); - current_statement_begin__ = 150; - stan::model::assign(mr_dv, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - (PE - PEnc), - "assigning variable mr_dv"); - current_statement_begin__ = 153; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(eta,i,"eta",1) * PE)), - "assigning variable ev"); - current_statement_begin__ = 
154; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni((3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2))), stan::model::nil_index_list()), "ev") + (get_base1(eta,i,"eta",1) * PEnc)), - "assigning variable ev"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 82; - check_greater_or_equal(function__,"mu_eta",mu_eta,0); - check_less_or_equal(function__,"mu_eta",mu_eta,1); - current_statement_begin__ = 83; - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,5); - current_statement_begin__ = 86; - current_statement_begin__ = 89; - current_statement_begin__ = 90; - current_statement_begin__ = 92; - current_statement_begin__ = 93; - current_statement_begin__ = 94; - current_statement_begin__ = 97; - - // write generated quantities - vars__.push_back(mu_eta); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_dv[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } 
- - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_prl_fictitious_woa"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' 
<< k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "eta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_eta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_dv" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_prl_rp_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_prl_rp"); - reader.add_event(148, 146, "end", "model_prl_rp"); - return reader; -} - -class model_prl_rp : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > choice; - vector > outcome; - vector_d initV; -public: - model_prl_rp(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_prl_rp(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_prl_rp_namespace::model_prl_rp"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ 
DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 8; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 9; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 10; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 12; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 13; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector 
>(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 8; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 9; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 10; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 12; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],2); - } - } - current_statement_begin__ = 13; - // initialize data variables - current_statement_begin__ = 18; - validate_non_negative_index("initV", "2", 2); - initV = vector_d(static_cast(2)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 19; - stan::math::assign(initV, rep_vector(0.0,2)); - - // validate transformed data - current_statement_begin__ = 18; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 25; - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 26; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 29; - validate_non_negative_index("Apun_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 30; - validate_non_negative_index("Arew_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 31; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += 
N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_prl_rp() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("Apun_pr"))) - throw std::runtime_error("variable Apun_pr missing"); - vals_r__ = context__.vals_r("Apun_pr"); - pos__ = 0U; - validate_non_negative_index("Apun_pr", "N", N); - context__.validate_dims("initialization", 
"Apun_pr", "vector_d", context__.to_vec(N)); - vector_d Apun_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Apun_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Apun_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Apun_pr: ") + e.what()); - } - - if (!(context__.contains_r("Arew_pr"))) - throw std::runtime_error("variable Arew_pr missing"); - vals_r__ = context__.vals_r("Arew_pr"); - pos__ = 0U; - validate_non_negative_index("Arew_pr", "N", N); - context__.validate_dims("initialization", "Arew_pr", "vector_d", context__.to_vec(N)); - vector_d Arew_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Arew_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Arew_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Arew_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ 
log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix Apun_pr; - (void) Apun_pr; // dummy to suppress unused var warning - if (jacobian__) - Apun_pr = in__.vector_constrain(N,lp__); - else - Apun_pr = in__.vector_constrain(N); - - Eigen::Matrix Arew_pr; - (void) Arew_pr; // dummy to suppress unused var warning - if (jacobian__) - Arew_pr = in__.vector_constrain(N,lp__); - else - Arew_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 36; - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - current_statement_begin__ = 37; - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - current_statement_begin__ = 38; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - 
(void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 40; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 41; - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - current_statement_begin__ = 42; - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - current_statement_begin__ = 43; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 10), - "assigning variable beta"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Apun(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Apun" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Arew(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Arew" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 
36; - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - current_statement_begin__ = 37; - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - current_statement_begin__ = 38; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // model body - - current_statement_begin__ = 49; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 50; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 53; - lp_accum__.add(normal_log(Apun_pr, 0, 1)); - current_statement_begin__ = 54; - lp_accum__.add(normal_log(Arew_pr, 0, 1)); - current_statement_begin__ = 55; - lp_accum__.add(normal_log(beta_pr, 0, 1)); - current_statement_begin__ = 57; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 59; - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 60; - local_scalar_t__ pe; - (void) pe; // dummy to suppress unused var warning - - stan::math::initialize(pe, DUMMY_VAR__); - stan::math::fill(pe,DUMMY_VAR__); - - - current_statement_begin__ = 63; - stan::math::assign(ev, initV); - current_statement_begin__ = 65; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 67; - lp_accum__.add(categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2), multiply(ev,get_base1(beta,i,"beta",1)))); - current_statement_begin__ = 70; - stan::math::assign(pe, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - current_statement_begin__ = 73; - if (as_bool(logical_gt(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - 
current_statement_begin__ = 74; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(Arew,i,"Arew",1) * pe)), - "assigning variable ev"); - } else { - current_statement_begin__ = 76; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(Apun,i,"Apun",1) * pe)), - "assigning variable ev"); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("Apun_pr"); - names__.push_back("Arew_pr"); - names__.push_back("beta_pr"); - names__.push_back("Apun"); - names__.push_back("Arew"); - names__.push_back("beta"); - names__.push_back("mu_Apun"); - names__.push_back("mu_Arew"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - 
names__.push_back("mr_ev_c"); - names__.push_back("mr_ev_nc"); - names__.push_back("mr_pe"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_prl_rp_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); 
- vector_d Apun_pr = in__.vector_constrain(N); - vector_d Arew_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Arew_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 36; - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - current_statement_begin__ = 37; - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - current_statement_begin__ = 38; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 40; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 41; - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - 
current_statement_begin__ = 42; - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - current_statement_begin__ = 43; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 10), - "assigning variable beta"); - } - - // validate transformed parameters - current_statement_begin__ = 36; - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - current_statement_begin__ = 37; - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - current_statement_begin__ = 38; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Arew[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 83; - local_scalar_t__ mu_Apun; - (void) mu_Apun; // dummy to suppress unused var warning - - stan::math::initialize(mu_Apun, DUMMY_VAR__); - stan::math::fill(mu_Apun,DUMMY_VAR__); - current_statement_begin__ = 84; - local_scalar_t__ mu_Arew; - (void) mu_Arew; // dummy to suppress unused var warning - - stan::math::initialize(mu_Arew, DUMMY_VAR__); - stan::math::fill(mu_Arew,DUMMY_VAR__); - current_statement_begin__ = 85; - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, 
DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - current_statement_begin__ = 88; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 91; - validate_non_negative_index("mr_ev_c", "N", N); - validate_non_negative_index("mr_ev_c", "T", T); - vector > mr_ev_c(N, (vector(T))); - stan::math::initialize(mr_ev_c, DUMMY_VAR__); - stan::math::fill(mr_ev_c,DUMMY_VAR__); - current_statement_begin__ = 92; - validate_non_negative_index("mr_ev_nc", "N", N); - validate_non_negative_index("mr_ev_nc", "T", T); - vector > mr_ev_nc(N, (vector(T))); - stan::math::initialize(mr_ev_nc, DUMMY_VAR__); - stan::math::fill(mr_ev_nc,DUMMY_VAR__); - current_statement_begin__ = 93; - validate_non_negative_index("mr_pe", "N", N); - validate_non_negative_index("mr_pe", "T", T); - vector > mr_pe(N, (vector(T))); - stan::math::initialize(mr_pe, DUMMY_VAR__); - stan::math::fill(mr_pe,DUMMY_VAR__); - current_statement_begin__ = 96; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 99; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 100; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 101; - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_c"); - current_statement_begin__ = 102; - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_ev_nc"); - current_statement_begin__ = 103; - stan::model::assign(mr_pe, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - 0, - "assigning variable mr_pe"); - current_statement_begin__ = 105; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 109; - stan::math::assign(mu_Apun, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 110; - stan::math::assign(mu_Arew, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 111; - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 10)); - - current_statement_begin__ = 114; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 116; - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 117; - local_scalar_t__ pe; - (void) pe; // dummy to suppress unused var warning - - stan::math::initialize(pe, DUMMY_VAR__); - stan::math::fill(pe,DUMMY_VAR__); - - - current_statement_begin__ = 120; - stan::math::assign(ev, initV); - current_statement_begin__ = 121; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 123; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 125; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + 
categorical_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),multiply(ev,get_base1(beta,i,"beta",1)))), - "assigning variable log_lik"); - current_statement_begin__ = 128; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - categorical_rng(softmax(multiply(ev,get_base1(beta,i,"beta",1))), base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 131; - stan::math::assign(pe, (get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2) - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1))); - current_statement_begin__ = 134; - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,get_base1(get_base1(choice,i,"choice",1),t,"choice",2),"ev",1), - "assigning variable mr_ev_c"); - current_statement_begin__ = 135; - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - get_base1(ev,(3 - get_base1(get_base1(choice,i,"choice",1),t,"choice",2)),"ev",1), - "assigning variable mr_ev_nc"); - current_statement_begin__ = 136; - stan::model::assign(mr_pe, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - pe, - "assigning variable mr_pe"); - current_statement_begin__ = 139; - if (as_bool(logical_gt(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),0))) { - current_statement_begin__ = 140; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), 
stan::model::nil_index_list()), "ev") + (get_base1(Arew,i,"Arew",1) * pe)), - "assigning variable ev"); - } else { - current_statement_begin__ = 142; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(choice,i,"choice",1),t,"choice",2)), stan::model::nil_index_list()), "ev") + (get_base1(Apun,i,"Apun",1) * pe)), - "assigning variable ev"); - } - } - } - } - - // validate generated quantities - current_statement_begin__ = 83; - check_greater_or_equal(function__,"mu_Apun",mu_Apun,0); - check_less_or_equal(function__,"mu_Apun",mu_Apun,1); - current_statement_begin__ = 84; - check_greater_or_equal(function__,"mu_Arew",mu_Arew,0); - check_less_or_equal(function__,"mu_Arew",mu_Arew,1); - current_statement_begin__ = 85; - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,10); - current_statement_begin__ = 88; - current_statement_begin__ = 91; - current_statement_begin__ = 92; - current_statement_begin__ = 93; - current_statement_begin__ = 96; - - // write generated quantities - vars__.push_back(mu_Apun); - vars__.push_back(mu_Arew); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_c[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_nc[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const 
std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_prl_rp"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_prl_rp_multipleB_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_prl_rp_multipleB"); - reader.add_event(159, 157, "end", "model_prl_rp_multipleB"); - return reader; -} - -class model_prl_rp_multipleB : public prob_grad { -private: - int N; - int T; - int maxB; - vector B; - vector > Tsubj; - vector > > choice; - vector > > outcome; - vector_d initV; -public: - model_prl_rp_multipleB(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_prl_rp_multipleB(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_prl_rp_multipleB_namespace::model_prl_rp_multipleB"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to 
suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 8; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 9; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 11; - context__.validate_dims("data initialization", "maxB", "int", context__.to_vec()); - maxB = int(0); - vals_i__ = context__.vals_i("maxB"); - pos__ = 0; - maxB = vals_i__[pos__++]; - current_statement_begin__ = 12; - validate_non_negative_index("B", "N", N); - context__.validate_dims("data initialization", "B", "int", context__.to_vec(N)); - validate_non_negative_index("B", "N", N); - B = std::vector(N,int(0)); - vals_i__ = context__.vals_i("B"); - pos__ = 0; - size_t B_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < B_limit_0__; ++i_0__) { - B[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 14; - validate_non_negative_index("Tsubj", "N", N); - validate_non_negative_index("Tsubj", "maxB", maxB); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N,maxB)); - validate_non_negative_index("Tsubj", "N", N); - validate_non_negative_index("Tsubj", "maxB", maxB); - Tsubj = std::vector >(N,std::vector(maxB,int(0))); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_1__ = maxB; - for (size_t i_1__ = 0; i_1__ < Tsubj_limit_1__; ++i_1__) { - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 15; - 
validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "maxB", maxB); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,maxB,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "maxB", maxB); - validate_non_negative_index("choice", "T", T); - choice = std::vector > >(N,std::vector >(maxB,std::vector(T,int(0)))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_2__ = T; - for (size_t i_2__ = 0; i_2__ < choice_limit_2__; ++i_2__) { - size_t choice_limit_1__ = maxB; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__][i_2__] = vals_i__[pos__++]; - } - } - } - current_statement_begin__ = 16; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "maxB", maxB); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,maxB,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "maxB", maxB); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector > >(N,std::vector >(maxB,std::vector(T,double(0)))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_2__ = T; - for (size_t i_2__ = 0; i_2__ < outcome_limit_2__; ++i_2__) { - size_t outcome_limit_1__ = maxB; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__][i_2__] = vals_r__[pos__++]; - } - } - } - - // validate, data variables - current_statement_begin__ = 8; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 9; - check_greater_or_equal(function__,"T",T,0); - 
current_statement_begin__ = 11; - check_greater_or_equal(function__,"maxB",maxB,1); - current_statement_begin__ = 12; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"B[k0__]",B[k0__],1); - } - current_statement_begin__ = 14; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < maxB; ++k1__) { - check_greater_or_equal(function__,"Tsubj[k0__][k1__]",Tsubj[k0__][k1__],0); - check_less_or_equal(function__,"Tsubj[k0__][k1__]",Tsubj[k0__][k1__],T); - } - } - current_statement_begin__ = 15; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < maxB; ++k1__) { - for (int k2__ = 0; k2__ < T; ++k2__) { - check_greater_or_equal(function__,"choice[k0__][k1__][k2__]",choice[k0__][k1__][k2__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__][k2__]",choice[k0__][k1__][k2__],2); - } - } - } - current_statement_begin__ = 16; - // initialize data variables - current_statement_begin__ = 21; - validate_non_negative_index("initV", "2", 2); - initV = vector_d(static_cast(2)); - stan::math::fill(initV,DUMMY_VAR__); - - current_statement_begin__ = 22; - stan::math::assign(initV, rep_vector(0.0,2)); - - // validate transformed data - current_statement_begin__ = 21; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 28; - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 29; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 32; - validate_non_negative_index("Apun_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 33; - validate_non_negative_index("Arew_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 34; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler 
griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_prl_rp_multipleB() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("Apun_pr"))) - throw std::runtime_error("variable Apun_pr missing"); - vals_r__ = context__.vals_r("Apun_pr"); - pos__ = 0U; - validate_non_negative_index("Apun_pr", "N", N); - context__.validate_dims("initialization", "Apun_pr", "vector_d", context__.to_vec(N)); - vector_d Apun_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Apun_pr(j1__) = 
vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Apun_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Apun_pr: ") + e.what()); - } - - if (!(context__.contains_r("Arew_pr"))) - throw std::runtime_error("variable Arew_pr missing"); - vals_r__ = context__.vals_r("Arew_pr"); - pos__ = 0U; - validate_non_negative_index("Arew_pr", "N", N); - context__.validate_dims("initialization", "Arew_pr", "vector_d", context__.to_vec(N)); - vector_d Arew_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Arew_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Arew_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Arew_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ 
DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix Apun_pr; - (void) Apun_pr; // dummy to suppress unused var warning - if (jacobian__) - Apun_pr = in__.vector_constrain(N,lp__); - else - Apun_pr = in__.vector_constrain(N); - - Eigen::Matrix Arew_pr; - (void) Arew_pr; // dummy to suppress unused var warning - if (jacobian__) - Arew_pr = in__.vector_constrain(N,lp__); - else - Arew_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 39; - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - current_statement_begin__ = 40; - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - current_statement_begin__ = 41; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - 
current_statement_begin__ = 43; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 44; - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - current_statement_begin__ = 45; - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - current_statement_begin__ = 46; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 10), - "assigning variable beta"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Apun(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Apun" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Arew(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Arew" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 39; - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - current_statement_begin__ = 40; - 
check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - current_statement_begin__ = 41; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // model body - - current_statement_begin__ = 52; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 53; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 56; - lp_accum__.add(normal_log(Apun_pr, 0, 1)); - current_statement_begin__ = 57; - lp_accum__.add(normal_log(Arew_pr, 0, 1)); - current_statement_begin__ = 58; - lp_accum__.add(normal_log(beta_pr, 0, 1)); - current_statement_begin__ = 60; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 61; - for (int bIdx = 1; bIdx <= get_base1(B,i,"B",1); ++bIdx) { - { - current_statement_begin__ = 63; - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 64; - local_scalar_t__ pe; - (void) pe; // dummy to suppress unused var warning - - stan::math::initialize(pe, DUMMY_VAR__); - stan::math::fill(pe,DUMMY_VAR__); - - - current_statement_begin__ = 67; - stan::math::assign(ev, initV); - current_statement_begin__ = 69; - for (int t = 1; t <= get_base1(get_base1(Tsubj,i,"Tsubj",1),bIdx,"Tsubj",2); ++t) { - - current_statement_begin__ = 71; - lp_accum__.add(categorical_logit_log(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3), multiply(ev,get_base1(beta,i,"beta",1)))); - current_statement_begin__ = 74; - stan::math::assign(pe, (get_base1(get_base1(get_base1(outcome,i,"outcome",1),bIdx,"outcome",2),t,"outcome",3) - get_base1(ev,get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),"ev",1))); - current_statement_begin__ = 77; - if 
(as_bool(logical_gt(get_base1(get_base1(get_base1(outcome,i,"outcome",1),bIdx,"outcome",2),t,"outcome",3),0))) { - current_statement_begin__ = 78; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), "ev") + (get_base1(Arew,i,"Arew",1) * pe)), - "assigning variable ev"); - } else { - current_statement_begin__ = 80; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), "ev") + (get_base1(Apun,i,"Apun",1) * pe)), - "assigning variable ev"); - } - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("Apun_pr"); - names__.push_back("Arew_pr"); - 
names__.push_back("beta_pr"); - names__.push_back("Apun"); - names__.push_back("Arew"); - names__.push_back("beta"); - names__.push_back("mu_Apun"); - names__.push_back("mu_Arew"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - names__.push_back("mr_ev_c"); - names__.push_back("mr_ev_nc"); - names__.push_back("mr_pe"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(maxB); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double 
local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_prl_rp_multipleB_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d Apun_pr = in__.vector_constrain(N); - vector_d Arew_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Arew_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 39; - validate_non_negative_index("Apun", "N", N); - Eigen::Matrix Apun(static_cast(N)); - (void) Apun; // dummy to suppress unused var warning - - stan::math::initialize(Apun, DUMMY_VAR__); - stan::math::fill(Apun,DUMMY_VAR__); - current_statement_begin__ = 40; - validate_non_negative_index("Arew", "N", N); - Eigen::Matrix Arew(static_cast(N)); - (void) Arew; // dummy to suppress unused var warning - - stan::math::initialize(Arew, DUMMY_VAR__); - stan::math::fill(Arew,DUMMY_VAR__); - current_statement_begin__ = 41; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - 
current_statement_begin__ = 43; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 44; - stan::model::assign(Apun, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(Apun_pr,i,"Apun_pr",1)))), - "assigning variable Apun"); - current_statement_begin__ = 45; - stan::model::assign(Arew, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Arew_pr,i,"Arew_pr",1)))), - "assigning variable Arew"); - current_statement_begin__ = 46; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))) * 10), - "assigning variable beta"); - } - - // validate transformed parameters - current_statement_begin__ = 39; - check_greater_or_equal(function__,"Apun",Apun,0); - check_less_or_equal(function__,"Apun",Apun,1); - current_statement_begin__ = 40; - check_greater_or_equal(function__,"Arew",Arew,0); - check_less_or_equal(function__,"Arew",Arew,1); - current_statement_begin__ = 41; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Apun[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Arew[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 88; - local_scalar_t__ mu_Apun; - (void) mu_Apun; // dummy to suppress unused var warning - - stan::math::initialize(mu_Apun, DUMMY_VAR__); - stan::math::fill(mu_Apun,DUMMY_VAR__); - 
current_statement_begin__ = 89; - local_scalar_t__ mu_Arew; - (void) mu_Arew; // dummy to suppress unused var warning - - stan::math::initialize(mu_Arew, DUMMY_VAR__); - stan::math::fill(mu_Arew,DUMMY_VAR__); - current_statement_begin__ = 90; - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - current_statement_begin__ = 93; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 96; - validate_non_negative_index("mr_ev_c", "N", N); - validate_non_negative_index("mr_ev_c", "maxB", maxB); - validate_non_negative_index("mr_ev_c", "T", T); - vector > > mr_ev_c(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(mr_ev_c, DUMMY_VAR__); - stan::math::fill(mr_ev_c,DUMMY_VAR__); - current_statement_begin__ = 97; - validate_non_negative_index("mr_ev_nc", "N", N); - validate_non_negative_index("mr_ev_nc", "maxB", maxB); - validate_non_negative_index("mr_ev_nc", "T", T); - vector > > mr_ev_nc(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(mr_ev_nc, DUMMY_VAR__); - stan::math::fill(mr_ev_nc,DUMMY_VAR__); - current_statement_begin__ = 98; - validate_non_negative_index("mr_pe", "N", N); - validate_non_negative_index("mr_pe", "maxB", maxB); - validate_non_negative_index("mr_pe", "T", T); - vector > > mr_pe(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(mr_pe, DUMMY_VAR__); - stan::math::fill(mr_pe,DUMMY_VAR__); - current_statement_begin__ = 101; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "maxB", maxB); - validate_non_negative_index("y_pred", "T", T); - vector > > y_pred(N, (vector >(maxB, (vector(T))))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 104; - for (int i = 1; 
i <= N; ++i) { - - current_statement_begin__ = 105; - for (int b = 1; b <= maxB; ++b) { - - current_statement_begin__ = 106; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 107; - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - 0, - "assigning variable mr_ev_c"); - current_statement_begin__ = 108; - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - 0, - "assigning variable mr_ev_nc"); - current_statement_begin__ = 109; - stan::model::assign(mr_pe, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - 0, - "assigning variable mr_pe"); - current_statement_begin__ = 111; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(b), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - -(1), - "assigning variable y_pred"); - } - } - } - current_statement_begin__ = 116; - stan::math::assign(mu_Apun, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 117; - stan::math::assign(mu_Arew, Phi_approx(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 118; - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 10)); - - current_statement_begin__ = 121; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 123; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 125; - for (int bIdx = 1; bIdx <= 
get_base1(B,i,"B",1); ++bIdx) { - { - current_statement_begin__ = 127; - validate_non_negative_index("ev", "2", 2); - Eigen::Matrix ev(static_cast(2)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - current_statement_begin__ = 128; - local_scalar_t__ pe; - (void) pe; // dummy to suppress unused var warning - - stan::math::initialize(pe, DUMMY_VAR__); - stan::math::fill(pe,DUMMY_VAR__); - - - current_statement_begin__ = 131; - stan::math::assign(ev, initV); - current_statement_begin__ = 133; - for (int t = 1; t <= get_base1(get_base1(Tsubj,i,"Tsubj",1),bIdx,"Tsubj",2); ++t) { - - current_statement_begin__ = 135; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + categorical_logit_log(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),multiply(ev,get_base1(beta,i,"beta",1))))), - "assigning variable log_lik"); - current_statement_begin__ = 138; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - categorical_rng(softmax(multiply(ev,get_base1(beta,i,"beta",1))), base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 141; - stan::math::assign(pe, (get_base1(get_base1(get_base1(outcome,i,"outcome",1),bIdx,"outcome",2),t,"outcome",3) - get_base1(ev,get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),"ev",1))); - current_statement_begin__ = 144; - stan::model::assign(mr_ev_c, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - 
get_base1(ev,get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3),"ev",1), - "assigning variable mr_ev_c"); - current_statement_begin__ = 145; - stan::model::assign(mr_ev_nc, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - get_base1(ev,(3 - get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)),"ev",1), - "assigning variable mr_ev_nc"); - current_statement_begin__ = 146; - stan::model::assign(mr_pe, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(bIdx), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - pe, - "assigning variable mr_pe"); - current_statement_begin__ = 149; - if (as_bool(logical_gt(get_base1(get_base1(get_base1(outcome,i,"outcome",1),bIdx,"outcome",2),t,"outcome",3),0))) { - current_statement_begin__ = 150; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), "ev") + (get_base1(Arew,i,"Arew",1) * pe)), - "assigning variable ev"); - } else { - current_statement_begin__ = 152; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(get_base1(choice,i,"choice",1),bIdx,"choice",2),t,"choice",3)), stan::model::nil_index_list()), "ev") + (get_base1(Apun,i,"Apun",1) * pe)), - "assigning variable ev"); - } - } - } - } - } - - // 
validate generated quantities - current_statement_begin__ = 88; - check_greater_or_equal(function__,"mu_Apun",mu_Apun,0); - check_less_or_equal(function__,"mu_Apun",mu_Apun,1); - current_statement_begin__ = 89; - check_greater_or_equal(function__,"mu_Arew",mu_Arew,0); - check_less_or_equal(function__,"mu_Arew",mu_Arew,1); - current_statement_begin__ = 90; - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,10); - current_statement_begin__ = 93; - current_statement_begin__ = 96; - current_statement_begin__ = 97; - current_statement_begin__ = 98; - current_statement_begin__ = 101; - - // write generated quantities - vars__.push_back(mu_Apun); - vars__.push_back(mu_Arew); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_c[k_0__][k_1__][k_2__]); - } - } - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_ev_nc[k_0__][k_1__][k_2__]); - } - } - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(mr_pe[k_0__][k_1__][k_2__]); - } - } - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < maxB; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__][k_2__]); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool 
include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_prl_rp_multipleB"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Apun" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Arew" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Apun"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Arew"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_c" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_ev_nc" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mr_pe" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= maxB; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_pst_gainloss_Q_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_pst_gainloss_Q"); - reader.add_event(113, 111, "end", "model_pst_gainloss_Q"); - return reader; -} - -class model_pst_gainloss_Q : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > option1; - vector > option2; - vector > choice; - vector > reward; - vector_d initial_values; -public: - model_pst_gainloss_Q(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_pst_gainloss_Q(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_pst_gainloss_Q_namespace::model_pst_gainloss_Q"; - (void) function__; // dummy to 
suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 6; - validate_non_negative_index("option1", "N", N); - validate_non_negative_index("option1", "T", T); - context__.validate_dims("data initialization", "option1", "int", context__.to_vec(N,T)); - validate_non_negative_index("option1", "N", N); - validate_non_negative_index("option1", "T", T); - option1 = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("option1"); - pos__ = 0; - size_t option1_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < option1_limit_1__; ++i_1__) { - size_t option1_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < option1_limit_0__; ++i_0__) { - option1[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("option2", "N", N); - validate_non_negative_index("option2", "T", T); - context__.validate_dims("data 
initialization", "option2", "int", context__.to_vec(N,T)); - validate_non_negative_index("option2", "N", N); - validate_non_negative_index("option2", "T", T); - option2 = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("option2"); - pos__ = 0; - size_t option2_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < option2_limit_1__; ++i_1__) { - size_t option2_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < option2_limit_0__; ++i_0__) { - option2[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 8; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "T", T); - choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 9; - validate_non_negative_index("reward", "N", N); - validate_non_negative_index("reward", "T", T); - context__.validate_dims("data initialization", "reward", "double", context__.to_vec(N,T)); - validate_non_negative_index("reward", "N", N); - validate_non_negative_index("reward", "T", T); - reward = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("reward"); - pos__ = 0; - size_t reward_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < reward_limit_1__; ++i_1__) { - size_t reward_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < reward_limit_0__; ++i_0__) { - reward[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - 
check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - } - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"option1[k0__][k1__]",option1[k0__][k1__],-(1)); - check_less_or_equal(function__,"option1[k0__][k1__]",option1[k0__][k1__],6); - } - } - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"option2[k0__][k1__]",option2[k0__][k1__],-(1)); - check_less_or_equal(function__,"option2[k0__][k1__]",option2[k0__][k1__],6); - } - } - current_statement_begin__ = 8; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],-(1)); - check_less_or_equal(function__,"choice[k0__][k1__]",choice[k0__][k1__],1); - } - } - current_statement_begin__ = 9; - // initialize data variables - current_statement_begin__ = 14; - validate_non_negative_index("initial_values", "6", 6); - initial_values = vector_d(static_cast(6)); - stan::math::fill(initial_values,DUMMY_VAR__); - - current_statement_begin__ = 15; - stan::math::assign(initial_values, rep_vector(0,6)); - - // validate transformed data - current_statement_begin__ = 14; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 20; - validate_non_negative_index("mu", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 21; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 24; - validate_non_negative_index("alpha_pos_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 25; - validate_non_negative_index("alpha_neg_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ 
= 26; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_pst_gainloss_Q() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu"))) - throw std::runtime_error("variable mu missing"); - vals_r__ = context__.vals_r("mu"); - pos__ = 0U; - validate_non_negative_index("mu", "3", 3); - context__.validate_dims("initialization", "mu", "vector_d", context__.to_vec(3)); - vector_d mu(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pos_pr"))) - throw std::runtime_error("variable alpha_pos_pr missing"); - vals_r__ = context__.vals_r("alpha_pos_pr"); - pos__ = 0U; - 
validate_non_negative_index("alpha_pos_pr", "N", N); - context__.validate_dims("initialization", "alpha_pos_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pos_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pos_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pos_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pos_pr: ") + e.what()); - } - - if (!(context__.contains_r("alpha_neg_pr"))) - throw std::runtime_error("variable alpha_neg_pr missing"); - vals_r__ = context__.vals_r("alpha_neg_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_neg_pr", "N", N); - context__.validate_dims("initialization", "alpha_neg_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_neg_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_neg_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_neg_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_neg_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", "vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, 
pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu; - (void) mu; // dummy to suppress unused var warning - if (jacobian__) - mu = in__.vector_constrain(3,lp__); - else - mu = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix alpha_pos_pr; - (void) alpha_pos_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pos_pr = in__.vector_constrain(N,lp__); - else - alpha_pos_pr = in__.vector_constrain(N); - - Eigen::Matrix alpha_neg_pr; - (void) alpha_neg_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_neg_pr = in__.vector_constrain(N,lp__); - else - alpha_neg_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 30; - validate_non_negative_index("alpha_pos", "N", N); - Eigen::Matrix alpha_pos(static_cast(N)); - (void) alpha_pos; // dummy to suppress unused var warning - - stan::math::initialize(alpha_pos, DUMMY_VAR__); - stan::math::fill(alpha_pos,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("alpha_neg", "N", N); - Eigen::Matrix alpha_neg(static_cast(N)); - (void) alpha_neg; // dummy to suppress 
unused var warning - - stan::math::initialize(alpha_neg, DUMMY_VAR__); - stan::math::fill(alpha_neg,DUMMY_VAR__); - current_statement_begin__ = 32; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 34; - stan::math::assign(alpha_pos, Phi_approx(add(get_base1(mu,1,"mu",1),multiply(get_base1(sigma,1,"sigma",1),alpha_pos_pr)))); - current_statement_begin__ = 35; - stan::math::assign(alpha_neg, Phi_approx(add(get_base1(mu,2,"mu",1),multiply(get_base1(sigma,2,"sigma",1),alpha_neg_pr)))); - current_statement_begin__ = 36; - stan::math::assign(beta, multiply(Phi_approx(add(get_base1(mu,3,"mu",1),multiply(get_base1(sigma,3,"sigma",1),beta_pr))),10)); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha_pos(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha_pos" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha_neg(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha_neg" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 30; - check_greater_or_equal(function__,"alpha_pos",alpha_pos,0); - check_less_or_equal(function__,"alpha_pos",alpha_pos,1); - current_statement_begin__ = 31; - 
check_greater_or_equal(function__,"alpha_neg",alpha_neg,0); - check_less_or_equal(function__,"alpha_neg",alpha_neg,1); - current_statement_begin__ = 32; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // model body - - current_statement_begin__ = 41; - lp_accum__.add(normal_log(mu, 0, 1)); - current_statement_begin__ = 42; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 45; - lp_accum__.add(normal_log(alpha_pos_pr, 0, 1)); - current_statement_begin__ = 46; - lp_accum__.add(normal_log(alpha_neg_pr, 0, 1)); - current_statement_begin__ = 47; - lp_accum__.add(normal_log(beta_pr, 0, 1)); - current_statement_begin__ = 49; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 50; - int co(0); - (void) co; // dummy to suppress unused var warning - - stan::math::fill(co, std::numeric_limits::min()); - current_statement_begin__ = 51; - local_scalar_t__ delta; - (void) delta; // dummy to suppress unused var warning - - stan::math::initialize(delta, DUMMY_VAR__); - stan::math::fill(delta,DUMMY_VAR__); - current_statement_begin__ = 52; - local_scalar_t__ pe; - (void) pe; // dummy to suppress unused var warning - - stan::math::initialize(pe, DUMMY_VAR__); - stan::math::fill(pe,DUMMY_VAR__); - current_statement_begin__ = 53; - local_scalar_t__ alpha; - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 54; - validate_non_negative_index("ev", "6", 6); - Eigen::Matrix ev(static_cast(6)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - - - current_statement_begin__ = 56; - stan::math::assign(ev, initial_values); - current_statement_begin__ = 59; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 60; - stan::math::assign(co, 
(logical_gt(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),0) ? get_base1(get_base1(option1,i,"option1",1),t,"option1",2) : get_base1(get_base1(option2,i,"option2",1),t,"option2",2) )); - current_statement_begin__ = 63; - stan::math::assign(delta, (get_base1(ev,get_base1(get_base1(option1,i,"option1",1),t,"option1",2),"ev",1) - get_base1(ev,get_base1(get_base1(option2,i,"option2",1),t,"option2",2),"ev",1))); - current_statement_begin__ = 64; - lp_accum__.add(bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),(get_base1(beta,i,"beta",1) * delta))); - current_statement_begin__ = 66; - stan::math::assign(pe, (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(ev,co,"ev",1))); - current_statement_begin__ = 67; - stan::math::assign(alpha, (logical_gte(pe,0) ? stan::math::promote_scalar(get_base1(alpha_pos,i,"alpha_pos",1)) : stan::math::promote_scalar(get_base1(alpha_neg,i,"alpha_neg",1)) )); - current_statement_begin__ = 68; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(co), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(co), stan::model::nil_index_list()), "ev") + (alpha * pe)), - "assigning variable ev"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - 
names__.resize(0); - names__.push_back("mu"); - names__.push_back("sigma"); - names__.push_back("alpha_pos_pr"); - names__.push_back("alpha_neg_pr"); - names__.push_back("beta_pr"); - names__.push_back("alpha_pos"); - names__.push_back("alpha_neg"); - names__.push_back("beta"); - names__.push_back("mu_alpha_pos"); - names__.push_back("mu_alpha_neg"); - names__.push_back("mu_beta"); - names__.push_back("log_lik"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_pst_gainloss_Q_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d alpha_pos_pr = in__.vector_constrain(N); - vector_d alpha_neg_pr = 
in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pos_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_neg_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 30; - validate_non_negative_index("alpha_pos", "N", N); - Eigen::Matrix alpha_pos(static_cast(N)); - (void) alpha_pos; // dummy to suppress unused var warning - - stan::math::initialize(alpha_pos, DUMMY_VAR__); - stan::math::fill(alpha_pos,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("alpha_neg", "N", N); - Eigen::Matrix alpha_neg(static_cast(N)); - (void) alpha_neg; // dummy to suppress unused var warning - - stan::math::initialize(alpha_neg, DUMMY_VAR__); - stan::math::fill(alpha_neg,DUMMY_VAR__); - current_statement_begin__ = 32; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - - - current_statement_begin__ = 34; - stan::math::assign(alpha_pos, Phi_approx(add(get_base1(mu,1,"mu",1),multiply(get_base1(sigma,1,"sigma",1),alpha_pos_pr)))); - current_statement_begin__ = 35; - stan::math::assign(alpha_neg, Phi_approx(add(get_base1(mu,2,"mu",1),multiply(get_base1(sigma,2,"sigma",1),alpha_neg_pr)))); - current_statement_begin__ = 36; - 
stan::math::assign(beta, multiply(Phi_approx(add(get_base1(mu,3,"mu",1),multiply(get_base1(sigma,3,"sigma",1),beta_pr))),10)); - - // validate transformed parameters - current_statement_begin__ = 30; - check_greater_or_equal(function__,"alpha_pos",alpha_pos,0); - check_less_or_equal(function__,"alpha_pos",alpha_pos,1); - current_statement_begin__ = 31; - check_greater_or_equal(function__,"alpha_neg",alpha_neg,0); - check_less_or_equal(function__,"alpha_neg",alpha_neg,1); - current_statement_begin__ = 32; - check_greater_or_equal(function__,"beta",beta,0); - check_less_or_equal(function__,"beta",beta,10); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pos[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_neg[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 75; - local_scalar_t__ mu_alpha_pos; - (void) mu_alpha_pos; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha_pos, DUMMY_VAR__); - stan::math::fill(mu_alpha_pos,DUMMY_VAR__); - current_statement_begin__ = 76; - local_scalar_t__ mu_alpha_neg; - (void) mu_alpha_neg; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha_neg, DUMMY_VAR__); - stan::math::fill(mu_alpha_neg,DUMMY_VAR__); - current_statement_begin__ = 77; - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - current_statement_begin__ = 80; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - - - current_statement_begin__ = 82; - stan::math::assign(mu_alpha_pos, Phi_approx(get_base1(mu,1,"mu",1))); - 
current_statement_begin__ = 83; - stan::math::assign(mu_alpha_neg, Phi_approx(get_base1(mu,2,"mu",1))); - current_statement_begin__ = 84; - stan::math::assign(mu_beta, (Phi_approx(get_base1(mu,3,"mu",1)) * 10)); - - current_statement_begin__ = 87; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 88; - int co(0); - (void) co; // dummy to suppress unused var warning - - stan::math::fill(co, std::numeric_limits::min()); - current_statement_begin__ = 89; - local_scalar_t__ delta; - (void) delta; // dummy to suppress unused var warning - - stan::math::initialize(delta, DUMMY_VAR__); - stan::math::fill(delta,DUMMY_VAR__); - current_statement_begin__ = 90; - local_scalar_t__ pe; - (void) pe; // dummy to suppress unused var warning - - stan::math::initialize(pe, DUMMY_VAR__); - stan::math::fill(pe,DUMMY_VAR__); - current_statement_begin__ = 91; - local_scalar_t__ alpha; - (void) alpha; // dummy to suppress unused var warning - - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 92; - validate_non_negative_index("ev", "6", 6); - Eigen::Matrix ev(static_cast(6)); - (void) ev; // dummy to suppress unused var warning - - stan::math::initialize(ev, DUMMY_VAR__); - stan::math::fill(ev,DUMMY_VAR__); - - - current_statement_begin__ = 94; - stan::math::assign(ev, initial_values); - current_statement_begin__ = 95; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 98; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 99; - stan::math::assign(co, (logical_gt(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),0) ? 
get_base1(get_base1(option1,i,"option1",1),t,"option1",2) : get_base1(get_base1(option2,i,"option2",1),t,"option2",2) )); - current_statement_begin__ = 102; - stan::math::assign(delta, (get_base1(ev,get_base1(get_base1(option1,i,"option1",1),t,"option1",2),"ev",1) - get_base1(ev,get_base1(get_base1(option2,i,"option2",1),t,"option2",2),"ev",1))); - current_statement_begin__ = 103; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (stan::model::rvalue(log_lik, stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), "log_lik") + bernoulli_logit_log(get_base1(get_base1(choice,i,"choice",1),t,"choice",2),(get_base1(beta,i,"beta",1) * delta))), - "assigning variable log_lik"); - current_statement_begin__ = 105; - stan::math::assign(pe, (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(ev,co,"ev",1))); - current_statement_begin__ = 106; - stan::math::assign(alpha, (logical_gte(pe,0) ? 
stan::math::promote_scalar(get_base1(alpha_pos,i,"alpha_pos",1)) : stan::math::promote_scalar(get_base1(alpha_neg,i,"alpha_neg",1)) )); - current_statement_begin__ = 107; - stan::model::assign(ev, - stan::model::cons_list(stan::model::index_uni(co), stan::model::nil_index_list()), - (stan::model::rvalue(ev, stan::model::cons_list(stan::model::index_uni(co), stan::model::nil_index_list()), "ev") + (alpha * pe)), - "assigning variable ev"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 75; - check_greater_or_equal(function__,"mu_alpha_pos",mu_alpha_pos,0); - check_less_or_equal(function__,"mu_alpha_pos",mu_alpha_pos,1); - current_statement_begin__ = 76; - check_greater_or_equal(function__,"mu_alpha_neg",mu_alpha_neg,0); - check_less_or_equal(function__,"mu_alpha_neg",mu_alpha_neg,1); - current_statement_begin__ = 77; - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - check_less_or_equal(function__,"mu_beta",mu_beta,10); - current_statement_begin__ = 80; - - // write generated quantities - vars__.push_back(mu_alpha_pos); - vars__.push_back(mu_alpha_neg); - vars__.push_back(mu_beta); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < 
vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_pst_gainloss_Q"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pos_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_neg_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pos" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_neg" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha_pos"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha_neg"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pos_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_neg_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pos" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_neg" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha_pos"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha_neg"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_ra_noLA_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_ra_noLA"); - reader.add_event(94, 92, "end", "model_ra_noLA"); - return reader; -} - -class model_ra_noLA : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > gamble; - vector > gain; - vector > cert; - vector > loss; -public: - model_ra_noLA(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_ra_noLA(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_ra_noLA_namespace::model_ra_noLA"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = 
int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("gamble", "N", N); - validate_non_negative_index("gamble", "T", T); - context__.validate_dims("data initialization", "gamble", "int", context__.to_vec(N,T)); - validate_non_negative_index("gamble", "N", N); - validate_non_negative_index("gamble", "T", T); - gamble = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("gamble"); - pos__ = 0; - size_t gamble_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < gamble_limit_1__; ++i_1__) { - size_t gamble_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < gamble_limit_0__; ++i_0__) { - gamble[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("gain", "N", N); - validate_non_negative_index("gain", "T", T); - context__.validate_dims("data initialization", "gain", "double", context__.to_vec(N,T)); - validate_non_negative_index("gain", "N", N); - validate_non_negative_index("gain", "T", T); - gain = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("gain"); - pos__ = 0; - size_t gain_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < gain_limit_1__; ++i_1__) { - size_t gain_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < gain_limit_0__; ++i_0__) { - gain[i_0__][i_1__] = 
vals_r__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("cert", "N", N); - validate_non_negative_index("cert", "T", T); - context__.validate_dims("data initialization", "cert", "double", context__.to_vec(N,T)); - validate_non_negative_index("cert", "N", N); - validate_non_negative_index("cert", "T", T); - cert = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("cert"); - pos__ = 0; - size_t cert_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cert_limit_1__; ++i_1__) { - size_t cert_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cert_limit_0__; ++i_0__) { - cert[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 8; - validate_non_negative_index("loss", "N", N); - validate_non_negative_index("loss", "T", T); - context__.validate_dims("data initialization", "loss", "double", context__.to_vec(N,T)); - validate_non_negative_index("loss", "N", N); - validate_non_negative_index("loss", "T", T); - loss = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("loss"); - pos__ = 0; - size_t loss_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < loss_limit_1__; ++i_1__) { - size_t loss_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < loss_limit_0__; ++i_0__) { - loss[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"gamble[k0__][k1__]",gamble[k0__][k1__],-(1)); - check_less_or_equal(function__,"gamble[k0__][k1__]",gamble[k0__][k1__],1); - } - } - 
current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"gain[k0__][k1__]",gain[k0__][k1__],0); - } - } - current_statement_begin__ = 7; - current_statement_begin__ = 8; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"loss[k0__][k1__]",loss[k0__][k1__],0); - } - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 15; - validate_non_negative_index("mu_p", "2", 2); - num_params_r__ += 2; - current_statement_begin__ = 16; - validate_non_negative_index("sigma", "2", 2); - num_params_r__ += 2; - current_statement_begin__ = 17; - validate_non_negative_index("rho_p", "N", N); - num_params_r__ += N; - current_statement_begin__ = 18; - validate_non_negative_index("tau_p", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_ra_noLA() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "2", 2); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(2)); - vector_d mu_p(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try 
{ - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "2", 2); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(2)); - vector_d sigma(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("rho_p"))) - throw std::runtime_error("variable rho_p missing"); - vals_r__ = context__.vals_r("rho_p"); - pos__ = 0U; - validate_non_negative_index("rho_p", "N", N); - context__.validate_dims("initialization", "rho_p", "vector_d", context__.to_vec(N)); - vector_d rho_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_p: ") + e.what()); - } - - if (!(context__.contains_r("tau_p"))) - throw std::runtime_error("variable tau_p missing"); - vals_r__ = context__.vals_r("tau_p"); - pos__ = 0U; - validate_non_negative_index("tau_p", "N", N); - context__.validate_dims("initialization", "tau_p", "vector_d", context__.to_vec(N)); - vector_d tau_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - tau_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(tau_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau_p: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const 
stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(2,lp__); - else - mu_p = in__.vector_constrain(2); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,2,lp__); - else - sigma = in__.vector_lb_constrain(0,2); - - Eigen::Matrix rho_p; - (void) rho_p; // dummy to suppress unused var warning - if (jacobian__) - rho_p = in__.vector_constrain(N,lp__); - else - rho_p = in__.vector_constrain(N); - - Eigen::Matrix tau_p; - (void) tau_p; // dummy to suppress unused var warning - if (jacobian__) - tau_p = in__.vector_constrain(N,lp__); - else - tau_p = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 22; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - current_statement_begin__ = 23; - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - 
stan::math::fill(tau,DUMMY_VAR__); - - - current_statement_begin__ = 25; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 26; - stan::model::assign(rho, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(rho_p,i,"rho_p",1)))) * 2), - "assigning variable rho"); - } - current_statement_begin__ = 28; - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),tau_p)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(tau(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 22; - check_greater_or_equal(function__,"rho",rho,0); - check_less_or_equal(function__,"rho",rho,2); - current_statement_begin__ = 23; - check_greater_or_equal(function__,"tau",tau,0); - - // model body - - current_statement_begin__ = 34; - lp_accum__.add(normal_log(mu_p, 0, 1.0)); - current_statement_begin__ = 35; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 38; - lp_accum__.add(normal_log(rho_p, 0, 1.0)); - current_statement_begin__ = 39; - lp_accum__.add(normal_log(tau_p, 0, 1.0)); - current_statement_begin__ = 41; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 42; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - current_statement_begin__ = 43; - local_scalar_t__ evSafe; - (void) 
evSafe; // dummy to suppress unused var warning - - stan::math::initialize(evSafe, DUMMY_VAR__); - stan::math::fill(evSafe,DUMMY_VAR__); - current_statement_begin__ = 44; - local_scalar_t__ evGamble; - (void) evGamble; // dummy to suppress unused var warning - - stan::math::initialize(evGamble, DUMMY_VAR__); - stan::math::fill(evGamble,DUMMY_VAR__); - current_statement_begin__ = 45; - local_scalar_t__ pGamble; - (void) pGamble; // dummy to suppress unused var warning - - stan::math::initialize(pGamble, DUMMY_VAR__); - stan::math::fill(pGamble,DUMMY_VAR__); - - - current_statement_begin__ = 47; - stan::math::assign(evSafe, pow(get_base1(get_base1(cert,i,"cert",1),t,"cert",2),get_base1(rho,i,"rho",1))); - current_statement_begin__ = 48; - stan::math::assign(evGamble, (0.5 * (pow(get_base1(get_base1(gain,i,"gain",1),t,"gain",2),get_base1(rho,i,"rho",1)) - pow(get_base1(get_base1(loss,i,"loss",1),t,"loss",2),get_base1(rho,i,"rho",1))))); - current_statement_begin__ = 49; - stan::math::assign(pGamble, inv_logit((get_base1(tau,i,"tau",1) * (evGamble - evSafe)))); - current_statement_begin__ = 50; - lp_accum__.add(bernoulli_log(get_base1(get_base1(gamble,i,"gamble",1),t,"gamble",2), pGamble)); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - 
names__.push_back("sigma"); - names__.push_back("rho_p"); - names__.push_back("tau_p"); - names__.push_back("rho"); - names__.push_back("tau"); - names__.push_back("mu_rho"); - names__.push_back("mu_tau"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_ra_noLA_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(2); - vector_d sigma = in__.vector_lb_constrain(0,2); - vector_d rho_p = in__.vector_constrain(N); - vector_d tau_p = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_p[k_0__]); 
- } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau_p[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 22; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - current_statement_begin__ = 23; - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - current_statement_begin__ = 25; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 26; - stan::model::assign(rho, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(rho_p,i,"rho_p",1)))) * 2), - "assigning variable rho"); - } - current_statement_begin__ = 28; - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),tau_p)))); - - // validate transformed parameters - current_statement_begin__ = 22; - check_greater_or_equal(function__,"rho",rho,0); - check_less_or_equal(function__,"rho",rho,2); - current_statement_begin__ = 23; - check_greater_or_equal(function__,"tau",tau,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 55; 
- local_scalar_t__ mu_rho; - (void) mu_rho; // dummy to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - stan::math::fill(mu_rho,DUMMY_VAR__); - current_statement_begin__ = 56; - local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to suppress unused var warning - - stan::math::initialize(mu_tau, DUMMY_VAR__); - stan::math::fill(mu_tau,DUMMY_VAR__); - current_statement_begin__ = 58; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 61; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 64; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 65; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 66; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 70; - stan::math::assign(mu_rho, (Phi_approx(get_base1(mu_p,1,"mu_p",1)) * 2)); - current_statement_begin__ = 71; - stan::math::assign(mu_tau, stan::math::exp(get_base1(mu_p,2,"mu_p",1))); - - current_statement_begin__ = 74; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 75; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 76; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - current_statement_begin__ = 77; - local_scalar_t__ evSafe; - (void) evSafe; // dummy to suppress unused var warning - - stan::math::initialize(evSafe, DUMMY_VAR__); - stan::math::fill(evSafe,DUMMY_VAR__); 
- current_statement_begin__ = 78; - local_scalar_t__ evGamble; - (void) evGamble; // dummy to suppress unused var warning - - stan::math::initialize(evGamble, DUMMY_VAR__); - stan::math::fill(evGamble,DUMMY_VAR__); - current_statement_begin__ = 79; - local_scalar_t__ pGamble; - (void) pGamble; // dummy to suppress unused var warning - - stan::math::initialize(pGamble, DUMMY_VAR__); - stan::math::fill(pGamble,DUMMY_VAR__); - - - current_statement_begin__ = 82; - stan::math::assign(evSafe, pow(get_base1(get_base1(cert,i,"cert",1),t,"cert",2),get_base1(rho,i,"rho",1))); - current_statement_begin__ = 83; - stan::math::assign(evGamble, (0.5 * (pow(get_base1(get_base1(gain,i,"gain",1),t,"gain",2),get_base1(rho,i,"rho",1)) - pow(get_base1(get_base1(loss,i,"loss",1),t,"loss",2),get_base1(rho,i,"rho",1))))); - current_statement_begin__ = 84; - stan::math::assign(pGamble, inv_logit((get_base1(tau,i,"tau",1) * (evGamble - evSafe)))); - current_statement_begin__ = 85; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(gamble,i,"gamble",1),t,"gamble",2),pGamble))), - "assigning variable log_lik"); - current_statement_begin__ = 88; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(pGamble, base_rng__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 55; - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - check_less_or_equal(function__,"mu_rho",mu_rho,2); - current_statement_begin__ = 56; - check_greater_or_equal(function__,"mu_tau",mu_tau,0); - current_statement_begin__ = 58; - current_statement_begin__ = 61; - - // write generated quantities - vars__.push_back(mu_rho); - vars__.push_back(mu_tau); - for (int k_0__ = 0; k_0__ < 
N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_ra_noLA"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_ra_noRA_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_ra_noRA"); - reader.add_event(94, 92, "end", "model_ra_noRA"); - return reader; -} - -class model_ra_noRA : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > gamble; - vector > gain; - vector > cert; - vector > loss; -public: - model_ra_noRA(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_ra_noRA(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_ra_noRA_namespace::model_ra_noRA"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - 
N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("gamble", "N", N); - validate_non_negative_index("gamble", "T", T); - context__.validate_dims("data initialization", "gamble", "int", context__.to_vec(N,T)); - validate_non_negative_index("gamble", "N", N); - validate_non_negative_index("gamble", "T", T); - gamble = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("gamble"); - pos__ = 0; - size_t gamble_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < gamble_limit_1__; ++i_1__) { - size_t gamble_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < gamble_limit_0__; ++i_0__) { - gamble[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("gain", "N", N); - validate_non_negative_index("gain", "T", T); - context__.validate_dims("data initialization", "gain", "double", context__.to_vec(N,T)); - validate_non_negative_index("gain", "N", N); - validate_non_negative_index("gain", "T", T); - gain = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("gain"); - pos__ = 0; - size_t gain_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < gain_limit_1__; ++i_1__) { - size_t gain_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < gain_limit_0__; ++i_0__) { - gain[i_0__][i_1__] 
= vals_r__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("cert", "N", N); - validate_non_negative_index("cert", "T", T); - context__.validate_dims("data initialization", "cert", "double", context__.to_vec(N,T)); - validate_non_negative_index("cert", "N", N); - validate_non_negative_index("cert", "T", T); - cert = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("cert"); - pos__ = 0; - size_t cert_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cert_limit_1__; ++i_1__) { - size_t cert_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cert_limit_0__; ++i_0__) { - cert[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 8; - validate_non_negative_index("loss", "N", N); - validate_non_negative_index("loss", "T", T); - context__.validate_dims("data initialization", "loss", "double", context__.to_vec(N,T)); - validate_non_negative_index("loss", "N", N); - validate_non_negative_index("loss", "T", T); - loss = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("loss"); - pos__ = 0; - size_t loss_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < loss_limit_1__; ++i_1__) { - size_t loss_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < loss_limit_0__; ++i_0__) { - loss[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"gamble[k0__][k1__]",gamble[k0__][k1__],-(1)); - check_less_or_equal(function__,"gamble[k0__][k1__]",gamble[k0__][k1__],1); - } - } - 
current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"gain[k0__][k1__]",gain[k0__][k1__],0); - } - } - current_statement_begin__ = 7; - current_statement_begin__ = 8; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"loss[k0__][k1__]",loss[k0__][k1__],0); - } - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 15; - validate_non_negative_index("mu_p", "2", 2); - num_params_r__ += 2; - current_statement_begin__ = 16; - validate_non_negative_index("sigma", "2", 2); - num_params_r__ += 2; - current_statement_begin__ = 17; - validate_non_negative_index("lambda_p", "N", N); - num_params_r__ += N; - current_statement_begin__ = 18; - validate_non_negative_index("tau_p", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_ra_noRA() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "2", 2); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(2)); - vector_d mu_p(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - 
try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "2", 2); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(2)); - vector_d sigma(static_cast(2)); - for (int j1__ = 0U; j1__ < 2; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("lambda_p"))) - throw std::runtime_error("variable lambda_p missing"); - vals_r__ = context__.vals_r("lambda_p"); - pos__ = 0U; - validate_non_negative_index("lambda_p", "N", N); - context__.validate_dims("initialization", "lambda_p", "vector_d", context__.to_vec(N)); - vector_d lambda_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - lambda_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(lambda_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable lambda_p: ") + e.what()); - } - - if (!(context__.contains_r("tau_p"))) - throw std::runtime_error("variable tau_p missing"); - vals_r__ = context__.vals_r("tau_p"); - pos__ = 0U; - validate_non_negative_index("tau_p", "N", N); - context__.validate_dims("initialization", "tau_p", "vector_d", context__.to_vec(N)); - vector_d tau_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - tau_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(tau_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau_p: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } 
- - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(2,lp__); - else - mu_p = in__.vector_constrain(2); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,2,lp__); - else - sigma = in__.vector_lb_constrain(0,2); - - Eigen::Matrix lambda_p; - (void) lambda_p; // dummy to suppress unused var warning - if (jacobian__) - lambda_p = in__.vector_constrain(N,lp__); - else - lambda_p = in__.vector_constrain(N); - - Eigen::Matrix tau_p; - (void) tau_p; // dummy to suppress unused var warning - if (jacobian__) - tau_p = in__.vector_constrain(N,lp__); - else - tau_p = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 22; - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - current_statement_begin__ = 23; - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - 
stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - current_statement_begin__ = 25; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 26; - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(lambda_p,i,"lambda_p",1)))) * 5), - "assigning variable lambda"); - } - current_statement_begin__ = 28; - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),tau_p)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(lambda(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: lambda" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(tau(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 22; - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,5); - current_statement_begin__ = 23; - check_greater_or_equal(function__,"tau",tau,0); - - // model body - - current_statement_begin__ = 34; - lp_accum__.add(normal_log(mu_p, 0, 1.0)); - current_statement_begin__ = 35; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 38; - lp_accum__.add(normal_log(lambda_p, 0, 1.0)); - current_statement_begin__ = 39; - lp_accum__.add(normal_log(tau_p, 0, 1.0)); - current_statement_begin__ = 41; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 42; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { 
- { - current_statement_begin__ = 43; - local_scalar_t__ evSafe; - (void) evSafe; // dummy to suppress unused var warning - - stan::math::initialize(evSafe, DUMMY_VAR__); - stan::math::fill(evSafe,DUMMY_VAR__); - current_statement_begin__ = 44; - local_scalar_t__ evGamble; - (void) evGamble; // dummy to suppress unused var warning - - stan::math::initialize(evGamble, DUMMY_VAR__); - stan::math::fill(evGamble,DUMMY_VAR__); - current_statement_begin__ = 45; - local_scalar_t__ pGamble; - (void) pGamble; // dummy to suppress unused var warning - - stan::math::initialize(pGamble, DUMMY_VAR__); - stan::math::fill(pGamble,DUMMY_VAR__); - - - current_statement_begin__ = 48; - stan::math::assign(evSafe, get_base1(get_base1(cert,i,"cert",1),t,"cert",2)); - current_statement_begin__ = 49; - stan::math::assign(evGamble, (0.5 * (get_base1(get_base1(gain,i,"gain",1),t,"gain",2) - (get_base1(lambda,i,"lambda",1) * get_base1(get_base1(loss,i,"loss",1),t,"loss",2))))); - current_statement_begin__ = 50; - stan::math::assign(pGamble, inv_logit((get_base1(tau,i,"tau",1) * (evGamble - evSafe)))); - current_statement_begin__ = 51; - lp_accum__.add(bernoulli_log(get_base1(get_base1(gamble,i,"gamble",1),t,"gamble",2), pGamble)); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - 
names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("lambda_p"); - names__.push_back("tau_p"); - names__.push_back("lambda"); - names__.push_back("tau"); - names__.push_back("mu_lambda"); - names__.push_back("mu_tau"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(2); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_ra_noRA_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(2); - vector_d sigma = in__.vector_lb_constrain(0,2); - vector_d lambda_p = in__.vector_constrain(N); - vector_d tau_p = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 2; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; 
++k_0__) { - vars__.push_back(lambda_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau_p[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 22; - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - current_statement_begin__ = 23; - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - current_statement_begin__ = 25; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 26; - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(lambda_p,i,"lambda_p",1)))) * 5), - "assigning variable lambda"); - } - current_statement_begin__ = 28; - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),tau_p)))); - - // validate transformed parameters - current_statement_begin__ = 22; - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,5); - current_statement_begin__ = 23; - check_greater_or_equal(function__,"tau",tau,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - } - if 
(!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 56; - local_scalar_t__ mu_lambda; - (void) mu_lambda; // dummy to suppress unused var warning - - stan::math::initialize(mu_lambda, DUMMY_VAR__); - stan::math::fill(mu_lambda,DUMMY_VAR__); - current_statement_begin__ = 57; - local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to suppress unused var warning - - stan::math::initialize(mu_tau, DUMMY_VAR__); - stan::math::fill(mu_tau,DUMMY_VAR__); - current_statement_begin__ = 59; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 62; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 65; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 66; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 67; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 71; - stan::math::assign(mu_lambda, (Phi_approx(get_base1(mu_p,1,"mu_p",1)) * 5)); - current_statement_begin__ = 72; - stan::math::assign(mu_tau, stan::math::exp(get_base1(mu_p,2,"mu_p",1))); - - current_statement_begin__ = 75; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 76; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 77; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - current_statement_begin__ = 78; - local_scalar_t__ evSafe; - (void) evSafe; // dummy 
to suppress unused var warning - - stan::math::initialize(evSafe, DUMMY_VAR__); - stan::math::fill(evSafe,DUMMY_VAR__); - current_statement_begin__ = 79; - local_scalar_t__ evGamble; - (void) evGamble; // dummy to suppress unused var warning - - stan::math::initialize(evGamble, DUMMY_VAR__); - stan::math::fill(evGamble,DUMMY_VAR__); - current_statement_begin__ = 80; - local_scalar_t__ pGamble; - (void) pGamble; // dummy to suppress unused var warning - - stan::math::initialize(pGamble, DUMMY_VAR__); - stan::math::fill(pGamble,DUMMY_VAR__); - - - current_statement_begin__ = 82; - stan::math::assign(evSafe, get_base1(get_base1(cert,i,"cert",1),t,"cert",2)); - current_statement_begin__ = 83; - stan::math::assign(evGamble, (0.5 * (get_base1(get_base1(gain,i,"gain",1),t,"gain",2) - (get_base1(lambda,i,"lambda",1) * get_base1(get_base1(loss,i,"loss",1),t,"loss",2))))); - current_statement_begin__ = 84; - stan::math::assign(pGamble, inv_logit((get_base1(tau,i,"tau",1) * (evGamble - evSafe)))); - current_statement_begin__ = 85; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(gamble,i,"gamble",1),t,"gamble",2),pGamble))), - "assigning variable log_lik"); - current_statement_begin__ = 88; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(pGamble, base_rng__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 56; - check_greater_or_equal(function__,"mu_lambda",mu_lambda,0); - check_less_or_equal(function__,"mu_lambda",mu_lambda,5); - current_statement_begin__ = 57; - check_greater_or_equal(function__,"mu_tau",mu_tau,0); - current_statement_begin__ = 59; - current_statement_begin__ = 62; - - // write generated quantities - 
vars__.push_back(mu_lambda); - vars__.push_back(mu_tau); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_ra_noRA"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 2; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_ra_prospect_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_ra_prospect"); - reader.add_event(96, 94, "end", "model_ra_prospect"); - return reader; -} - -class model_ra_prospect : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > gamble; - vector > cert; - vector > gain; - vector > loss; -public: - model_ra_prospect(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_ra_prospect(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_ra_prospect_namespace::model_ra_prospect"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", 
"N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("gamble", "N", N); - validate_non_negative_index("gamble", "T", T); - context__.validate_dims("data initialization", "gamble", "int", context__.to_vec(N,T)); - validate_non_negative_index("gamble", "N", N); - validate_non_negative_index("gamble", "T", T); - gamble = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("gamble"); - pos__ = 0; - size_t gamble_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < gamble_limit_1__; ++i_1__) { - size_t gamble_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < gamble_limit_0__; ++i_0__) { - gamble[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("cert", "N", N); - validate_non_negative_index("cert", "T", T); - context__.validate_dims("data initialization", "cert", "double", context__.to_vec(N,T)); - validate_non_negative_index("cert", "N", N); - validate_non_negative_index("cert", "T", T); - cert = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("cert"); - pos__ = 0; - size_t cert_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cert_limit_1__; ++i_1__) { - size_t cert_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < 
cert_limit_0__; ++i_0__) { - cert[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("gain", "N", N); - validate_non_negative_index("gain", "T", T); - context__.validate_dims("data initialization", "gain", "double", context__.to_vec(N,T)); - validate_non_negative_index("gain", "N", N); - validate_non_negative_index("gain", "T", T); - gain = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("gain"); - pos__ = 0; - size_t gain_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < gain_limit_1__; ++i_1__) { - size_t gain_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < gain_limit_0__; ++i_0__) { - gain[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 8; - validate_non_negative_index("loss", "N", N); - validate_non_negative_index("loss", "T", T); - context__.validate_dims("data initialization", "loss", "double", context__.to_vec(N,T)); - validate_non_negative_index("loss", "N", N); - validate_non_negative_index("loss", "T", T); - loss = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("loss"); - pos__ = 0; - size_t loss_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < loss_limit_1__; ++i_1__) { - size_t loss_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < loss_limit_0__; ++i_0__) { - loss[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"gamble[k0__][k1__]",gamble[k0__][k1__],-(1)); - 
check_less_or_equal(function__,"gamble[k0__][k1__]",gamble[k0__][k1__],1); - } - } - current_statement_begin__ = 6; - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"gain[k0__][k1__]",gain[k0__][k1__],0); - } - } - current_statement_begin__ = 8; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"loss[k0__][k1__]",loss[k0__][k1__],0); - } - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 13; - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 14; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 15; - validate_non_negative_index("rho_p", "N", N); - num_params_r__ += N; - current_statement_begin__ = 16; - validate_non_negative_index("lambda_p", "N", N); - num_params_r__ += N; - current_statement_begin__ = 17; - validate_non_negative_index("tau_p", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_ra_prospect() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - 
context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("rho_p"))) - throw std::runtime_error("variable rho_p missing"); - vals_r__ = context__.vals_r("rho_p"); - pos__ = 0U; - validate_non_negative_index("rho_p", "N", N); - context__.validate_dims("initialization", "rho_p", "vector_d", context__.to_vec(N)); - vector_d rho_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - rho_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(rho_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable rho_p: ") + e.what()); - } - - if (!(context__.contains_r("lambda_p"))) - throw std::runtime_error("variable lambda_p missing"); - vals_r__ = context__.vals_r("lambda_p"); - pos__ = 0U; - validate_non_negative_index("lambda_p", "N", N); - context__.validate_dims("initialization", "lambda_p", "vector_d", context__.to_vec(N)); - vector_d lambda_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - lambda_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(lambda_p); - } catch (const 
std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable lambda_p: ") + e.what()); - } - - if (!(context__.contains_r("tau_p"))) - throw std::runtime_error("variable tau_p missing"); - vals_r__ = context__.vals_r("tau_p"); - pos__ = 0U; - validate_non_negative_index("tau_p", "N", N); - context__.validate_dims("initialization", "tau_p", "vector_d", context__.to_vec(N)); - vector_d tau_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - tau_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(tau_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau_p: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix rho_p; - (void) rho_p; // dummy to suppress unused var warning - if 
(jacobian__) - rho_p = in__.vector_constrain(N,lp__); - else - rho_p = in__.vector_constrain(N); - - Eigen::Matrix lambda_p; - (void) lambda_p; // dummy to suppress unused var warning - if (jacobian__) - lambda_p = in__.vector_constrain(N,lp__); - else - lambda_p = in__.vector_constrain(N); - - Eigen::Matrix tau_p; - (void) tau_p; // dummy to suppress unused var warning - if (jacobian__) - tau_p = in__.vector_constrain(N,lp__); - else - tau_p = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 20; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - current_statement_begin__ = 21; - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - current_statement_begin__ = 22; - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - current_statement_begin__ = 24; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 25; - stan::model::assign(rho, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(rho_p,i,"rho_p",1)))) * 2), - "assigning variable rho"); - current_statement_begin__ = 26; - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(lambda_p,i,"lambda_p",1)))) * 5), - "assigning variable lambda"); - } - current_statement_begin__ = 28; - 
stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),tau_p)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(rho(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: rho" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(lambda(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: lambda" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(tau(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 20; - check_greater_or_equal(function__,"rho",rho,0); - check_less_or_equal(function__,"rho",rho,2); - current_statement_begin__ = 21; - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,5); - current_statement_begin__ = 22; - check_greater_or_equal(function__,"tau",tau,0); - - // model body - - current_statement_begin__ = 33; - lp_accum__.add(normal_log(mu_p, 0, 1.0)); - current_statement_begin__ = 34; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 37; - lp_accum__.add(normal_log(rho_p, 0, 1.0)); - current_statement_begin__ = 38; - lp_accum__.add(normal_log(lambda_p, 0, 1.0)); - current_statement_begin__ = 39; - lp_accum__.add(normal_log(tau_p, 0, 1.0)); - current_statement_begin__ = 41; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 42; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - current_statement_begin__ = 43; - 
local_scalar_t__ evSafe; - (void) evSafe; // dummy to suppress unused var warning - - stan::math::initialize(evSafe, DUMMY_VAR__); - stan::math::fill(evSafe,DUMMY_VAR__); - current_statement_begin__ = 44; - local_scalar_t__ evGamble; - (void) evGamble; // dummy to suppress unused var warning - - stan::math::initialize(evGamble, DUMMY_VAR__); - stan::math::fill(evGamble,DUMMY_VAR__); - current_statement_begin__ = 45; - local_scalar_t__ pGamble; - (void) pGamble; // dummy to suppress unused var warning - - stan::math::initialize(pGamble, DUMMY_VAR__); - stan::math::fill(pGamble,DUMMY_VAR__); - - - current_statement_begin__ = 48; - stan::math::assign(evSafe, pow(get_base1(get_base1(cert,i,"cert",1),t,"cert",2),get_base1(rho,i,"rho",1))); - current_statement_begin__ = 49; - stan::math::assign(evGamble, (0.5 * (pow(get_base1(get_base1(gain,i,"gain",1),t,"gain",2),get_base1(rho,i,"rho",1)) - (get_base1(lambda,i,"lambda",1) * pow(get_base1(get_base1(loss,i,"loss",1),t,"loss",2),get_base1(rho,i,"rho",1)))))); - current_statement_begin__ = 50; - stan::math::assign(pGamble, inv_logit((get_base1(tau,i,"tau",1) * (evGamble - evSafe)))); - current_statement_begin__ = 51; - lp_accum__.add(bernoulli_log(get_base1(get_base1(gamble,i,"gamble",1),t,"gamble",2), pGamble)); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const 
{ - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("rho_p"); - names__.push_back("lambda_p"); - names__.push_back("tau_p"); - names__.push_back("rho"); - names__.push_back("lambda"); - names__.push_back("tau"); - names__.push_back("mu_rho"); - names__.push_back("mu_lambda"); - names__.push_back("mu_tau"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_ra_prospect_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = 
in__.vector_lb_constrain(0,3); - vector_d rho_p = in__.vector_constrain(N); - vector_d lambda_p = in__.vector_constrain(N); - vector_d tau_p = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau_p[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 20; - validate_non_negative_index("rho", "N", N); - Eigen::Matrix rho(static_cast(N)); - (void) rho; // dummy to suppress unused var warning - - stan::math::initialize(rho, DUMMY_VAR__); - stan::math::fill(rho,DUMMY_VAR__); - current_statement_begin__ = 21; - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - current_statement_begin__ = 22; - validate_non_negative_index("tau", "N", N); - Eigen::Matrix tau(static_cast(N)); - (void) tau; // dummy to suppress unused var warning - - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - current_statement_begin__ = 24; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 25; - stan::model::assign(rho, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(rho_p,i,"rho_p",1)))) * 2), - "assigning 
variable rho"); - current_statement_begin__ = 26; - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(lambda_p,i,"lambda_p",1)))) * 5), - "assigning variable lambda"); - } - current_statement_begin__ = 28; - stan::math::assign(tau, stan::math::exp(add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),tau_p)))); - - // validate transformed parameters - current_statement_begin__ = 20; - check_greater_or_equal(function__,"rho",rho,0); - check_less_or_equal(function__,"rho",rho,2); - current_statement_begin__ = 21; - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,5); - current_statement_begin__ = 22; - check_greater_or_equal(function__,"tau",tau,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(rho[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 56; - local_scalar_t__ mu_rho; - (void) mu_rho; // dummy to suppress unused var warning - - stan::math::initialize(mu_rho, DUMMY_VAR__); - stan::math::fill(mu_rho,DUMMY_VAR__); - current_statement_begin__ = 57; - local_scalar_t__ mu_lambda; - (void) mu_lambda; // dummy to suppress unused var warning - - stan::math::initialize(mu_lambda, DUMMY_VAR__); - stan::math::fill(mu_lambda,DUMMY_VAR__); - current_statement_begin__ = 58; - local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to suppress unused var warning - - stan::math::initialize(mu_tau, DUMMY_VAR__); - stan::math::fill(mu_tau,DUMMY_VAR__); - current_statement_begin__ = 60; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - 
stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 63; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 66; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 67; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 68; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 72; - stan::math::assign(mu_rho, (Phi_approx(get_base1(mu_p,1,"mu_p",1)) * 2)); - current_statement_begin__ = 73; - stan::math::assign(mu_lambda, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 5)); - current_statement_begin__ = 74; - stan::math::assign(mu_tau, stan::math::exp(get_base1(mu_p,3,"mu_p",1))); - - current_statement_begin__ = 77; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 78; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 79; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - { - current_statement_begin__ = 80; - local_scalar_t__ evSafe; - (void) evSafe; // dummy to suppress unused var warning - - stan::math::initialize(evSafe, DUMMY_VAR__); - stan::math::fill(evSafe,DUMMY_VAR__); - current_statement_begin__ = 81; - local_scalar_t__ evGamble; - (void) evGamble; // dummy to suppress unused var warning - - stan::math::initialize(evGamble, DUMMY_VAR__); - stan::math::fill(evGamble,DUMMY_VAR__); - current_statement_begin__ = 82; - local_scalar_t__ pGamble; - (void) pGamble; // dummy to suppress unused var warning - - stan::math::initialize(pGamble, 
DUMMY_VAR__); - stan::math::fill(pGamble,DUMMY_VAR__); - - - current_statement_begin__ = 84; - stan::math::assign(evSafe, pow(get_base1(get_base1(cert,i,"cert",1),t,"cert",2),get_base1(rho,i,"rho",1))); - current_statement_begin__ = 85; - stan::math::assign(evGamble, (0.5 * (pow(get_base1(get_base1(gain,i,"gain",1),t,"gain",2),get_base1(rho,i,"rho",1)) - (get_base1(lambda,i,"lambda",1) * pow(stan::math::fabs(get_base1(get_base1(loss,i,"loss",1),t,"loss",2)),get_base1(rho,i,"rho",1)))))); - current_statement_begin__ = 86; - stan::math::assign(pGamble, inv_logit((get_base1(tau,i,"tau",1) * (evGamble - evSafe)))); - current_statement_begin__ = 87; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(get_base1(get_base1(gamble,i,"gamble",1),t,"gamble",2),pGamble))), - "assigning variable log_lik"); - current_statement_begin__ = 90; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(pGamble, base_rng__), - "assigning variable y_pred"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 56; - check_greater_or_equal(function__,"mu_rho",mu_rho,0); - check_less_or_equal(function__,"mu_rho",mu_rho,2); - current_statement_begin__ = 57; - check_greater_or_equal(function__,"mu_lambda",mu_lambda,0); - check_less_or_equal(function__,"mu_lambda",mu_lambda,5); - current_statement_begin__ = 58; - check_greater_or_equal(function__,"mu_tau",mu_tau,0); - current_statement_begin__ = 60; - current_statement_begin__ = 63; - - // write generated quantities - vars__.push_back(mu_rho); - vars__.push_back(mu_lambda); - vars__.push_back(mu_tau); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; 
++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_ra_prospect"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "rho" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_rho"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_rdt_happiness_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_rdt_happiness"); - reader.add_event(145, 143, "end", "model_rdt_happiness"); - return reader; -} - -class model_rdt_happiness : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > gamble; - vector > type; - vector > cert; - vector > gain; - vector > loss; - vector > outcome; - vector > happy; - vector > RT_happy; -public: - model_rdt_happiness(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_rdt_happiness(stan::io::var_context& 
context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_rdt_happiness_namespace::model_rdt_happiness"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("gamble", "N", N); - validate_non_negative_index("gamble", "T", T); - context__.validate_dims("data initialization", "gamble", "int", context__.to_vec(N,T)); - 
validate_non_negative_index("gamble", "N", N); - validate_non_negative_index("gamble", "T", T); - gamble = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("gamble"); - pos__ = 0; - size_t gamble_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < gamble_limit_1__; ++i_1__) { - size_t gamble_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < gamble_limit_0__; ++i_0__) { - gamble[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("type", "N", N); - validate_non_negative_index("type", "T", T); - context__.validate_dims("data initialization", "type", "int", context__.to_vec(N,T)); - validate_non_negative_index("type", "N", N); - validate_non_negative_index("type", "T", T); - type = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("type"); - pos__ = 0; - size_t type_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < type_limit_1__; ++i_1__) { - size_t type_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < type_limit_0__; ++i_0__) { - type[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("cert", "N", N); - validate_non_negative_index("cert", "T", T); - context__.validate_dims("data initialization", "cert", "double", context__.to_vec(N,T)); - validate_non_negative_index("cert", "N", N); - validate_non_negative_index("cert", "T", T); - cert = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("cert"); - pos__ = 0; - size_t cert_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < cert_limit_1__; ++i_1__) { - size_t cert_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < cert_limit_0__; ++i_0__) { - cert[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 8; - validate_non_negative_index("gain", "N", N); - validate_non_negative_index("gain", "T", T); - context__.validate_dims("data initialization", "gain", "double", context__.to_vec(N,T)); - validate_non_negative_index("gain", "N", N); - 
validate_non_negative_index("gain", "T", T); - gain = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("gain"); - pos__ = 0; - size_t gain_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < gain_limit_1__; ++i_1__) { - size_t gain_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < gain_limit_0__; ++i_0__) { - gain[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 9; - validate_non_negative_index("loss", "N", N); - validate_non_negative_index("loss", "T", T); - context__.validate_dims("data initialization", "loss", "double", context__.to_vec(N,T)); - validate_non_negative_index("loss", "N", N); - validate_non_negative_index("loss", "T", T); - loss = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("loss"); - pos__ = 0; - size_t loss_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < loss_limit_1__; ++i_1__) { - size_t loss_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < loss_limit_0__; ++i_0__) { - loss[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 10; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "double", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 11; - validate_non_negative_index("happy", "N", N); - validate_non_negative_index("happy", "T", T); - context__.validate_dims("data initialization", "happy", "double", context__.to_vec(N,T)); - validate_non_negative_index("happy", "N", N); - 
validate_non_negative_index("happy", "T", T); - happy = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("happy"); - pos__ = 0; - size_t happy_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < happy_limit_1__; ++i_1__) { - size_t happy_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < happy_limit_0__; ++i_0__) { - happy[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 12; - validate_non_negative_index("RT_happy", "N", N); - validate_non_negative_index("RT_happy", "T", T); - context__.validate_dims("data initialization", "RT_happy", "double", context__.to_vec(N,T)); - validate_non_negative_index("RT_happy", "N", N); - validate_non_negative_index("RT_happy", "T", T); - RT_happy = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("RT_happy"); - pos__ = 0; - size_t RT_happy_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < RT_happy_limit_1__; ++i_1__) { - size_t RT_happy_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < RT_happy_limit_0__; ++i_0__) { - RT_happy[i_0__][i_1__] = vals_r__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"gamble[k0__][k1__]",gamble[k0__][k1__],-(1)); - check_less_or_equal(function__,"gamble[k0__][k1__]",gamble[k0__][k1__],1); - } - } - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"type[k0__][k1__]",type[k0__][k1__],-(1)); - 
check_less_or_equal(function__,"type[k0__][k1__]",type[k0__][k1__],1); - } - } - current_statement_begin__ = 7; - current_statement_begin__ = 8; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"gain[k0__][k1__]",gain[k0__][k1__],0); - } - } - current_statement_begin__ = 9; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"loss[k0__][k1__]",loss[k0__][k1__],0); - } - } - current_statement_begin__ = 10; - current_statement_begin__ = 11; - current_statement_begin__ = 12; - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 17; - validate_non_negative_index("mu_p", "6", 6); - num_params_r__ += 6; - current_statement_begin__ = 18; - validate_non_negative_index("sigma", "6", 6); - num_params_r__ += 6; - current_statement_begin__ = 19; - validate_non_negative_index("w0_p", "N", N); - num_params_r__ += N; - current_statement_begin__ = 20; - validate_non_negative_index("w1_p", "N", N); - num_params_r__ += N; - current_statement_begin__ = 21; - validate_non_negative_index("w2_p", "N", N); - num_params_r__ += N; - current_statement_begin__ = 22; - validate_non_negative_index("w3_p", "N", N); - num_params_r__ += N; - current_statement_begin__ = 23; - validate_non_negative_index("gam_p", "N", N); - num_params_r__ += N; - current_statement_begin__ = 24; - validate_non_negative_index("sig_p", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_rdt_happiness() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& 
params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "6", 6); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(6)); - vector_d mu_p(static_cast(6)); - for (int j1__ = 0U; j1__ < 6; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "6", 6); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(6)); - vector_d sigma(static_cast(6)); - for (int j1__ = 0U; j1__ < 6; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("w0_p"))) - throw std::runtime_error("variable w0_p missing"); - vals_r__ = context__.vals_r("w0_p"); - pos__ = 0U; - validate_non_negative_index("w0_p", "N", N); - context__.validate_dims("initialization", "w0_p", "vector_d", context__.to_vec(N)); - vector_d w0_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - w0_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(w0_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable w0_p: ") + e.what()); - } - - if (!(context__.contains_r("w1_p"))) - throw std::runtime_error("variable 
w1_p missing"); - vals_r__ = context__.vals_r("w1_p"); - pos__ = 0U; - validate_non_negative_index("w1_p", "N", N); - context__.validate_dims("initialization", "w1_p", "vector_d", context__.to_vec(N)); - vector_d w1_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - w1_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(w1_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable w1_p: ") + e.what()); - } - - if (!(context__.contains_r("w2_p"))) - throw std::runtime_error("variable w2_p missing"); - vals_r__ = context__.vals_r("w2_p"); - pos__ = 0U; - validate_non_negative_index("w2_p", "N", N); - context__.validate_dims("initialization", "w2_p", "vector_d", context__.to_vec(N)); - vector_d w2_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - w2_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(w2_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable w2_p: ") + e.what()); - } - - if (!(context__.contains_r("w3_p"))) - throw std::runtime_error("variable w3_p missing"); - vals_r__ = context__.vals_r("w3_p"); - pos__ = 0U; - validate_non_negative_index("w3_p", "N", N); - context__.validate_dims("initialization", "w3_p", "vector_d", context__.to_vec(N)); - vector_d w3_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - w3_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(w3_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable w3_p: ") + e.what()); - } - - if (!(context__.contains_r("gam_p"))) - throw std::runtime_error("variable gam_p missing"); - vals_r__ = context__.vals_r("gam_p"); - pos__ = 0U; - validate_non_negative_index("gam_p", "N", N); - context__.validate_dims("initialization", "gam_p", "vector_d", context__.to_vec(N)); - vector_d gam_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - gam_p(j1__) = 
vals_r__[pos__++]; - try { - writer__.vector_unconstrain(gam_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable gam_p: ") + e.what()); - } - - if (!(context__.contains_r("sig_p"))) - throw std::runtime_error("variable sig_p missing"); - vals_r__ = context__.vals_r("sig_p"); - pos__ = 0U; - validate_non_negative_index("sig_p", "N", N); - context__.validate_dims("initialization", "sig_p", "vector_d", context__.to_vec(N)); - vector_d sig_p(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - sig_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(sig_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sig_p: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(6,lp__); - else - mu_p = in__.vector_constrain(6); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,6,lp__); - else - sigma = in__.vector_lb_constrain(0,6); - - 
Eigen::Matrix w0_p; - (void) w0_p; // dummy to suppress unused var warning - if (jacobian__) - w0_p = in__.vector_constrain(N,lp__); - else - w0_p = in__.vector_constrain(N); - - Eigen::Matrix w1_p; - (void) w1_p; // dummy to suppress unused var warning - if (jacobian__) - w1_p = in__.vector_constrain(N,lp__); - else - w1_p = in__.vector_constrain(N); - - Eigen::Matrix w2_p; - (void) w2_p; // dummy to suppress unused var warning - if (jacobian__) - w2_p = in__.vector_constrain(N,lp__); - else - w2_p = in__.vector_constrain(N); - - Eigen::Matrix w3_p; - (void) w3_p; // dummy to suppress unused var warning - if (jacobian__) - w3_p = in__.vector_constrain(N,lp__); - else - w3_p = in__.vector_constrain(N); - - Eigen::Matrix gam_p; - (void) gam_p; // dummy to suppress unused var warning - if (jacobian__) - gam_p = in__.vector_constrain(N,lp__); - else - gam_p = in__.vector_constrain(N); - - Eigen::Matrix sig_p; - (void) sig_p; // dummy to suppress unused var warning - if (jacobian__) - sig_p = in__.vector_constrain(N,lp__); - else - sig_p = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 27; - validate_non_negative_index("w0", "N", N); - Eigen::Matrix w0(static_cast(N)); - (void) w0; // dummy to suppress unused var warning - - stan::math::initialize(w0, DUMMY_VAR__); - stan::math::fill(w0,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("w1", "N", N); - Eigen::Matrix w1(static_cast(N)); - (void) w1; // dummy to suppress unused var warning - - stan::math::initialize(w1, DUMMY_VAR__); - stan::math::fill(w1,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("w2", "N", N); - Eigen::Matrix w2(static_cast(N)); - (void) w2; // dummy to suppress unused var warning - - stan::math::initialize(w2, DUMMY_VAR__); - stan::math::fill(w2,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("w3", "N", N); - Eigen::Matrix w3(static_cast(N)); - (void) w3; // dummy to 
suppress unused var warning - - stan::math::initialize(w3, DUMMY_VAR__); - stan::math::fill(w3,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("gam", "N", N); - Eigen::Matrix gam(static_cast(N)); - (void) gam; // dummy to suppress unused var warning - - stan::math::initialize(gam, DUMMY_VAR__); - stan::math::fill(gam,DUMMY_VAR__); - current_statement_begin__ = 32; - validate_non_negative_index("sig", "N", N); - Eigen::Matrix sig(static_cast(N)); - (void) sig; // dummy to suppress unused var warning - - stan::math::initialize(sig, DUMMY_VAR__); - stan::math::fill(sig,DUMMY_VAR__); - - - current_statement_begin__ = 34; - stan::math::assign(w0, add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),w0_p))); - current_statement_begin__ = 35; - stan::math::assign(w1, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),w1_p))); - current_statement_begin__ = 36; - stan::math::assign(w2, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),w2_p))); - current_statement_begin__ = 37; - stan::math::assign(w3, add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),w3_p))); - current_statement_begin__ = 39; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 40; - stan::model::assign(gam, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,5,"mu_p",1) + (get_base1(sigma,5,"sigma",1) * get_base1(gam_p,i,"gam_p",1)))), - "assigning variable gam"); - } - current_statement_begin__ = 42; - stan::math::assign(sig, stan::math::exp(add(get_base1(mu_p,6,"mu_p",1),multiply(get_base1(sigma,6,"sigma",1),sig_p)))); - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(w0(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: w0" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - 
if (stan::math::is_uninitialized(w1(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: w1" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(w2(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: w2" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(w3(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: w3" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(gam(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: gam" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(sig(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: sig" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 27; - current_statement_begin__ = 28; - current_statement_begin__ = 29; - current_statement_begin__ = 30; - current_statement_begin__ = 31; - check_greater_or_equal(function__,"gam",gam,0); - check_less_or_equal(function__,"gam",gam,1); - current_statement_begin__ = 32; - check_greater_or_equal(function__,"sig",sig,0); - - // model body - - current_statement_begin__ = 45; - lp_accum__.add(normal_log(mu_p, 0, 1.0)); - current_statement_begin__ = 46; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 49; - lp_accum__.add(normal_log(w0_p, 0, 1.0)); - current_statement_begin__ = 50; - lp_accum__.add(normal_log(w1_p, 0, 1.0)); - current_statement_begin__ = 
51; - lp_accum__.add(normal_log(w2_p, 0, 1.0)); - current_statement_begin__ = 52; - lp_accum__.add(normal_log(w3_p, 0, 1.0)); - current_statement_begin__ = 53; - lp_accum__.add(normal_log(gam_p, 0, 1.0)); - current_statement_begin__ = 54; - lp_accum__.add(normal_log(sig_p, 0, 1.0)); - current_statement_begin__ = 56; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 57; - local_scalar_t__ cert_sum; - (void) cert_sum; // dummy to suppress unused var warning - - stan::math::initialize(cert_sum, DUMMY_VAR__); - stan::math::fill(cert_sum,DUMMY_VAR__); - current_statement_begin__ = 58; - local_scalar_t__ ev_sum; - (void) ev_sum; // dummy to suppress unused var warning - - stan::math::initialize(ev_sum, DUMMY_VAR__); - stan::math::fill(ev_sum,DUMMY_VAR__); - current_statement_begin__ = 59; - local_scalar_t__ rpe_sum; - (void) rpe_sum; // dummy to suppress unused var warning - - stan::math::initialize(rpe_sum, DUMMY_VAR__); - stan::math::fill(rpe_sum,DUMMY_VAR__); - - - current_statement_begin__ = 62; - stan::math::assign(cert_sum, 0); - current_statement_begin__ = 63; - stan::math::assign(ev_sum, 0); - current_statement_begin__ = 64; - stan::math::assign(rpe_sum, 0); - current_statement_begin__ = 66; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 67; - if (as_bool((primitive_value(logical_eq(t,1)) || primitive_value((primitive_value(logical_gt(t,1)) && primitive_value(logical_neq(get_base1(get_base1(RT_happy,i,"RT_happy",1),t,"RT_happy",2),get_base1(get_base1(RT_happy,i,"RT_happy",1),(t - 1),"RT_happy",2)))))))) { - - current_statement_begin__ = 68; - lp_accum__.add(normal_log(get_base1(get_base1(happy,i,"happy",1),t,"happy",2), (((get_base1(w0,i,"w0",1) + (get_base1(w1,i,"w1",1) * cert_sum)) + (get_base1(w2,i,"w2",1) * ev_sum)) + (get_base1(w3,i,"w3",1) * rpe_sum)), get_base1(sig,i,"sig",1))); - } - current_statement_begin__ = 71; - if 
(as_bool(logical_eq(get_base1(get_base1(gamble,i,"gamble",1),t,"gamble",2),0))) { - - current_statement_begin__ = 72; - stan::math::assign(cert_sum, stan::model::deep_copy((cert_sum + (get_base1(get_base1(type,i,"type",1),t,"type",2) * get_base1(get_base1(cert,i,"cert",1),t,"cert",2))))); - } else { - - current_statement_begin__ = 74; - stan::math::assign(ev_sum, stan::model::deep_copy((ev_sum + (0.5 * (get_base1(get_base1(gain,i,"gain",1),t,"gain",2) - get_base1(get_base1(loss,i,"loss",1),t,"loss",2)))))); - current_statement_begin__ = 75; - stan::math::assign(rpe_sum, stan::model::deep_copy(((rpe_sum + get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - (0.5 * (get_base1(get_base1(gain,i,"gain",1),t,"gain",2) - get_base1(get_base1(loss,i,"loss",1),t,"loss",2)))))); - } - current_statement_begin__ = 78; - stan::math::assign(cert_sum, stan::model::deep_copy((get_base1(gam,i,"gam",1) * cert_sum))); - current_statement_begin__ = 79; - stan::math::assign(ev_sum, stan::model::deep_copy((get_base1(gam,i,"gam",1) * ev_sum))); - current_statement_begin__ = 80; - stan::math::assign(rpe_sum, stan::model::deep_copy((get_base1(gam,i,"gam",1) * rpe_sum))); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - 
names__.push_back("w0_p"); - names__.push_back("w1_p"); - names__.push_back("w2_p"); - names__.push_back("w3_p"); - names__.push_back("gam_p"); - names__.push_back("sig_p"); - names__.push_back("w0"); - names__.push_back("w1"); - names__.push_back("w2"); - names__.push_back("w3"); - names__.push_back("gam"); - names__.push_back("sig"); - names__.push_back("mu_w0"); - names__.push_back("mu_w1"); - names__.push_back("mu_w2"); - names__.push_back("mu_w3"); - names__.push_back("mu_gam"); - names__.push_back("mu_sig"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(6); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(6); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - 
dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_rdt_happiness_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(6); - vector_d sigma = in__.vector_lb_constrain(0,6); - vector_d w0_p = in__.vector_constrain(N); - vector_d w1_p = in__.vector_constrain(N); - vector_d w2_p = in__.vector_constrain(N); - vector_d w3_p = in__.vector_constrain(N); - vector_d gam_p = in__.vector_constrain(N); - vector_d sig_p = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 6; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 6; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w0_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w1_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w2_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w3_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(gam_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(sig_p[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - 
current_statement_begin__ = 27; - validate_non_negative_index("w0", "N", N); - Eigen::Matrix w0(static_cast(N)); - (void) w0; // dummy to suppress unused var warning - - stan::math::initialize(w0, DUMMY_VAR__); - stan::math::fill(w0,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("w1", "N", N); - Eigen::Matrix w1(static_cast(N)); - (void) w1; // dummy to suppress unused var warning - - stan::math::initialize(w1, DUMMY_VAR__); - stan::math::fill(w1,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("w2", "N", N); - Eigen::Matrix w2(static_cast(N)); - (void) w2; // dummy to suppress unused var warning - - stan::math::initialize(w2, DUMMY_VAR__); - stan::math::fill(w2,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("w3", "N", N); - Eigen::Matrix w3(static_cast(N)); - (void) w3; // dummy to suppress unused var warning - - stan::math::initialize(w3, DUMMY_VAR__); - stan::math::fill(w3,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("gam", "N", N); - Eigen::Matrix gam(static_cast(N)); - (void) gam; // dummy to suppress unused var warning - - stan::math::initialize(gam, DUMMY_VAR__); - stan::math::fill(gam,DUMMY_VAR__); - current_statement_begin__ = 32; - validate_non_negative_index("sig", "N", N); - Eigen::Matrix sig(static_cast(N)); - (void) sig; // dummy to suppress unused var warning - - stan::math::initialize(sig, DUMMY_VAR__); - stan::math::fill(sig,DUMMY_VAR__); - - - current_statement_begin__ = 34; - stan::math::assign(w0, add(get_base1(mu_p,1,"mu_p",1),multiply(get_base1(sigma,1,"sigma",1),w0_p))); - current_statement_begin__ = 35; - stan::math::assign(w1, add(get_base1(mu_p,2,"mu_p",1),multiply(get_base1(sigma,2,"sigma",1),w1_p))); - current_statement_begin__ = 36; - stan::math::assign(w2, add(get_base1(mu_p,3,"mu_p",1),multiply(get_base1(sigma,3,"sigma",1),w2_p))); - current_statement_begin__ = 37; - stan::math::assign(w3, 
add(get_base1(mu_p,4,"mu_p",1),multiply(get_base1(sigma,4,"sigma",1),w3_p))); - current_statement_begin__ = 39; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 40; - stan::model::assign(gam, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,5,"mu_p",1) + (get_base1(sigma,5,"sigma",1) * get_base1(gam_p,i,"gam_p",1)))), - "assigning variable gam"); - } - current_statement_begin__ = 42; - stan::math::assign(sig, stan::math::exp(add(get_base1(mu_p,6,"mu_p",1),multiply(get_base1(sigma,6,"sigma",1),sig_p)))); - - // validate transformed parameters - current_statement_begin__ = 27; - current_statement_begin__ = 28; - current_statement_begin__ = 29; - current_statement_begin__ = 30; - current_statement_begin__ = 31; - check_greater_or_equal(function__,"gam",gam,0); - check_less_or_equal(function__,"gam",gam,1); - current_statement_begin__ = 32; - check_greater_or_equal(function__,"sig",sig,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w0[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w1[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w2[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w3[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(gam[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(sig[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 85; - local_scalar_t__ mu_w0; - (void) mu_w0; // dummy to suppress unused var warning - - stan::math::initialize(mu_w0, DUMMY_VAR__); - stan::math::fill(mu_w0,DUMMY_VAR__); - current_statement_begin__ = 86; - local_scalar_t__ mu_w1; - (void) mu_w1; // dummy to suppress unused var warning - - stan::math::initialize(mu_w1, DUMMY_VAR__); - 
stan::math::fill(mu_w1,DUMMY_VAR__); - current_statement_begin__ = 87; - local_scalar_t__ mu_w2; - (void) mu_w2; // dummy to suppress unused var warning - - stan::math::initialize(mu_w2, DUMMY_VAR__); - stan::math::fill(mu_w2,DUMMY_VAR__); - current_statement_begin__ = 88; - local_scalar_t__ mu_w3; - (void) mu_w3; // dummy to suppress unused var warning - - stan::math::initialize(mu_w3, DUMMY_VAR__); - stan::math::fill(mu_w3,DUMMY_VAR__); - current_statement_begin__ = 89; - local_scalar_t__ mu_gam; - (void) mu_gam; // dummy to suppress unused var warning - - stan::math::initialize(mu_gam, DUMMY_VAR__); - stan::math::fill(mu_gam,DUMMY_VAR__); - current_statement_begin__ = 90; - local_scalar_t__ mu_sig; - (void) mu_sig; // dummy to suppress unused var warning - - stan::math::initialize(mu_sig, DUMMY_VAR__); - stan::math::fill(mu_sig,DUMMY_VAR__); - current_statement_begin__ = 92; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 95; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 98; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 99; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 100; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 104; - stan::math::assign(mu_w0, get_base1(mu_p,1,"mu_p",1)); - current_statement_begin__ = 105; - stan::math::assign(mu_w1, get_base1(mu_p,2,"mu_p",1)); - current_statement_begin__ = 106; - stan::math::assign(mu_w2, get_base1(mu_p,3,"mu_p",1)); - current_statement_begin__ 
= 107; - stan::math::assign(mu_w3, get_base1(mu_p,4,"mu_p",1)); - current_statement_begin__ = 108; - stan::math::assign(mu_gam, Phi_approx(get_base1(mu_p,5,"mu_p",1))); - current_statement_begin__ = 109; - stan::math::assign(mu_sig, stan::math::exp(get_base1(mu_p,6,"mu_p",1))); - - current_statement_begin__ = 113; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 114; - local_scalar_t__ cert_sum; - (void) cert_sum; // dummy to suppress unused var warning - - stan::math::initialize(cert_sum, DUMMY_VAR__); - stan::math::fill(cert_sum,DUMMY_VAR__); - current_statement_begin__ = 115; - local_scalar_t__ ev_sum; - (void) ev_sum; // dummy to suppress unused var warning - - stan::math::initialize(ev_sum, DUMMY_VAR__); - stan::math::fill(ev_sum,DUMMY_VAR__); - current_statement_begin__ = 116; - local_scalar_t__ rpe_sum; - (void) rpe_sum; // dummy to suppress unused var warning - - stan::math::initialize(rpe_sum, DUMMY_VAR__); - stan::math::fill(rpe_sum,DUMMY_VAR__); - - - current_statement_begin__ = 118; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 120; - stan::math::assign(cert_sum, 0); - current_statement_begin__ = 121; - stan::math::assign(ev_sum, 0); - current_statement_begin__ = 122; - stan::math::assign(rpe_sum, 0); - current_statement_begin__ = 124; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 125; - if (as_bool((primitive_value(logical_eq(t,1)) || primitive_value((primitive_value(logical_gt(t,1)) && primitive_value(logical_neq(get_base1(get_base1(RT_happy,i,"RT_happy",1),t,"RT_happy",2),get_base1(get_base1(RT_happy,i,"RT_happy",1),(t - 1),"RT_happy",2)))))))) { - - current_statement_begin__ = 126; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) 
+ normal_log(get_base1(get_base1(happy,i,"happy",1),t,"happy",2),(((get_base1(w0,i,"w0",1) + (get_base1(w1,i,"w1",1) * cert_sum)) + (get_base1(w2,i,"w2",1) * ev_sum)) + (get_base1(w3,i,"w3",1) * rpe_sum)),get_base1(sig,i,"sig",1)))), - "assigning variable log_lik"); - current_statement_begin__ = 127; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - normal_rng((((get_base1(w0,i,"w0",1) + (get_base1(w1,i,"w1",1) * cert_sum)) + (get_base1(w2,i,"w2",1) * ev_sum)) + (get_base1(w3,i,"w3",1) * rpe_sum)),get_base1(sig,i,"sig",1), base_rng__), - "assigning variable y_pred"); - } - current_statement_begin__ = 130; - if (as_bool(logical_eq(get_base1(get_base1(gamble,i,"gamble",1),t,"gamble",2),0))) { - - current_statement_begin__ = 131; - stan::math::assign(cert_sum, stan::model::deep_copy((cert_sum + (get_base1(get_base1(type,i,"type",1),t,"type",2) * get_base1(get_base1(cert,i,"cert",1),t,"cert",2))))); - } else { - - current_statement_begin__ = 133; - stan::math::assign(ev_sum, stan::model::deep_copy((ev_sum + (0.5 * (get_base1(get_base1(gain,i,"gain",1),t,"gain",2) - get_base1(get_base1(loss,i,"loss",1),t,"loss",2)))))); - current_statement_begin__ = 134; - stan::math::assign(rpe_sum, stan::model::deep_copy(((rpe_sum + get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2)) - (0.5 * (get_base1(get_base1(gain,i,"gain",1),t,"gain",2) - get_base1(get_base1(loss,i,"loss",1),t,"loss",2)))))); - } - current_statement_begin__ = 137; - stan::math::assign(cert_sum, stan::model::deep_copy((get_base1(gam,i,"gam",1) * cert_sum))); - current_statement_begin__ = 138; - stan::math::assign(ev_sum, stan::model::deep_copy((get_base1(gam,i,"gam",1) * ev_sum))); - current_statement_begin__ = 139; - stan::math::assign(rpe_sum, stan::model::deep_copy((get_base1(gam,i,"gam",1) * rpe_sum))); - } - } - } - - // validate generated quantities - current_statement_begin__ = 85; - 
current_statement_begin__ = 86; - current_statement_begin__ = 87; - current_statement_begin__ = 88; - current_statement_begin__ = 89; - check_greater_or_equal(function__,"mu_gam",mu_gam,0); - check_less_or_equal(function__,"mu_gam",mu_gam,1); - current_statement_begin__ = 90; - check_greater_or_equal(function__,"mu_sig",mu_sig,0); - current_statement_begin__ = 92; - current_statement_begin__ = 95; - - // write generated quantities - vars__.push_back(mu_w0); - vars__.push_back(mu_w1); - vars__.push_back(mu_w2); - vars__.push_back(mu_w3); - vars__.push_back(mu_gam); - vars__.push_back(mu_sig); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_rdt_happiness"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ 
<< "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w0_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w1_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w2_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w3_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gam_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sig_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w0" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w1" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w3" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gam" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sig" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w0"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w3"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_gam"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_sig"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w0_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w1_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w2_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w3_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gam_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sig_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w0" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w1" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w3" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "gam" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sig" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w0"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w3"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_gam"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_sig"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_ts_par4_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_ts_par4"); - reader.add_event(203, 201, "end", "model_ts_par4"); - return reader; -} - -class model_ts_par4 : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > level1_choice; - vector > level2_choice; - vector > reward; - double trans_prob; -public: - model_ts_par4(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_ts_par4(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_ts_par4_namespace::model_ts_par4"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", 
context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("level1_choice", "N", N); - validate_non_negative_index("level1_choice", "T", T); - context__.validate_dims("data initialization", "level1_choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("level1_choice", "N", N); - validate_non_negative_index("level1_choice", "T", T); - level1_choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("level1_choice"); - pos__ = 0; - size_t level1_choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < level1_choice_limit_1__; ++i_1__) { - size_t level1_choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < level1_choice_limit_0__; ++i_0__) { - level1_choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("level2_choice", "N", N); - validate_non_negative_index("level2_choice", "T", T); - context__.validate_dims("data initialization", "level2_choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("level2_choice", "N", N); - validate_non_negative_index("level2_choice", "T", T); - level2_choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("level2_choice"); - pos__ = 0; - size_t level2_choice_limit_1__ = 
T; - for (size_t i_1__ = 0; i_1__ < level2_choice_limit_1__; ++i_1__) { - size_t level2_choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < level2_choice_limit_0__; ++i_0__) { - level2_choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("reward", "N", N); - validate_non_negative_index("reward", "T", T); - context__.validate_dims("data initialization", "reward", "int", context__.to_vec(N,T)); - validate_non_negative_index("reward", "N", N); - validate_non_negative_index("reward", "T", T); - reward = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("reward"); - pos__ = 0; - size_t reward_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < reward_limit_1__; ++i_1__) { - size_t reward_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < reward_limit_0__; ++i_0__) { - reward[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 8; - context__.validate_dims("data initialization", "trans_prob", "double", context__.to_vec()); - trans_prob = double(0); - vals_r__ = context__.vals_r("trans_prob"); - pos__ = 0; - trans_prob = vals_r__[pos__++]; - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"level1_choice[k0__][k1__]",level1_choice[k0__][k1__],1); - check_less_or_equal(function__,"level1_choice[k0__][k1__]",level1_choice[k0__][k1__],2); - } - } - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - 
check_greater_or_equal(function__,"level2_choice[k0__][k1__]",level2_choice[k0__][k1__],1); - check_less_or_equal(function__,"level2_choice[k0__][k1__]",level2_choice[k0__][k1__],4); - } - } - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"reward[k0__][k1__]",reward[k0__][k1__],0); - check_less_or_equal(function__,"reward[k0__][k1__]",reward[k0__][k1__],1); - } - } - current_statement_begin__ = 8; - check_greater_or_equal(function__,"trans_prob",trans_prob,0); - check_less_or_equal(function__,"trans_prob",trans_prob,1); - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 15; - validate_non_negative_index("mu_p", "4", 4); - num_params_r__ += 4; - current_statement_begin__ = 16; - validate_non_negative_index("sigma", "4", 4); - num_params_r__ += 4; - current_statement_begin__ = 19; - validate_non_negative_index("a_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 20; - validate_non_negative_index("beta_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 21; - validate_non_negative_index("pi_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 22; - validate_non_negative_index("w_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_ts_par4() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - 
std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "4", 4); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(4)); - vector_d mu_p(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "4", 4); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(4)); - vector_d sigma(static_cast(4)); - for (int j1__ = 0U; j1__ < 4; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("a_pr"))) - throw std::runtime_error("variable a_pr missing"); - vals_r__ = context__.vals_r("a_pr"); - pos__ = 0U; - validate_non_negative_index("a_pr", "N", N); - context__.validate_dims("initialization", "a_pr", "vector_d", context__.to_vec(N)); - vector_d a_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - a_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(a_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable a_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta_pr"))) - throw std::runtime_error("variable beta_pr missing"); - vals_r__ = context__.vals_r("beta_pr"); - pos__ = 0U; - validate_non_negative_index("beta_pr", "N", N); - context__.validate_dims("initialization", "beta_pr", 
"vector_d", context__.to_vec(N)); - vector_d beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta_pr: ") + e.what()); - } - - if (!(context__.contains_r("pi_pr"))) - throw std::runtime_error("variable pi_pr missing"); - vals_r__ = context__.vals_r("pi_pr"); - pos__ = 0U; - validate_non_negative_index("pi_pr", "N", N); - context__.validate_dims("initialization", "pi_pr", "vector_d", context__.to_vec(N)); - vector_d pi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - pi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(pi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable pi_pr: ") + e.what()); - } - - if (!(context__.contains_r("w_pr"))) - throw std::runtime_error("variable w_pr missing"); - vals_r__ = context__.vals_r("w_pr"); - pos__ = 0U; - validate_non_negative_index("w_pr", "N", N); - context__.validate_dims("initialization", "w_pr", "vector_d", context__.to_vec(N)); - vector_d w_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - w_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(w_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable w_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* 
pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(4,lp__); - else - mu_p = in__.vector_constrain(4); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,4,lp__); - else - sigma = in__.vector_lb_constrain(0,4); - - Eigen::Matrix a_pr; - (void) a_pr; // dummy to suppress unused var warning - if (jacobian__) - a_pr = in__.vector_constrain(N,lp__); - else - a_pr = in__.vector_constrain(N); - - Eigen::Matrix beta_pr; - (void) beta_pr; // dummy to suppress unused var warning - if (jacobian__) - beta_pr = in__.vector_constrain(N,lp__); - else - beta_pr = in__.vector_constrain(N); - - Eigen::Matrix pi_pr; - (void) pi_pr; // dummy to suppress unused var warning - if (jacobian__) - pi_pr = in__.vector_constrain(N,lp__); - else - pi_pr = in__.vector_constrain(N); - - Eigen::Matrix w_pr; - (void) w_pr; // dummy to suppress unused var warning - if (jacobian__) - w_pr = in__.vector_constrain(N,lp__); - else - w_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 26; - validate_non_negative_index("a", "N", N); - Eigen::Matrix a(static_cast(N)); - (void) a; // dummy to suppress unused var warning - - stan::math::initialize(a, DUMMY_VAR__); - stan::math::fill(a,DUMMY_VAR__); - current_statement_begin__ = 27; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - current_statement_begin__ = 28; - 
validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("w", "N", N); - Eigen::Matrix w(static_cast(N)); - (void) w; // dummy to suppress unused var warning - - stan::math::initialize(w, DUMMY_VAR__); - stan::math::fill(w,DUMMY_VAR__); - - - current_statement_begin__ = 31; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 32; - stan::model::assign(a, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(a_pr,i,"a_pr",1)))), - "assigning variable a"); - current_statement_begin__ = 33; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))), - "assigning variable beta"); - current_statement_begin__ = 34; - stan::model::assign(pi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(pi_pr,i,"pi_pr",1)))) * 5), - "assigning variable pi"); - current_statement_begin__ = 35; - stan::model::assign(w, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), - "assigning variable w"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(a(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: a" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if 
(stan::math::is_uninitialized(beta(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(pi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: pi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(w(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: w" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 26; - check_greater_or_equal(function__,"a",a,0); - check_less_or_equal(function__,"a",a,1); - current_statement_begin__ = 27; - check_greater_or_equal(function__,"beta",beta,0); - current_statement_begin__ = 28; - check_greater_or_equal(function__,"pi",pi,0); - check_less_or_equal(function__,"pi",pi,5); - current_statement_begin__ = 29; - check_greater_or_equal(function__,"w",w,0); - check_less_or_equal(function__,"w",w,1); - - // model body - - current_statement_begin__ = 40; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 41; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 44; - lp_accum__.add(normal_log(a_pr, 0, 1)); - current_statement_begin__ = 45; - lp_accum__.add(normal_log(beta_pr, 0, 1)); - current_statement_begin__ = 46; - lp_accum__.add(normal_log(pi_pr, 0, 1)); - current_statement_begin__ = 47; - lp_accum__.add(normal_log(w_pr, 0, 1)); - current_statement_begin__ = 49; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 51; - validate_non_negative_index("v_mb", "2", 2); - Eigen::Matrix v_mb(static_cast(2)); - (void) v_mb; // dummy to suppress unused var warning - - 
stan::math::initialize(v_mb, DUMMY_VAR__); - stan::math::fill(v_mb,DUMMY_VAR__); - current_statement_begin__ = 52; - validate_non_negative_index("v_mf", "6", 6); - Eigen::Matrix v_mf(static_cast(6)); - (void) v_mf; // dummy to suppress unused var warning - - stan::math::initialize(v_mf, DUMMY_VAR__); - stan::math::fill(v_mf,DUMMY_VAR__); - current_statement_begin__ = 53; - validate_non_negative_index("v_hybrid", "2", 2); - Eigen::Matrix v_hybrid(static_cast(2)); - (void) v_hybrid; // dummy to suppress unused var warning - - stan::math::initialize(v_hybrid, DUMMY_VAR__); - stan::math::fill(v_hybrid,DUMMY_VAR__); - current_statement_begin__ = 54; - local_scalar_t__ level1_prob_choice2; - (void) level1_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level1_prob_choice2, DUMMY_VAR__); - stan::math::fill(level1_prob_choice2,DUMMY_VAR__); - current_statement_begin__ = 55; - local_scalar_t__ level2_prob_choice2; - (void) level2_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level2_prob_choice2, DUMMY_VAR__); - stan::math::fill(level2_prob_choice2,DUMMY_VAR__); - current_statement_begin__ = 56; - int level1_choice_01(0); - (void) level1_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level1_choice_01, std::numeric_limits::min()); - current_statement_begin__ = 57; - int level2_choice_01(0); - (void) level2_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level2_choice_01, std::numeric_limits::min()); - - - current_statement_begin__ = 60; - stan::math::assign(v_mb, rep_vector(0.0,2)); - current_statement_begin__ = 61; - stan::math::assign(v_mf, rep_vector(0.0,6)); - current_statement_begin__ = 62; - stan::math::assign(v_hybrid, rep_vector(0.0,2)); - current_statement_begin__ = 64; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 66; - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(1), 
stan::model::nil_index_list()), - ((trans_prob * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + ((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - current_statement_begin__ = 67; - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - (((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + (trans_prob * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - current_statement_begin__ = 70; - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,1,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,1,"v_mf",1))), - "assigning variable v_hybrid"); - current_statement_begin__ = 71; - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,2,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,2,"v_mf",1))), - "assigning variable v_hybrid"); - current_statement_begin__ = 75; - stan::math::assign(level1_choice_01, (get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2) - 1)); - current_statement_begin__ = 76; - if (as_bool(logical_eq(t,1))) { - - current_statement_begin__ = 77; - stan::math::assign(level1_prob_choice2, inv_logit((get_base1(beta,i,"beta",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))))); - } else { - - current_statement_begin__ = 79; - stan::math::assign(level1_prob_choice2, inv_logit(((get_base1(beta,i,"beta",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))) + (get_base1(pi,i,"pi",1) * ((2 * get_base1(get_base1(level1_choice,i,"level1_choice",1),(t - 1),"level1_choice",2)) - 3))))); - } - current_statement_begin__ 
= 81; - lp_accum__.add(bernoulli_log(level1_choice_01, level1_prob_choice2)); - current_statement_begin__ = 84; - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a,i,"a",1) * (get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) - get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1))))), - "assigning variable v_mf"); - current_statement_begin__ = 87; - stan::math::assign(level2_choice_01, (1 - modulus(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))); - current_statement_begin__ = 88; - if (as_bool(logical_gt(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))) { - - current_statement_begin__ = 89; - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta,i,"beta",1) * (get_base1(v_mf,6,"v_mf",1) - get_base1(v_mf,5,"v_mf",1))))); - } else { - - current_statement_begin__ = 91; - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta,i,"beta",1) * (get_base1(v_mf,4,"v_mf",1) - get_base1(v_mf,3,"v_mf",1))))); - } - current_statement_begin__ = 93; - lp_accum__.add(bernoulli_log(level2_choice_01, level2_prob_choice2)); - current_statement_begin__ = 97; - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) + (get_base1(a,i,"a",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + 
get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - current_statement_begin__ = 100; - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a,i,"a",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("a_pr"); - names__.push_back("beta_pr"); - names__.push_back("pi_pr"); - names__.push_back("w_pr"); - names__.push_back("a"); - names__.push_back("beta"); - names__.push_back("pi"); - names__.push_back("w"); - names__.push_back("mu_a"); - names__.push_back("mu_beta"); - names__.push_back("mu_pi"); - names__.push_back("mu_w"); - names__.push_back("log_lik"); - names__.push_back("y_pred_step1"); - names__.push_back("y_pred_step2"); - } - - - void get_dims(std::vector >& 
dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(4); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_ts_par4_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(4); - vector_d sigma = in__.vector_lb_constrain(0,4); - vector_d a_pr = in__.vector_constrain(N); - vector_d beta_pr = in__.vector_constrain(N); - vector_d pi_pr = in__.vector_constrain(N); - vector_d w_pr = 
in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 4; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 26; - validate_non_negative_index("a", "N", N); - Eigen::Matrix a(static_cast(N)); - (void) a; // dummy to suppress unused var warning - - stan::math::initialize(a, DUMMY_VAR__); - stan::math::fill(a,DUMMY_VAR__); - current_statement_begin__ = 27; - validate_non_negative_index("beta", "N", N); - Eigen::Matrix beta(static_cast(N)); - (void) beta; // dummy to suppress unused var warning - - stan::math::initialize(beta, DUMMY_VAR__); - stan::math::fill(beta,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("w", "N", N); - Eigen::Matrix w(static_cast(N)); - (void) w; // dummy to suppress unused var warning - - stan::math::initialize(w, DUMMY_VAR__); - stan::math::fill(w,DUMMY_VAR__); - - - current_statement_begin__ = 31; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 32; - stan::model::assign(a, - stan::model::cons_list(stan::model::index_uni(i), 
stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(a_pr,i,"a_pr",1)))), - "assigning variable a"); - current_statement_begin__ = 33; - stan::model::assign(beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta_pr,i,"beta_pr",1)))), - "assigning variable beta"); - current_statement_begin__ = 34; - stan::model::assign(pi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(pi_pr,i,"pi_pr",1)))) * 5), - "assigning variable pi"); - current_statement_begin__ = 35; - stan::model::assign(w, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), - "assigning variable w"); - } - - // validate transformed parameters - current_statement_begin__ = 26; - check_greater_or_equal(function__,"a",a,0); - check_less_or_equal(function__,"a",a,1); - current_statement_begin__ = 27; - check_greater_or_equal(function__,"beta",beta,0); - current_statement_begin__ = 28; - check_greater_or_equal(function__,"pi",pi,0); - check_less_or_equal(function__,"pi",pi,5); - current_statement_begin__ = 29; - check_greater_or_equal(function__,"w",w,0); - check_less_or_equal(function__,"w",w,1); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - 
current_statement_begin__ = 108; - local_scalar_t__ mu_a; - (void) mu_a; // dummy to suppress unused var warning - - stan::math::initialize(mu_a, DUMMY_VAR__); - stan::math::fill(mu_a,DUMMY_VAR__); - current_statement_begin__ = 109; - local_scalar_t__ mu_beta; - (void) mu_beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta, DUMMY_VAR__); - stan::math::fill(mu_beta,DUMMY_VAR__); - current_statement_begin__ = 110; - local_scalar_t__ mu_pi; - (void) mu_pi; // dummy to suppress unused var warning - - stan::math::initialize(mu_pi, DUMMY_VAR__); - stan::math::fill(mu_pi,DUMMY_VAR__); - current_statement_begin__ = 111; - local_scalar_t__ mu_w; - (void) mu_w; // dummy to suppress unused var warning - - stan::math::initialize(mu_w, DUMMY_VAR__); - stan::math::fill(mu_w,DUMMY_VAR__); - current_statement_begin__ = 114; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 117; - validate_non_negative_index("y_pred_step1", "N", N); - validate_non_negative_index("y_pred_step1", "T", T); - vector > y_pred_step1(N, (vector(T))); - stan::math::initialize(y_pred_step1, DUMMY_VAR__); - stan::math::fill(y_pred_step1,DUMMY_VAR__); - current_statement_begin__ = 118; - validate_non_negative_index("y_pred_step2", "N", N); - validate_non_negative_index("y_pred_step2", "T", T); - vector > y_pred_step2(N, (vector(T))); - stan::math::initialize(y_pred_step2, DUMMY_VAR__); - stan::math::fill(y_pred_step2,DUMMY_VAR__); - - - current_statement_begin__ = 121; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 122; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 123; - stan::model::assign(y_pred_step1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred_step1"); - 
current_statement_begin__ = 124; - stan::model::assign(y_pred_step2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred_step2"); - } - } - current_statement_begin__ = 129; - stan::math::assign(mu_a, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 130; - stan::math::assign(mu_beta, stan::math::exp(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 131; - stan::math::assign(mu_pi, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 5)); - current_statement_begin__ = 132; - stan::math::assign(mu_w, Phi_approx(get_base1(mu_p,4,"mu_p",1))); - - current_statement_begin__ = 135; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 137; - validate_non_negative_index("v_mb", "2", 2); - Eigen::Matrix v_mb(static_cast(2)); - (void) v_mb; // dummy to suppress unused var warning - - stan::math::initialize(v_mb, DUMMY_VAR__); - stan::math::fill(v_mb,DUMMY_VAR__); - current_statement_begin__ = 138; - validate_non_negative_index("v_mf", "6", 6); - Eigen::Matrix v_mf(static_cast(6)); - (void) v_mf; // dummy to suppress unused var warning - - stan::math::initialize(v_mf, DUMMY_VAR__); - stan::math::fill(v_mf,DUMMY_VAR__); - current_statement_begin__ = 139; - validate_non_negative_index("v_hybrid", "2", 2); - Eigen::Matrix v_hybrid(static_cast(2)); - (void) v_hybrid; // dummy to suppress unused var warning - - stan::math::initialize(v_hybrid, DUMMY_VAR__); - stan::math::fill(v_hybrid,DUMMY_VAR__); - current_statement_begin__ = 140; - local_scalar_t__ level1_prob_choice2; - (void) level1_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level1_prob_choice2, DUMMY_VAR__); - stan::math::fill(level1_prob_choice2,DUMMY_VAR__); - current_statement_begin__ = 141; - local_scalar_t__ level2_prob_choice2; - (void) level2_prob_choice2; // dummy to suppress unused var warning - - 
stan::math::initialize(level2_prob_choice2, DUMMY_VAR__); - stan::math::fill(level2_prob_choice2,DUMMY_VAR__); - current_statement_begin__ = 142; - int level1_choice_01(0); - (void) level1_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level1_choice_01, std::numeric_limits::min()); - current_statement_begin__ = 143; - int level2_choice_01(0); - (void) level2_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level2_choice_01, std::numeric_limits::min()); - - - current_statement_begin__ = 146; - stan::math::assign(v_mb, rep_vector(0.0,2)); - current_statement_begin__ = 147; - stan::math::assign(v_mf, rep_vector(0.0,6)); - current_statement_begin__ = 148; - stan::math::assign(v_hybrid, rep_vector(0.0,2)); - current_statement_begin__ = 150; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 152; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 154; - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((trans_prob * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + ((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - current_statement_begin__ = 155; - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - (((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + (trans_prob * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - current_statement_begin__ = 158; - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,1,"v_mb",1)) + ((1 - 
get_base1(w,i,"w",1)) * get_base1(v_mf,1,"v_mf",1))), - "assigning variable v_hybrid"); - current_statement_begin__ = 159; - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,2,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,2,"v_mf",1))), - "assigning variable v_hybrid"); - current_statement_begin__ = 163; - stan::math::assign(level1_choice_01, (get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2) - 1)); - current_statement_begin__ = 164; - if (as_bool(logical_eq(t,1))) { - - current_statement_begin__ = 165; - stan::math::assign(level1_prob_choice2, inv_logit((get_base1(beta,i,"beta",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))))); - } else { - - current_statement_begin__ = 167; - stan::math::assign(level1_prob_choice2, inv_logit(((get_base1(beta,i,"beta",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))) + (get_base1(pi,i,"pi",1) * ((2 * get_base1(get_base1(level1_choice,i,"level1_choice",1),(t - 1),"level1_choice",2)) - 3))))); - } - current_statement_begin__ = 169; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(level1_choice_01,level1_prob_choice2))), - "assigning variable log_lik"); - current_statement_begin__ = 172; - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a,i,"a",1) * (get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) - 
get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1))))), - "assigning variable v_mf"); - current_statement_begin__ = 175; - stan::math::assign(level2_choice_01, (1 - modulus(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))); - current_statement_begin__ = 177; - if (as_bool(logical_gt(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))) { - - current_statement_begin__ = 178; - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta,i,"beta",1) * (get_base1(v_mf,6,"v_mf",1) - get_base1(v_mf,5,"v_mf",1))))); - } else { - - current_statement_begin__ = 180; - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta,i,"beta",1) * (get_base1(v_mf,4,"v_mf",1) - get_base1(v_mf,3,"v_mf",1))))); - } - current_statement_begin__ = 182; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(level2_choice_01,level2_prob_choice2))), - "assigning variable log_lik"); - current_statement_begin__ = 185; - stan::model::assign(y_pred_step1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(level1_prob_choice2, base_rng__), - "assigning variable y_pred_step1"); - current_statement_begin__ = 186; - stan::model::assign(y_pred_step2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(level2_prob_choice2, base_rng__), - "assigning variable y_pred_step2"); - current_statement_begin__ = 189; - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - 
stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a,i,"a",1) * (get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) - get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1))))), - "assigning variable v_mf"); - current_statement_begin__ = 193; - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) + (get_base1(a,i,"a",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - current_statement_begin__ = 196; - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a,i,"a",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 108; - check_greater_or_equal(function__,"mu_a",mu_a,0); - check_less_or_equal(function__,"mu_a",mu_a,1); - current_statement_begin__ = 109; - check_greater_or_equal(function__,"mu_beta",mu_beta,0); - current_statement_begin__ = 110; - check_greater_or_equal(function__,"mu_pi",mu_pi,0); - check_less_or_equal(function__,"mu_pi",mu_pi,5); - 
current_statement_begin__ = 111; - check_greater_or_equal(function__,"mu_w",mu_w,0); - check_less_or_equal(function__,"mu_w",mu_w,1); - current_statement_begin__ = 114; - current_statement_begin__ = 117; - current_statement_begin__ = 118; - - // write generated quantities - vars__.push_back(mu_a); - vars__.push_back(mu_beta); - vars__.push_back(mu_pi); - vars__.push_back(mu_w); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred_step1[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred_step2[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_ts_par4"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step1" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step2" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 4; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step1" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step2" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_ts_par6_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_ts_par6"); - reader.add_event(212, 210, "end", "model_ts_par6"); - return reader; -} - -class model_ts_par6 : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > level1_choice; - vector > level2_choice; - vector > reward; - double trans_prob; -public: - model_ts_par6(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_ts_par6(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_ts_par6_namespace::model_ts_par6"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", 
context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("level1_choice", "N", N); - validate_non_negative_index("level1_choice", "T", T); - context__.validate_dims("data initialization", "level1_choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("level1_choice", "N", N); - validate_non_negative_index("level1_choice", "T", T); - level1_choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("level1_choice"); - pos__ = 0; - size_t level1_choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < level1_choice_limit_1__; ++i_1__) { - size_t level1_choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < level1_choice_limit_0__; ++i_0__) { - level1_choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("level2_choice", "N", N); - validate_non_negative_index("level2_choice", "T", T); - context__.validate_dims("data initialization", "level2_choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("level2_choice", "N", N); - validate_non_negative_index("level2_choice", "T", T); - level2_choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("level2_choice"); - pos__ = 0; - size_t level2_choice_limit_1__ = 
T; - for (size_t i_1__ = 0; i_1__ < level2_choice_limit_1__; ++i_1__) { - size_t level2_choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < level2_choice_limit_0__; ++i_0__) { - level2_choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("reward", "N", N); - validate_non_negative_index("reward", "T", T); - context__.validate_dims("data initialization", "reward", "int", context__.to_vec(N,T)); - validate_non_negative_index("reward", "N", N); - validate_non_negative_index("reward", "T", T); - reward = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("reward"); - pos__ = 0; - size_t reward_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < reward_limit_1__; ++i_1__) { - size_t reward_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < reward_limit_0__; ++i_0__) { - reward[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 8; - context__.validate_dims("data initialization", "trans_prob", "double", context__.to_vec()); - trans_prob = double(0); - vals_r__ = context__.vals_r("trans_prob"); - pos__ = 0; - trans_prob = vals_r__[pos__++]; - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"level1_choice[k0__][k1__]",level1_choice[k0__][k1__],1); - check_less_or_equal(function__,"level1_choice[k0__][k1__]",level1_choice[k0__][k1__],2); - } - } - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - 
check_greater_or_equal(function__,"level2_choice[k0__][k1__]",level2_choice[k0__][k1__],1); - check_less_or_equal(function__,"level2_choice[k0__][k1__]",level2_choice[k0__][k1__],4); - } - } - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"reward[k0__][k1__]",reward[k0__][k1__],0); - check_less_or_equal(function__,"reward[k0__][k1__]",reward[k0__][k1__],1); - } - } - current_statement_begin__ = 8; - check_greater_or_equal(function__,"trans_prob",trans_prob,0); - check_less_or_equal(function__,"trans_prob",trans_prob,1); - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 15; - validate_non_negative_index("mu_p", "6", 6); - num_params_r__ += 6; - current_statement_begin__ = 16; - validate_non_negative_index("sigma", "6", 6); - num_params_r__ += 6; - current_statement_begin__ = 19; - validate_non_negative_index("a1_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 20; - validate_non_negative_index("beta1_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 21; - validate_non_negative_index("a2_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 22; - validate_non_negative_index("beta2_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 23; - validate_non_negative_index("pi_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 24; - validate_non_negative_index("w_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_ts_par6() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& 
params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "6", 6); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(6)); - vector_d mu_p(static_cast(6)); - for (int j1__ = 0U; j1__ < 6; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "6", 6); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(6)); - vector_d sigma(static_cast(6)); - for (int j1__ = 0U; j1__ < 6; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("a1_pr"))) - throw std::runtime_error("variable a1_pr missing"); - vals_r__ = context__.vals_r("a1_pr"); - pos__ = 0U; - validate_non_negative_index("a1_pr", "N", N); - context__.validate_dims("initialization", "a1_pr", "vector_d", context__.to_vec(N)); - vector_d a1_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - a1_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(a1_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable a1_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta1_pr"))) 
- throw std::runtime_error("variable beta1_pr missing"); - vals_r__ = context__.vals_r("beta1_pr"); - pos__ = 0U; - validate_non_negative_index("beta1_pr", "N", N); - context__.validate_dims("initialization", "beta1_pr", "vector_d", context__.to_vec(N)); - vector_d beta1_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta1_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta1_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta1_pr: ") + e.what()); - } - - if (!(context__.contains_r("a2_pr"))) - throw std::runtime_error("variable a2_pr missing"); - vals_r__ = context__.vals_r("a2_pr"); - pos__ = 0U; - validate_non_negative_index("a2_pr", "N", N); - context__.validate_dims("initialization", "a2_pr", "vector_d", context__.to_vec(N)); - vector_d a2_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - a2_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(a2_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable a2_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta2_pr"))) - throw std::runtime_error("variable beta2_pr missing"); - vals_r__ = context__.vals_r("beta2_pr"); - pos__ = 0U; - validate_non_negative_index("beta2_pr", "N", N); - context__.validate_dims("initialization", "beta2_pr", "vector_d", context__.to_vec(N)); - vector_d beta2_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta2_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta2_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta2_pr: ") + e.what()); - } - - if (!(context__.contains_r("pi_pr"))) - throw std::runtime_error("variable pi_pr missing"); - vals_r__ = context__.vals_r("pi_pr"); - pos__ = 0U; - validate_non_negative_index("pi_pr", "N", N); - context__.validate_dims("initialization", "pi_pr", "vector_d", 
context__.to_vec(N)); - vector_d pi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - pi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(pi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable pi_pr: ") + e.what()); - } - - if (!(context__.contains_r("w_pr"))) - throw std::runtime_error("variable w_pr missing"); - vals_r__ = context__.vals_r("w_pr"); - pos__ = 0U; - validate_non_negative_index("w_pr", "N", N); - context__.validate_dims("initialization", "w_pr", "vector_d", context__.to_vec(N)); - vector_d w_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - w_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(w_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable w_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(6,lp__); - else - mu_p = in__.vector_constrain(6); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - 
sigma = in__.vector_lb_constrain(0,6,lp__); - else - sigma = in__.vector_lb_constrain(0,6); - - Eigen::Matrix a1_pr; - (void) a1_pr; // dummy to suppress unused var warning - if (jacobian__) - a1_pr = in__.vector_constrain(N,lp__); - else - a1_pr = in__.vector_constrain(N); - - Eigen::Matrix beta1_pr; - (void) beta1_pr; // dummy to suppress unused var warning - if (jacobian__) - beta1_pr = in__.vector_constrain(N,lp__); - else - beta1_pr = in__.vector_constrain(N); - - Eigen::Matrix a2_pr; - (void) a2_pr; // dummy to suppress unused var warning - if (jacobian__) - a2_pr = in__.vector_constrain(N,lp__); - else - a2_pr = in__.vector_constrain(N); - - Eigen::Matrix beta2_pr; - (void) beta2_pr; // dummy to suppress unused var warning - if (jacobian__) - beta2_pr = in__.vector_constrain(N,lp__); - else - beta2_pr = in__.vector_constrain(N); - - Eigen::Matrix pi_pr; - (void) pi_pr; // dummy to suppress unused var warning - if (jacobian__) - pi_pr = in__.vector_constrain(N,lp__); - else - pi_pr = in__.vector_constrain(N); - - Eigen::Matrix w_pr; - (void) w_pr; // dummy to suppress unused var warning - if (jacobian__) - w_pr = in__.vector_constrain(N,lp__); - else - w_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 28; - validate_non_negative_index("a1", "N", N); - Eigen::Matrix a1(static_cast(N)); - (void) a1; // dummy to suppress unused var warning - - stan::math::initialize(a1, DUMMY_VAR__); - stan::math::fill(a1,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("beta1", "N", N); - Eigen::Matrix beta1(static_cast(N)); - (void) beta1; // dummy to suppress unused var warning - - stan::math::initialize(beta1, DUMMY_VAR__); - stan::math::fill(beta1,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("a2", "N", N); - Eigen::Matrix a2(static_cast(N)); - (void) a2; // dummy to suppress unused var warning - - stan::math::initialize(a2, DUMMY_VAR__); - 
stan::math::fill(a2,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("beta2", "N", N); - Eigen::Matrix beta2(static_cast(N)); - (void) beta2; // dummy to suppress unused var warning - - stan::math::initialize(beta2, DUMMY_VAR__); - stan::math::fill(beta2,DUMMY_VAR__); - current_statement_begin__ = 32; - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - current_statement_begin__ = 33; - validate_non_negative_index("w", "N", N); - Eigen::Matrix w(static_cast(N)); - (void) w; // dummy to suppress unused var warning - - stan::math::initialize(w, DUMMY_VAR__); - stan::math::fill(w,DUMMY_VAR__); - - - current_statement_begin__ = 35; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 36; - stan::model::assign(a1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(a1_pr,i,"a1_pr",1)))), - "assigning variable a1"); - current_statement_begin__ = 37; - stan::model::assign(beta1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta1_pr,i,"beta1_pr",1)))), - "assigning variable beta1"); - current_statement_begin__ = 38; - stan::model::assign(a2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(a2_pr,i,"a2_pr",1)))), - "assigning variable a2"); - current_statement_begin__ = 39; - stan::model::assign(beta2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(beta2_pr,i,"beta2_pr",1)))), - "assigning 
variable beta2"); - current_statement_begin__ = 40; - stan::model::assign(pi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,5,"mu_p",1) + (get_base1(sigma,5,"sigma",1) * get_base1(pi_pr,i,"pi_pr",1)))) * 5), - "assigning variable pi"); - current_statement_begin__ = 41; - stan::model::assign(w, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,6,"mu_p",1) + (get_base1(sigma,6,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), - "assigning variable w"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(a1(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: a1" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta1(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta1" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(a2(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: a2" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta2(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta2" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(pi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: pi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(w(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: w" << '[' << i0__ 
<< ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 28; - check_greater_or_equal(function__,"a1",a1,0); - check_less_or_equal(function__,"a1",a1,1); - current_statement_begin__ = 29; - check_greater_or_equal(function__,"beta1",beta1,0); - current_statement_begin__ = 30; - check_greater_or_equal(function__,"a2",a2,0); - check_less_or_equal(function__,"a2",a2,1); - current_statement_begin__ = 31; - check_greater_or_equal(function__,"beta2",beta2,0); - current_statement_begin__ = 32; - check_greater_or_equal(function__,"pi",pi,0); - check_less_or_equal(function__,"pi",pi,5); - current_statement_begin__ = 33; - check_greater_or_equal(function__,"w",w,0); - check_less_or_equal(function__,"w",w,1); - - // model body - - current_statement_begin__ = 46; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 47; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 50; - lp_accum__.add(normal_log(a1_pr, 0, 1)); - current_statement_begin__ = 51; - lp_accum__.add(normal_log(beta1_pr, 0, 1)); - current_statement_begin__ = 52; - lp_accum__.add(normal_log(a2_pr, 0, 1)); - current_statement_begin__ = 53; - lp_accum__.add(normal_log(beta2_pr, 0, 1)); - current_statement_begin__ = 54; - lp_accum__.add(normal_log(pi_pr, 0, 1)); - current_statement_begin__ = 55; - lp_accum__.add(normal_log(w_pr, 0, 1)); - current_statement_begin__ = 57; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 59; - validate_non_negative_index("v_mb", "2", 2); - Eigen::Matrix v_mb(static_cast(2)); - (void) v_mb; // dummy to suppress unused var warning - - stan::math::initialize(v_mb, DUMMY_VAR__); - stan::math::fill(v_mb,DUMMY_VAR__); - current_statement_begin__ = 60; - validate_non_negative_index("v_mf", "6", 6); - Eigen::Matrix v_mf(static_cast(6)); - (void) v_mf; // dummy to 
suppress unused var warning - - stan::math::initialize(v_mf, DUMMY_VAR__); - stan::math::fill(v_mf,DUMMY_VAR__); - current_statement_begin__ = 61; - validate_non_negative_index("v_hybrid", "2", 2); - Eigen::Matrix v_hybrid(static_cast(2)); - (void) v_hybrid; // dummy to suppress unused var warning - - stan::math::initialize(v_hybrid, DUMMY_VAR__); - stan::math::fill(v_hybrid,DUMMY_VAR__); - current_statement_begin__ = 62; - local_scalar_t__ level1_prob_choice2; - (void) level1_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level1_prob_choice2, DUMMY_VAR__); - stan::math::fill(level1_prob_choice2,DUMMY_VAR__); - current_statement_begin__ = 63; - local_scalar_t__ level2_prob_choice2; - (void) level2_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level2_prob_choice2, DUMMY_VAR__); - stan::math::fill(level2_prob_choice2,DUMMY_VAR__); - current_statement_begin__ = 64; - int level1_choice_01(0); - (void) level1_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level1_choice_01, std::numeric_limits::min()); - current_statement_begin__ = 65; - int level2_choice_01(0); - (void) level2_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level2_choice_01, std::numeric_limits::min()); - - - current_statement_begin__ = 68; - stan::math::assign(v_mb, rep_vector(0.0,2)); - current_statement_begin__ = 69; - stan::math::assign(v_mf, rep_vector(0.0,6)); - current_statement_begin__ = 70; - stan::math::assign(v_hybrid, rep_vector(0.0,2)); - current_statement_begin__ = 72; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 74; - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((trans_prob * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + ((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning 
variable v_mb"); - current_statement_begin__ = 75; - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - (((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + (trans_prob * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - current_statement_begin__ = 78; - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,1,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,1,"v_mf",1))), - "assigning variable v_hybrid"); - current_statement_begin__ = 79; - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,2,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,2,"v_mf",1))), - "assigning variable v_hybrid"); - current_statement_begin__ = 83; - stan::math::assign(level1_choice_01, (get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2) - 1)); - current_statement_begin__ = 84; - if (as_bool(logical_eq(t,1))) { - - current_statement_begin__ = 85; - stan::math::assign(level1_prob_choice2, inv_logit((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))))); - } else { - - current_statement_begin__ = 87; - stan::math::assign(level1_prob_choice2, inv_logit(((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))) + (get_base1(pi,i,"pi",1) * ((2 * get_base1(get_base1(level1_choice,i,"level1_choice",1),(t - 1),"level1_choice",2)) - 3))))); - } - current_statement_begin__ = 89; - lp_accum__.add(bernoulli_log(level1_choice_01, level1_prob_choice2)); - current_statement_begin__ = 92; - stan::model::assign(v_mf, - 
stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a1,i,"a1",1) * (get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) - get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1))))), - "assigning variable v_mf"); - current_statement_begin__ = 95; - stan::math::assign(level2_choice_01, (1 - modulus(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))); - current_statement_begin__ = 96; - if (as_bool(logical_gt(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))) { - - current_statement_begin__ = 97; - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,6,"v_mf",1) - get_base1(v_mf,5,"v_mf",1))))); - } else { - - current_statement_begin__ = 99; - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,4,"v_mf",1) - get_base1(v_mf,3,"v_mf",1))))); - } - current_statement_begin__ = 101; - lp_accum__.add(bernoulli_log(level2_choice_01, level2_prob_choice2)); - current_statement_begin__ = 105; - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) + (get_base1(a2,i,"a2",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - current_statement_begin__ = 108; - 
stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a1,i,"a1",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("a1_pr"); - names__.push_back("beta1_pr"); - names__.push_back("a2_pr"); - names__.push_back("beta2_pr"); - names__.push_back("pi_pr"); - names__.push_back("w_pr"); - names__.push_back("a1"); - names__.push_back("beta1"); - names__.push_back("a2"); - names__.push_back("beta2"); - names__.push_back("pi"); - names__.push_back("w"); - names__.push_back("mu_a1"); - names__.push_back("mu_beta1"); - names__.push_back("mu_a2"); - names__.push_back("mu_beta2"); - names__.push_back("mu_pi"); - names__.push_back("mu_w"); - names__.push_back("log_lik"); - names__.push_back("y_pred_step1"); - names__.push_back("y_pred_step2"); - } - - - 
void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(6); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(6); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static 
const char* function__ = "model_ts_par6_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(6); - vector_d sigma = in__.vector_lb_constrain(0,6); - vector_d a1_pr = in__.vector_constrain(N); - vector_d beta1_pr = in__.vector_constrain(N); - vector_d a2_pr = in__.vector_constrain(N); - vector_d beta2_pr = in__.vector_constrain(N); - vector_d pi_pr = in__.vector_constrain(N); - vector_d w_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 6; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 6; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a1_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta1_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a2_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta2_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 28; - validate_non_negative_index("a1", "N", N); - Eigen::Matrix a1(static_cast(N)); - (void) a1; // dummy to suppress unused var warning - - stan::math::initialize(a1, DUMMY_VAR__); - stan::math::fill(a1,DUMMY_VAR__); - current_statement_begin__ = 29; - validate_non_negative_index("beta1", "N", N); - Eigen::Matrix beta1(static_cast(N)); - (void) beta1; // dummy to suppress unused var warning - - stan::math::initialize(beta1, DUMMY_VAR__); - stan::math::fill(beta1,DUMMY_VAR__); 
- current_statement_begin__ = 30; - validate_non_negative_index("a2", "N", N); - Eigen::Matrix a2(static_cast(N)); - (void) a2; // dummy to suppress unused var warning - - stan::math::initialize(a2, DUMMY_VAR__); - stan::math::fill(a2,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("beta2", "N", N); - Eigen::Matrix beta2(static_cast(N)); - (void) beta2; // dummy to suppress unused var warning - - stan::math::initialize(beta2, DUMMY_VAR__); - stan::math::fill(beta2,DUMMY_VAR__); - current_statement_begin__ = 32; - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - current_statement_begin__ = 33; - validate_non_negative_index("w", "N", N); - Eigen::Matrix w(static_cast(N)); - (void) w; // dummy to suppress unused var warning - - stan::math::initialize(w, DUMMY_VAR__); - stan::math::fill(w,DUMMY_VAR__); - - - current_statement_begin__ = 35; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 36; - stan::model::assign(a1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(a1_pr,i,"a1_pr",1)))), - "assigning variable a1"); - current_statement_begin__ = 37; - stan::model::assign(beta1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta1_pr,i,"beta1_pr",1)))), - "assigning variable beta1"); - current_statement_begin__ = 38; - stan::model::assign(a2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(a2_pr,i,"a2_pr",1)))), - "assigning variable a2"); - current_statement_begin__ = 39; - stan::model::assign(beta2, 
- stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(beta2_pr,i,"beta2_pr",1)))), - "assigning variable beta2"); - current_statement_begin__ = 40; - stan::model::assign(pi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,5,"mu_p",1) + (get_base1(sigma,5,"sigma",1) * get_base1(pi_pr,i,"pi_pr",1)))) * 5), - "assigning variable pi"); - current_statement_begin__ = 41; - stan::model::assign(w, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,6,"mu_p",1) + (get_base1(sigma,6,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), - "assigning variable w"); - } - - // validate transformed parameters - current_statement_begin__ = 28; - check_greater_or_equal(function__,"a1",a1,0); - check_less_or_equal(function__,"a1",a1,1); - current_statement_begin__ = 29; - check_greater_or_equal(function__,"beta1",beta1,0); - current_statement_begin__ = 30; - check_greater_or_equal(function__,"a2",a2,0); - check_less_or_equal(function__,"a2",a2,1); - current_statement_begin__ = 31; - check_greater_or_equal(function__,"beta2",beta2,0); - current_statement_begin__ = 32; - check_greater_or_equal(function__,"pi",pi,0); - check_less_or_equal(function__,"pi",pi,5); - current_statement_begin__ = 33; - check_greater_or_equal(function__,"w",w,0); - check_less_or_equal(function__,"w",w,1); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a1[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta1[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a2[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta2[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi[k_0__]); - } - for (int k_0__ 
= 0; k_0__ < N; ++k_0__) { - vars__.push_back(w[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 116; - local_scalar_t__ mu_a1; - (void) mu_a1; // dummy to suppress unused var warning - - stan::math::initialize(mu_a1, DUMMY_VAR__); - stan::math::fill(mu_a1,DUMMY_VAR__); - current_statement_begin__ = 117; - local_scalar_t__ mu_beta1; - (void) mu_beta1; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta1, DUMMY_VAR__); - stan::math::fill(mu_beta1,DUMMY_VAR__); - current_statement_begin__ = 118; - local_scalar_t__ mu_a2; - (void) mu_a2; // dummy to suppress unused var warning - - stan::math::initialize(mu_a2, DUMMY_VAR__); - stan::math::fill(mu_a2,DUMMY_VAR__); - current_statement_begin__ = 119; - local_scalar_t__ mu_beta2; - (void) mu_beta2; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta2, DUMMY_VAR__); - stan::math::fill(mu_beta2,DUMMY_VAR__); - current_statement_begin__ = 120; - local_scalar_t__ mu_pi; - (void) mu_pi; // dummy to suppress unused var warning - - stan::math::initialize(mu_pi, DUMMY_VAR__); - stan::math::fill(mu_pi,DUMMY_VAR__); - current_statement_begin__ = 121; - local_scalar_t__ mu_w; - (void) mu_w; // dummy to suppress unused var warning - - stan::math::initialize(mu_w, DUMMY_VAR__); - stan::math::fill(mu_w,DUMMY_VAR__); - current_statement_begin__ = 124; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 127; - validate_non_negative_index("y_pred_step1", "N", N); - validate_non_negative_index("y_pred_step1", "T", T); - vector > y_pred_step1(N, (vector(T))); - stan::math::initialize(y_pred_step1, DUMMY_VAR__); - stan::math::fill(y_pred_step1,DUMMY_VAR__); - current_statement_begin__ = 128; - validate_non_negative_index("y_pred_step2", "N", N); - validate_non_negative_index("y_pred_step2", 
"T", T); - vector > y_pred_step2(N, (vector(T))); - stan::math::initialize(y_pred_step2, DUMMY_VAR__); - stan::math::fill(y_pred_step2,DUMMY_VAR__); - - - current_statement_begin__ = 131; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 132; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 133; - stan::model::assign(y_pred_step1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred_step1"); - current_statement_begin__ = 134; - stan::model::assign(y_pred_step2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred_step2"); - } - } - current_statement_begin__ = 139; - stan::math::assign(mu_a1, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 140; - stan::math::assign(mu_beta1, stan::math::exp(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 141; - stan::math::assign(mu_a2, Phi_approx(get_base1(mu_p,3,"mu_p",1))); - current_statement_begin__ = 142; - stan::math::assign(mu_beta2, stan::math::exp(get_base1(mu_p,4,"mu_p",1))); - current_statement_begin__ = 143; - stan::math::assign(mu_pi, (Phi_approx(get_base1(mu_p,5,"mu_p",1)) * 5)); - current_statement_begin__ = 144; - stan::math::assign(mu_w, Phi_approx(get_base1(mu_p,6,"mu_p",1))); - - current_statement_begin__ = 147; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 149; - validate_non_negative_index("v_mb", "2", 2); - Eigen::Matrix v_mb(static_cast(2)); - (void) v_mb; // dummy to suppress unused var warning - - stan::math::initialize(v_mb, DUMMY_VAR__); - stan::math::fill(v_mb,DUMMY_VAR__); - current_statement_begin__ = 150; - validate_non_negative_index("v_mf", "6", 6); - Eigen::Matrix v_mf(static_cast(6)); - (void) v_mf; // dummy to suppress unused var warning - - stan::math::initialize(v_mf, 
DUMMY_VAR__); - stan::math::fill(v_mf,DUMMY_VAR__); - current_statement_begin__ = 151; - validate_non_negative_index("v_hybrid", "2", 2); - Eigen::Matrix v_hybrid(static_cast(2)); - (void) v_hybrid; // dummy to suppress unused var warning - - stan::math::initialize(v_hybrid, DUMMY_VAR__); - stan::math::fill(v_hybrid,DUMMY_VAR__); - current_statement_begin__ = 152; - local_scalar_t__ level1_prob_choice2; - (void) level1_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level1_prob_choice2, DUMMY_VAR__); - stan::math::fill(level1_prob_choice2,DUMMY_VAR__); - current_statement_begin__ = 153; - local_scalar_t__ level2_prob_choice2; - (void) level2_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level2_prob_choice2, DUMMY_VAR__); - stan::math::fill(level2_prob_choice2,DUMMY_VAR__); - current_statement_begin__ = 154; - int level1_choice_01(0); - (void) level1_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level1_choice_01, std::numeric_limits::min()); - current_statement_begin__ = 155; - int level2_choice_01(0); - (void) level2_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level2_choice_01, std::numeric_limits::min()); - - - current_statement_begin__ = 158; - stan::math::assign(v_mb, rep_vector(0.0,2)); - current_statement_begin__ = 159; - stan::math::assign(v_mf, rep_vector(0.0,6)); - current_statement_begin__ = 160; - stan::math::assign(v_hybrid, rep_vector(0.0,2)); - current_statement_begin__ = 162; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 164; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 166; - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((trans_prob * 
stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + ((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - current_statement_begin__ = 167; - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - (((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + (trans_prob * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - current_statement_begin__ = 170; - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,1,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,1,"v_mf",1))), - "assigning variable v_hybrid"); - current_statement_begin__ = 171; - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,2,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,2,"v_mf",1))), - "assigning variable v_hybrid"); - current_statement_begin__ = 175; - stan::math::assign(level1_choice_01, (get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2) - 1)); - current_statement_begin__ = 176; - if (as_bool(logical_eq(t,1))) { - - current_statement_begin__ = 177; - stan::math::assign(level1_prob_choice2, inv_logit((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))))); - } else { - - current_statement_begin__ = 179; - stan::math::assign(level1_prob_choice2, inv_logit(((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))) + (get_base1(pi,i,"pi",1) * ((2 * get_base1(get_base1(level1_choice,i,"level1_choice",1),(t - 1),"level1_choice",2)) - 3))))); - } - current_statement_begin__ = 181; - stan::model::assign(log_lik, 
- stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(level1_choice_01,level1_prob_choice2))), - "assigning variable log_lik"); - current_statement_begin__ = 184; - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a1,i,"a1",1) * (get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) - get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1))))), - "assigning variable v_mf"); - current_statement_begin__ = 187; - stan::math::assign(level2_choice_01, (1 - modulus(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))); - current_statement_begin__ = 189; - if (as_bool(logical_gt(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))) { - - current_statement_begin__ = 190; - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,6,"v_mf",1) - get_base1(v_mf,5,"v_mf",1))))); - } else { - - current_statement_begin__ = 192; - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,4,"v_mf",1) - get_base1(v_mf,3,"v_mf",1))))); - } - current_statement_begin__ = 194; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(level2_choice_01,level2_prob_choice2))), - "assigning variable log_lik"); - current_statement_begin__ = 197; - stan::model::assign(y_pred_step1, - stan::model::cons_list(stan::model::index_uni(i), 
stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(level1_prob_choice2, base_rng__), - "assigning variable y_pred_step1"); - current_statement_begin__ = 198; - stan::model::assign(y_pred_step2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(level2_prob_choice2, base_rng__), - "assigning variable y_pred_step2"); - current_statement_begin__ = 202; - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) + (get_base1(a2,i,"a2",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - current_statement_begin__ = 205; - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a1,i,"a1",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 116; - check_greater_or_equal(function__,"mu_a1",mu_a1,0); - check_less_or_equal(function__,"mu_a1",mu_a1,1); - current_statement_begin__ = 117; - check_greater_or_equal(function__,"mu_beta1",mu_beta1,0); - current_statement_begin__ = 118; - 
check_greater_or_equal(function__,"mu_a2",mu_a2,0); - check_less_or_equal(function__,"mu_a2",mu_a2,1); - current_statement_begin__ = 119; - check_greater_or_equal(function__,"mu_beta2",mu_beta2,0); - current_statement_begin__ = 120; - check_greater_or_equal(function__,"mu_pi",mu_pi,0); - check_less_or_equal(function__,"mu_pi",mu_pi,5); - current_statement_begin__ = 121; - check_greater_or_equal(function__,"mu_w",mu_w,0); - check_less_or_equal(function__,"mu_w",mu_w,1); - current_statement_begin__ = 124; - current_statement_begin__ = 127; - current_statement_begin__ = 128; - - // write generated quantities - vars__.push_back(mu_a1); - vars__.push_back(mu_beta1); - vars__.push_back(mu_a2); - vars__.push_back(mu_beta2); - vars__.push_back(mu_pi); - vars__.push_back(mu_w); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred_step1[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred_step2[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string 
model_name() { - return "model_ts_par6"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a1_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta1_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a2_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta2_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a1" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta1" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step1" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step2" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 6; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a1_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta1_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a2_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta2_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a1" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta1" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step1" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step2" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_ts_par7_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_ts_par7"); - reader.add_event(216, 214, "end", "model_ts_par7"); - return reader; -} - -class model_ts_par7 : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > level1_choice; - vector > level2_choice; - vector > reward; - double trans_prob; -public: - model_ts_par7(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_ts_par7(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_ts_par7_namespace::model_ts_par7"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", 
context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("level1_choice", "N", N); - validate_non_negative_index("level1_choice", "T", T); - context__.validate_dims("data initialization", "level1_choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("level1_choice", "N", N); - validate_non_negative_index("level1_choice", "T", T); - level1_choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("level1_choice"); - pos__ = 0; - size_t level1_choice_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < level1_choice_limit_1__; ++i_1__) { - size_t level1_choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < level1_choice_limit_0__; ++i_0__) { - level1_choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("level2_choice", "N", N); - validate_non_negative_index("level2_choice", "T", T); - context__.validate_dims("data initialization", "level2_choice", "int", context__.to_vec(N,T)); - validate_non_negative_index("level2_choice", "N", N); - validate_non_negative_index("level2_choice", "T", T); - level2_choice = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("level2_choice"); - pos__ = 0; - size_t level2_choice_limit_1__ = 
T; - for (size_t i_1__ = 0; i_1__ < level2_choice_limit_1__; ++i_1__) { - size_t level2_choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < level2_choice_limit_0__; ++i_0__) { - level2_choice[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 7; - validate_non_negative_index("reward", "N", N); - validate_non_negative_index("reward", "T", T); - context__.validate_dims("data initialization", "reward", "int", context__.to_vec(N,T)); - validate_non_negative_index("reward", "N", N); - validate_non_negative_index("reward", "T", T); - reward = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("reward"); - pos__ = 0; - size_t reward_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < reward_limit_1__; ++i_1__) { - size_t reward_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < reward_limit_0__; ++i_0__) { - reward[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 8; - context__.validate_dims("data initialization", "trans_prob", "double", context__.to_vec()); - trans_prob = double(0); - vals_r__ = context__.vals_r("trans_prob"); - pos__ = 0; - trans_prob = vals_r__[pos__++]; - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"level1_choice[k0__][k1__]",level1_choice[k0__][k1__],1); - check_less_or_equal(function__,"level1_choice[k0__][k1__]",level1_choice[k0__][k1__],2); - } - } - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - 
check_greater_or_equal(function__,"level2_choice[k0__][k1__]",level2_choice[k0__][k1__],1); - check_less_or_equal(function__,"level2_choice[k0__][k1__]",level2_choice[k0__][k1__],4); - } - } - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"reward[k0__][k1__]",reward[k0__][k1__],0); - check_less_or_equal(function__,"reward[k0__][k1__]",reward[k0__][k1__],1); - } - } - current_statement_begin__ = 8; - check_greater_or_equal(function__,"trans_prob",trans_prob,0); - check_less_or_equal(function__,"trans_prob",trans_prob,1); - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 15; - validate_non_negative_index("mu_p", "7", 7); - num_params_r__ += 7; - current_statement_begin__ = 16; - validate_non_negative_index("sigma", "7", 7); - num_params_r__ += 7; - current_statement_begin__ = 19; - validate_non_negative_index("a1_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 20; - validate_non_negative_index("beta1_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 21; - validate_non_negative_index("a2_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 22; - validate_non_negative_index("beta2_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 23; - validate_non_negative_index("pi_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 24; - validate_non_negative_index("w_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 25; - validate_non_negative_index("lambda_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - 
~model_ts_par7() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "7", 7); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(7)); - vector_d mu_p(static_cast(7)); - for (int j1__ = 0U; j1__ < 7; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "7", 7); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(7)); - vector_d sigma(static_cast(7)); - for (int j1__ = 0U; j1__ < 7; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("a1_pr"))) - throw std::runtime_error("variable a1_pr missing"); - vals_r__ = context__.vals_r("a1_pr"); - pos__ = 0U; - validate_non_negative_index("a1_pr", "N", N); - context__.validate_dims("initialization", "a1_pr", "vector_d", context__.to_vec(N)); - vector_d a1_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - a1_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(a1_pr); - } catch (const std::exception& e) { - throw 
std::runtime_error(std::string("Error transforming variable a1_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta1_pr"))) - throw std::runtime_error("variable beta1_pr missing"); - vals_r__ = context__.vals_r("beta1_pr"); - pos__ = 0U; - validate_non_negative_index("beta1_pr", "N", N); - context__.validate_dims("initialization", "beta1_pr", "vector_d", context__.to_vec(N)); - vector_d beta1_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta1_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta1_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta1_pr: ") + e.what()); - } - - if (!(context__.contains_r("a2_pr"))) - throw std::runtime_error("variable a2_pr missing"); - vals_r__ = context__.vals_r("a2_pr"); - pos__ = 0U; - validate_non_negative_index("a2_pr", "N", N); - context__.validate_dims("initialization", "a2_pr", "vector_d", context__.to_vec(N)); - vector_d a2_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - a2_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(a2_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable a2_pr: ") + e.what()); - } - - if (!(context__.contains_r("beta2_pr"))) - throw std::runtime_error("variable beta2_pr missing"); - vals_r__ = context__.vals_r("beta2_pr"); - pos__ = 0U; - validate_non_negative_index("beta2_pr", "N", N); - context__.validate_dims("initialization", "beta2_pr", "vector_d", context__.to_vec(N)); - vector_d beta2_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - beta2_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(beta2_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable beta2_pr: ") + e.what()); - } - - if (!(context__.contains_r("pi_pr"))) - throw std::runtime_error("variable pi_pr missing"); - vals_r__ = context__.vals_r("pi_pr"); - 
pos__ = 0U; - validate_non_negative_index("pi_pr", "N", N); - context__.validate_dims("initialization", "pi_pr", "vector_d", context__.to_vec(N)); - vector_d pi_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - pi_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(pi_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable pi_pr: ") + e.what()); - } - - if (!(context__.contains_r("w_pr"))) - throw std::runtime_error("variable w_pr missing"); - vals_r__ = context__.vals_r("w_pr"); - pos__ = 0U; - validate_non_negative_index("w_pr", "N", N); - context__.validate_dims("initialization", "w_pr", "vector_d", context__.to_vec(N)); - vector_d w_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - w_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(w_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable w_pr: ") + e.what()); - } - - if (!(context__.contains_r("lambda_pr"))) - throw std::runtime_error("variable lambda_pr missing"); - vals_r__ = context__.vals_r("lambda_pr"); - pos__ = 0U; - validate_non_negative_index("lambda_pr", "N", N); - context__.validate_dims("initialization", "lambda_pr", "vector_d", context__.to_vec(N)); - vector_d lambda_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - lambda_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(lambda_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable lambda_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < 
params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(7,lp__); - else - mu_p = in__.vector_constrain(7); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,7,lp__); - else - sigma = in__.vector_lb_constrain(0,7); - - Eigen::Matrix a1_pr; - (void) a1_pr; // dummy to suppress unused var warning - if (jacobian__) - a1_pr = in__.vector_constrain(N,lp__); - else - a1_pr = in__.vector_constrain(N); - - Eigen::Matrix beta1_pr; - (void) beta1_pr; // dummy to suppress unused var warning - if (jacobian__) - beta1_pr = in__.vector_constrain(N,lp__); - else - beta1_pr = in__.vector_constrain(N); - - Eigen::Matrix a2_pr; - (void) a2_pr; // dummy to suppress unused var warning - if (jacobian__) - a2_pr = in__.vector_constrain(N,lp__); - else - a2_pr = in__.vector_constrain(N); - - Eigen::Matrix beta2_pr; - (void) beta2_pr; // dummy to suppress unused var warning - if (jacobian__) - beta2_pr = in__.vector_constrain(N,lp__); - else - beta2_pr = in__.vector_constrain(N); - - Eigen::Matrix pi_pr; - (void) pi_pr; // dummy to suppress unused var warning - if (jacobian__) - pi_pr = in__.vector_constrain(N,lp__); - else - pi_pr = in__.vector_constrain(N); - - Eigen::Matrix w_pr; - (void) w_pr; // dummy to suppress unused var warning - if (jacobian__) - w_pr = in__.vector_constrain(N,lp__); - else - w_pr = in__.vector_constrain(N); - - Eigen::Matrix lambda_pr; - (void) 
lambda_pr; // dummy to suppress unused var warning - if (jacobian__) - lambda_pr = in__.vector_constrain(N,lp__); - else - lambda_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 29; - validate_non_negative_index("a1", "N", N); - Eigen::Matrix a1(static_cast(N)); - (void) a1; // dummy to suppress unused var warning - - stan::math::initialize(a1, DUMMY_VAR__); - stan::math::fill(a1,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("beta1", "N", N); - Eigen::Matrix beta1(static_cast(N)); - (void) beta1; // dummy to suppress unused var warning - - stan::math::initialize(beta1, DUMMY_VAR__); - stan::math::fill(beta1,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("a2", "N", N); - Eigen::Matrix a2(static_cast(N)); - (void) a2; // dummy to suppress unused var warning - - stan::math::initialize(a2, DUMMY_VAR__); - stan::math::fill(a2,DUMMY_VAR__); - current_statement_begin__ = 32; - validate_non_negative_index("beta2", "N", N); - Eigen::Matrix beta2(static_cast(N)); - (void) beta2; // dummy to suppress unused var warning - - stan::math::initialize(beta2, DUMMY_VAR__); - stan::math::fill(beta2,DUMMY_VAR__); - current_statement_begin__ = 33; - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - current_statement_begin__ = 34; - validate_non_negative_index("w", "N", N); - Eigen::Matrix w(static_cast(N)); - (void) w; // dummy to suppress unused var warning - - stan::math::initialize(w, DUMMY_VAR__); - stan::math::fill(w,DUMMY_VAR__); - current_statement_begin__ = 35; - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - - - 
current_statement_begin__ = 37; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 38; - stan::model::assign(a1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(a1_pr,i,"a1_pr",1)))), - "assigning variable a1"); - current_statement_begin__ = 39; - stan::model::assign(beta1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta1_pr,i,"beta1_pr",1)))), - "assigning variable beta1"); - current_statement_begin__ = 40; - stan::model::assign(a2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(a2_pr,i,"a2_pr",1)))), - "assigning variable a2"); - current_statement_begin__ = 41; - stan::model::assign(beta2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(beta2_pr,i,"beta2_pr",1)))), - "assigning variable beta2"); - current_statement_begin__ = 42; - stan::model::assign(pi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,5,"mu_p",1) + (get_base1(sigma,5,"sigma",1) * get_base1(pi_pr,i,"pi_pr",1)))) * 5), - "assigning variable pi"); - current_statement_begin__ = 43; - stan::model::assign(w, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,6,"mu_p",1) + (get_base1(sigma,6,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), - "assigning variable w"); - current_statement_begin__ = 44; - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,7,"mu_p",1) + 
(get_base1(sigma,7,"sigma",1) * get_base1(lambda_pr,i,"lambda_pr",1)))), - "assigning variable lambda"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(a1(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: a1" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta1(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta1" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(a2(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: a2" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(beta2(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: beta2" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(pi(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: pi" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(w(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: w" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(lambda(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: lambda" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 29; - 
check_greater_or_equal(function__,"a1",a1,0); - check_less_or_equal(function__,"a1",a1,1); - current_statement_begin__ = 30; - check_greater_or_equal(function__,"beta1",beta1,0); - current_statement_begin__ = 31; - check_greater_or_equal(function__,"a2",a2,0); - check_less_or_equal(function__,"a2",a2,1); - current_statement_begin__ = 32; - check_greater_or_equal(function__,"beta2",beta2,0); - current_statement_begin__ = 33; - check_greater_or_equal(function__,"pi",pi,0); - check_less_or_equal(function__,"pi",pi,5); - current_statement_begin__ = 34; - check_greater_or_equal(function__,"w",w,0); - check_less_or_equal(function__,"w",w,1); - current_statement_begin__ = 35; - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,1); - - // model body - - current_statement_begin__ = 49; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 50; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 53; - lp_accum__.add(normal_log(a1_pr, 0, 1)); - current_statement_begin__ = 54; - lp_accum__.add(normal_log(beta1_pr, 0, 1)); - current_statement_begin__ = 55; - lp_accum__.add(normal_log(a2_pr, 0, 1)); - current_statement_begin__ = 56; - lp_accum__.add(normal_log(beta2_pr, 0, 1)); - current_statement_begin__ = 57; - lp_accum__.add(normal_log(pi_pr, 0, 1)); - current_statement_begin__ = 58; - lp_accum__.add(normal_log(w_pr, 0, 1)); - current_statement_begin__ = 59; - lp_accum__.add(normal_log(lambda_pr, 0, 1)); - current_statement_begin__ = 61; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 63; - validate_non_negative_index("v_mb", "2", 2); - Eigen::Matrix v_mb(static_cast(2)); - (void) v_mb; // dummy to suppress unused var warning - - stan::math::initialize(v_mb, DUMMY_VAR__); - stan::math::fill(v_mb,DUMMY_VAR__); - current_statement_begin__ = 64; - validate_non_negative_index("v_mf", "6", 6); - Eigen::Matrix v_mf(static_cast(6)); - (void) v_mf; // dummy 
to suppress unused var warning - - stan::math::initialize(v_mf, DUMMY_VAR__); - stan::math::fill(v_mf,DUMMY_VAR__); - current_statement_begin__ = 65; - validate_non_negative_index("v_hybrid", "2", 2); - Eigen::Matrix v_hybrid(static_cast(2)); - (void) v_hybrid; // dummy to suppress unused var warning - - stan::math::initialize(v_hybrid, DUMMY_VAR__); - stan::math::fill(v_hybrid,DUMMY_VAR__); - current_statement_begin__ = 66; - local_scalar_t__ level1_prob_choice2; - (void) level1_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level1_prob_choice2, DUMMY_VAR__); - stan::math::fill(level1_prob_choice2,DUMMY_VAR__); - current_statement_begin__ = 67; - local_scalar_t__ level2_prob_choice2; - (void) level2_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level2_prob_choice2, DUMMY_VAR__); - stan::math::fill(level2_prob_choice2,DUMMY_VAR__); - current_statement_begin__ = 68; - int level1_choice_01(0); - (void) level1_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level1_choice_01, std::numeric_limits::min()); - current_statement_begin__ = 69; - int level2_choice_01(0); - (void) level2_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level2_choice_01, std::numeric_limits::min()); - - - current_statement_begin__ = 72; - stan::math::assign(v_mb, rep_vector(0.0,2)); - current_statement_begin__ = 73; - stan::math::assign(v_mf, rep_vector(0.0,6)); - current_statement_begin__ = 74; - stan::math::assign(v_hybrid, rep_vector(0.0,2)); - current_statement_begin__ = 76; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 78; - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((trans_prob * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + ((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning 
variable v_mb"); - current_statement_begin__ = 79; - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - (((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + (trans_prob * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - current_statement_begin__ = 82; - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,1,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,1,"v_mf",1))), - "assigning variable v_hybrid"); - current_statement_begin__ = 83; - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,2,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,2,"v_mf",1))), - "assigning variable v_hybrid"); - current_statement_begin__ = 87; - stan::math::assign(level1_choice_01, (get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2) - 1)); - current_statement_begin__ = 88; - if (as_bool(logical_eq(t,1))) { - - current_statement_begin__ = 89; - stan::math::assign(level1_prob_choice2, inv_logit((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))))); - } else { - - current_statement_begin__ = 91; - stan::math::assign(level1_prob_choice2, inv_logit(((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))) + (get_base1(pi,i,"pi",1) * ((2 * get_base1(get_base1(level1_choice,i,"level1_choice",1),(t - 1),"level1_choice",2)) - 3))))); - } - current_statement_begin__ = 93; - lp_accum__.add(bernoulli_log(level1_choice_01, level1_prob_choice2)); - current_statement_begin__ = 96; - stan::model::assign(v_mf, - 
stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a1,i,"a1",1) * (get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) - get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1))))), - "assigning variable v_mf"); - current_statement_begin__ = 99; - stan::math::assign(level2_choice_01, (1 - modulus(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))); - current_statement_begin__ = 100; - if (as_bool(logical_gt(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))) { - - current_statement_begin__ = 101; - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,6,"v_mf",1) - get_base1(v_mf,5,"v_mf",1))))); - } else { - - current_statement_begin__ = 103; - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,4,"v_mf",1) - get_base1(v_mf,3,"v_mf",1))))); - } - current_statement_begin__ = 105; - lp_accum__.add(bernoulli_log(level2_choice_01, level2_prob_choice2)); - current_statement_begin__ = 109; - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) + (get_base1(a2,i,"a2",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - current_statement_begin__ = 112; - 
stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + ((get_base1(lambda,i,"lambda",1) * get_base1(a1,i,"a1",1)) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("a1_pr"); - names__.push_back("beta1_pr"); - names__.push_back("a2_pr"); - names__.push_back("beta2_pr"); - names__.push_back("pi_pr"); - names__.push_back("w_pr"); - names__.push_back("lambda_pr"); - names__.push_back("a1"); - names__.push_back("beta1"); - names__.push_back("a2"); - names__.push_back("beta2"); - names__.push_back("pi"); - names__.push_back("w"); - names__.push_back("lambda"); - names__.push_back("mu_a1"); - names__.push_back("mu_beta1"); - names__.push_back("mu_a2"); - names__.push_back("mu_beta2"); - names__.push_back("mu_pi"); - names__.push_back("mu_w"); - 
names__.push_back("mu_lambda"); - names__.push_back("log_lik"); - names__.push_back("y_pred_step1"); - names__.push_back("y_pred_step2"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(7); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(7); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - 
- template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_ts_par7_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(7); - vector_d sigma = in__.vector_lb_constrain(0,7); - vector_d a1_pr = in__.vector_constrain(N); - vector_d beta1_pr = in__.vector_constrain(N); - vector_d a2_pr = in__.vector_constrain(N); - vector_d beta2_pr = in__.vector_constrain(N); - vector_d pi_pr = in__.vector_constrain(N); - vector_d w_pr = in__.vector_constrain(N); - vector_d lambda_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 7; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 7; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a1_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta1_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a2_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta2_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 29; - 
validate_non_negative_index("a1", "N", N); - Eigen::Matrix a1(static_cast(N)); - (void) a1; // dummy to suppress unused var warning - - stan::math::initialize(a1, DUMMY_VAR__); - stan::math::fill(a1,DUMMY_VAR__); - current_statement_begin__ = 30; - validate_non_negative_index("beta1", "N", N); - Eigen::Matrix beta1(static_cast(N)); - (void) beta1; // dummy to suppress unused var warning - - stan::math::initialize(beta1, DUMMY_VAR__); - stan::math::fill(beta1,DUMMY_VAR__); - current_statement_begin__ = 31; - validate_non_negative_index("a2", "N", N); - Eigen::Matrix a2(static_cast(N)); - (void) a2; // dummy to suppress unused var warning - - stan::math::initialize(a2, DUMMY_VAR__); - stan::math::fill(a2,DUMMY_VAR__); - current_statement_begin__ = 32; - validate_non_negative_index("beta2", "N", N); - Eigen::Matrix beta2(static_cast(N)); - (void) beta2; // dummy to suppress unused var warning - - stan::math::initialize(beta2, DUMMY_VAR__); - stan::math::fill(beta2,DUMMY_VAR__); - current_statement_begin__ = 33; - validate_non_negative_index("pi", "N", N); - Eigen::Matrix pi(static_cast(N)); - (void) pi; // dummy to suppress unused var warning - - stan::math::initialize(pi, DUMMY_VAR__); - stan::math::fill(pi,DUMMY_VAR__); - current_statement_begin__ = 34; - validate_non_negative_index("w", "N", N); - Eigen::Matrix w(static_cast(N)); - (void) w; // dummy to suppress unused var warning - - stan::math::initialize(w, DUMMY_VAR__); - stan::math::fill(w,DUMMY_VAR__); - current_statement_begin__ = 35; - validate_non_negative_index("lambda", "N", N); - Eigen::Matrix lambda(static_cast(N)); - (void) lambda; // dummy to suppress unused var warning - - stan::math::initialize(lambda, DUMMY_VAR__); - stan::math::fill(lambda,DUMMY_VAR__); - - - current_statement_begin__ = 37; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 38; - stan::model::assign(a1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 
Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(a1_pr,i,"a1_pr",1)))), - "assigning variable a1"); - current_statement_begin__ = 39; - stan::model::assign(beta1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(beta1_pr,i,"beta1_pr",1)))), - "assigning variable beta1"); - current_statement_begin__ = 40; - stan::model::assign(a2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(a2_pr,i,"a2_pr",1)))), - "assigning variable a2"); - current_statement_begin__ = 41; - stan::model::assign(beta2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::math::exp((get_base1(mu_p,4,"mu_p",1) + (get_base1(sigma,4,"sigma",1) * get_base1(beta2_pr,i,"beta2_pr",1)))), - "assigning variable beta2"); - current_statement_begin__ = 42; - stan::model::assign(pi, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,5,"mu_p",1) + (get_base1(sigma,5,"sigma",1) * get_base1(pi_pr,i,"pi_pr",1)))) * 5), - "assigning variable pi"); - current_statement_begin__ = 43; - stan::model::assign(w, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,6,"mu_p",1) + (get_base1(sigma,6,"sigma",1) * get_base1(w_pr,i,"w_pr",1)))), - "assigning variable w"); - current_statement_begin__ = 44; - stan::model::assign(lambda, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,7,"mu_p",1) + (get_base1(sigma,7,"sigma",1) * get_base1(lambda_pr,i,"lambda_pr",1)))), - "assigning variable lambda"); - } - - // validate transformed parameters - current_statement_begin__ = 29; - check_greater_or_equal(function__,"a1",a1,0); - 
check_less_or_equal(function__,"a1",a1,1); - current_statement_begin__ = 30; - check_greater_or_equal(function__,"beta1",beta1,0); - current_statement_begin__ = 31; - check_greater_or_equal(function__,"a2",a2,0); - check_less_or_equal(function__,"a2",a2,1); - current_statement_begin__ = 32; - check_greater_or_equal(function__,"beta2",beta2,0); - current_statement_begin__ = 33; - check_greater_or_equal(function__,"pi",pi,0); - check_less_or_equal(function__,"pi",pi,5); - current_statement_begin__ = 34; - check_greater_or_equal(function__,"w",w,0); - check_less_or_equal(function__,"w",w,1); - current_statement_begin__ = 35; - check_greater_or_equal(function__,"lambda",lambda,0); - check_less_or_equal(function__,"lambda",lambda,1); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a1[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta1[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(a2[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(beta2[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(pi[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(w[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(lambda[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 119; - local_scalar_t__ mu_a1; - (void) mu_a1; // dummy to suppress unused var warning - - stan::math::initialize(mu_a1, DUMMY_VAR__); - stan::math::fill(mu_a1,DUMMY_VAR__); - current_statement_begin__ = 120; - local_scalar_t__ mu_beta1; - (void) mu_beta1; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta1, DUMMY_VAR__); - stan::math::fill(mu_beta1,DUMMY_VAR__); - current_statement_begin__ = 121; - local_scalar_t__ mu_a2; - (void) mu_a2; // dummy to suppress unused var warning - - 
stan::math::initialize(mu_a2, DUMMY_VAR__); - stan::math::fill(mu_a2,DUMMY_VAR__); - current_statement_begin__ = 122; - local_scalar_t__ mu_beta2; - (void) mu_beta2; // dummy to suppress unused var warning - - stan::math::initialize(mu_beta2, DUMMY_VAR__); - stan::math::fill(mu_beta2,DUMMY_VAR__); - current_statement_begin__ = 123; - local_scalar_t__ mu_pi; - (void) mu_pi; // dummy to suppress unused var warning - - stan::math::initialize(mu_pi, DUMMY_VAR__); - stan::math::fill(mu_pi,DUMMY_VAR__); - current_statement_begin__ = 124; - local_scalar_t__ mu_w; - (void) mu_w; // dummy to suppress unused var warning - - stan::math::initialize(mu_w, DUMMY_VAR__); - stan::math::fill(mu_w,DUMMY_VAR__); - current_statement_begin__ = 125; - local_scalar_t__ mu_lambda; - (void) mu_lambda; // dummy to suppress unused var warning - - stan::math::initialize(mu_lambda, DUMMY_VAR__); - stan::math::fill(mu_lambda,DUMMY_VAR__); - current_statement_begin__ = 128; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 131; - validate_non_negative_index("y_pred_step1", "N", N); - validate_non_negative_index("y_pred_step1", "T", T); - vector > y_pred_step1(N, (vector(T))); - stan::math::initialize(y_pred_step1, DUMMY_VAR__); - stan::math::fill(y_pred_step1,DUMMY_VAR__); - current_statement_begin__ = 132; - validate_non_negative_index("y_pred_step2", "N", N); - validate_non_negative_index("y_pred_step2", "T", T); - vector > y_pred_step2(N, (vector(T))); - stan::math::initialize(y_pred_step2, DUMMY_VAR__); - stan::math::fill(y_pred_step2,DUMMY_VAR__); - - - current_statement_begin__ = 135; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 136; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 137; - stan::model::assign(y_pred_step1, - stan::model::cons_list(stan::model::index_uni(i), 
stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred_step1"); - current_statement_begin__ = 138; - stan::model::assign(y_pred_step2, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred_step2"); - } - } - current_statement_begin__ = 143; - stan::math::assign(mu_a1, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 144; - stan::math::assign(mu_beta1, stan::math::exp(get_base1(mu_p,2,"mu_p",1))); - current_statement_begin__ = 145; - stan::math::assign(mu_a2, Phi_approx(get_base1(mu_p,3,"mu_p",1))); - current_statement_begin__ = 146; - stan::math::assign(mu_beta2, stan::math::exp(get_base1(mu_p,4,"mu_p",1))); - current_statement_begin__ = 147; - stan::math::assign(mu_pi, (Phi_approx(get_base1(mu_p,5,"mu_p",1)) * 5)); - current_statement_begin__ = 148; - stan::math::assign(mu_w, Phi_approx(get_base1(mu_p,6,"mu_p",1))); - current_statement_begin__ = 149; - stan::math::assign(mu_lambda, Phi_approx(get_base1(mu_p,7,"mu_p",1))); - - current_statement_begin__ = 152; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 154; - validate_non_negative_index("v_mb", "2", 2); - Eigen::Matrix v_mb(static_cast(2)); - (void) v_mb; // dummy to suppress unused var warning - - stan::math::initialize(v_mb, DUMMY_VAR__); - stan::math::fill(v_mb,DUMMY_VAR__); - current_statement_begin__ = 155; - validate_non_negative_index("v_mf", "6", 6); - Eigen::Matrix v_mf(static_cast(6)); - (void) v_mf; // dummy to suppress unused var warning - - stan::math::initialize(v_mf, DUMMY_VAR__); - stan::math::fill(v_mf,DUMMY_VAR__); - current_statement_begin__ = 156; - validate_non_negative_index("v_hybrid", "2", 2); - Eigen::Matrix v_hybrid(static_cast(2)); - (void) v_hybrid; // dummy to suppress unused var warning - - stan::math::initialize(v_hybrid, DUMMY_VAR__); - 
stan::math::fill(v_hybrid,DUMMY_VAR__); - current_statement_begin__ = 157; - local_scalar_t__ level1_prob_choice2; - (void) level1_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level1_prob_choice2, DUMMY_VAR__); - stan::math::fill(level1_prob_choice2,DUMMY_VAR__); - current_statement_begin__ = 158; - local_scalar_t__ level2_prob_choice2; - (void) level2_prob_choice2; // dummy to suppress unused var warning - - stan::math::initialize(level2_prob_choice2, DUMMY_VAR__); - stan::math::fill(level2_prob_choice2,DUMMY_VAR__); - current_statement_begin__ = 159; - int level1_choice_01(0); - (void) level1_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level1_choice_01, std::numeric_limits::min()); - current_statement_begin__ = 160; - int level2_choice_01(0); - (void) level2_choice_01; // dummy to suppress unused var warning - - stan::math::fill(level2_choice_01, std::numeric_limits::min()); - - - current_statement_begin__ = 163; - stan::math::assign(v_mb, rep_vector(0.0,2)); - current_statement_begin__ = 164; - stan::math::assign(v_mf, rep_vector(0.0,6)); - current_statement_begin__ = 165; - stan::math::assign(v_hybrid, rep_vector(0.0,2)); - current_statement_begin__ = 167; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 169; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 171; - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((trans_prob * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + ((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - current_statement_begin__ = 172; - stan::model::assign(v_mb, - stan::model::cons_list(stan::model::index_uni(2), 
stan::model::nil_index_list()), - (((1 - trans_prob) * stan::math::fmax(get_base1(v_mf,3,"v_mf",1),get_base1(v_mf,4,"v_mf",1))) + (trans_prob * stan::math::fmax(get_base1(v_mf,5,"v_mf",1),get_base1(v_mf,6,"v_mf",1)))), - "assigning variable v_mb"); - current_statement_begin__ = 175; - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,1,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,1,"v_mf",1))), - "assigning variable v_hybrid"); - current_statement_begin__ = 176; - stan::model::assign(v_hybrid, - stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list()), - ((get_base1(w,i,"w",1) * get_base1(v_mb,2,"v_mb",1)) + ((1 - get_base1(w,i,"w",1)) * get_base1(v_mf,2,"v_mf",1))), - "assigning variable v_hybrid"); - current_statement_begin__ = 180; - stan::math::assign(level1_choice_01, (get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2) - 1)); - current_statement_begin__ = 181; - if (as_bool(logical_eq(t,1))) { - - current_statement_begin__ = 182; - stan::math::assign(level1_prob_choice2, inv_logit((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))))); - } else { - - current_statement_begin__ = 184; - stan::math::assign(level1_prob_choice2, inv_logit(((get_base1(beta1,i,"beta1",1) * (get_base1(v_hybrid,2,"v_hybrid",1) - get_base1(v_hybrid,1,"v_hybrid",1))) + (get_base1(pi,i,"pi",1) * ((2 * get_base1(get_base1(level1_choice,i,"level1_choice",1),(t - 1),"level1_choice",2)) - 3))))); - } - current_statement_begin__ = 186; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(level1_choice_01,level1_prob_choice2))), - "assigning variable log_lik"); - current_statement_begin__ = 189; - stan::model::assign(v_mf, - 
stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + (get_base1(a1,i,"a1",1) * (get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) - get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1))))), - "assigning variable v_mf"); - current_statement_begin__ = 192; - stan::math::assign(level2_choice_01, (1 - modulus(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))); - current_statement_begin__ = 194; - if (as_bool(logical_gt(get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2),2))) { - - current_statement_begin__ = 195; - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,6,"v_mf",1) - get_base1(v_mf,5,"v_mf",1))))); - } else { - - current_statement_begin__ = 197; - stan::math::assign(level2_prob_choice2, inv_logit((get_base1(beta2,i,"beta2",1) * (get_base1(v_mf,4,"v_mf",1) - get_base1(v_mf,3,"v_mf",1))))); - } - current_statement_begin__ = 199; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_log(level2_choice_01,level2_prob_choice2))), - "assigning variable log_lik"); - current_statement_begin__ = 202; - stan::model::assign(y_pred_step1, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(level1_prob_choice2, base_rng__), - "assigning variable y_pred_step1"); - current_statement_begin__ = 203; - stan::model::assign(y_pred_step2, - stan::model::cons_list(stan::model::index_uni(i), 
stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(level2_prob_choice2, base_rng__), - "assigning variable y_pred_step2"); - current_statement_begin__ = 207; - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni((2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2))), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1) + (get_base1(a2,i,"a2",1) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - current_statement_begin__ = 210; - stan::model::assign(v_mf, - stan::model::cons_list(stan::model::index_uni(get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2)), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(v_mf,get_base1(get_base1(level1_choice,i,"level1_choice",1),t,"level1_choice",2),"v_mf",1) + ((get_base1(lambda,i,"lambda",1) * get_base1(a1,i,"a1",1)) * (get_base1(get_base1(reward,i,"reward",1),t,"reward",2) - get_base1(v_mf,(2 + get_base1(get_base1(level2_choice,i,"level2_choice",1),t,"level2_choice",2)),"v_mf",1))))), - "assigning variable v_mf"); - } - } - } - - // validate generated quantities - current_statement_begin__ = 119; - check_greater_or_equal(function__,"mu_a1",mu_a1,0); - check_less_or_equal(function__,"mu_a1",mu_a1,1); - current_statement_begin__ = 120; - check_greater_or_equal(function__,"mu_beta1",mu_beta1,0); - current_statement_begin__ = 121; - check_greater_or_equal(function__,"mu_a2",mu_a2,0); - check_less_or_equal(function__,"mu_a2",mu_a2,1); - current_statement_begin__ = 122; - check_greater_or_equal(function__,"mu_beta2",mu_beta2,0); - current_statement_begin__ = 123; - check_greater_or_equal(function__,"mu_pi",mu_pi,0); - 
check_less_or_equal(function__,"mu_pi",mu_pi,5); - current_statement_begin__ = 124; - check_greater_or_equal(function__,"mu_w",mu_w,0); - check_less_or_equal(function__,"mu_w",mu_w,1); - current_statement_begin__ = 125; - check_greater_or_equal(function__,"mu_lambda",mu_lambda,0); - check_less_or_equal(function__,"mu_lambda",mu_lambda,1); - current_statement_begin__ = 128; - current_statement_begin__ = 131; - current_statement_begin__ = 132; - - // write generated quantities - vars__.push_back(mu_a1); - vars__.push_back(mu_beta1); - vars__.push_back(mu_a2); - vars__.push_back(mu_beta2); - vars__.push_back(mu_pi); - vars__.push_back(mu_w); - vars__.push_back(mu_lambda); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred_step1[k_0__][k_1__]); - } - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred_step2[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_ts_par7"; - } - - - void constrained_param_names(std::vector& 
param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 7; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 7; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a1_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta1_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a2_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta2_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a1" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta1" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step1" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step2" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 7; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 7; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a1_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta1_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a2_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta2_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a1" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta1" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "a2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "beta2" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "pi" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "w" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "lambda" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta1"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_a2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_beta2"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pi"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_w"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_lambda"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step1" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred_step2" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_ug_bayes_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_ug_bayes"); - reader.add_event(166, 164, "end", "model_ug_bayes"); - return reader; -} - -class model_ug_bayes : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > offer; - vector > accept; - double initV; - double mu0; - double k0; - double sig20; - double nu0; -public: - model_ug_bayes(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_ug_bayes(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_ug_bayes_namespace::model_ug_bayes"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data 
initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("offer", "N", N); - validate_non_negative_index("offer", "T", T); - context__.validate_dims("data initialization", "offer", "double", context__.to_vec(N,T)); - validate_non_negative_index("offer", "N", N); - validate_non_negative_index("offer", "T", T); - offer = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("offer"); - pos__ = 0; - size_t offer_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < offer_limit_1__; ++i_1__) { - size_t offer_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < offer_limit_0__; ++i_0__) { - offer[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("accept", "N", N); - validate_non_negative_index("accept", "T", T); - context__.validate_dims("data initialization", "accept", "int", context__.to_vec(N,T)); - validate_non_negative_index("accept", "N", N); - validate_non_negative_index("accept", "T", T); - accept = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("accept"); - pos__ = 0; - size_t accept_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < accept_limit_1__; ++i_1__) { - size_t accept_limit_0__ = N; - for (size_t i_0__ = 
0; i_0__ < accept_limit_0__; ++i_0__) { - accept[i_0__][i_1__] = vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"accept[k0__][k1__]",accept[k0__][k1__],-(1)); - check_less_or_equal(function__,"accept[k0__][k1__]",accept[k0__][k1__],1); - } - } - // initialize data variables - current_statement_begin__ = 10; - initV = double(0); - stan::math::fill(initV,DUMMY_VAR__); - current_statement_begin__ = 11; - mu0 = double(0); - stan::math::fill(mu0,DUMMY_VAR__); - current_statement_begin__ = 12; - k0 = double(0); - stan::math::fill(k0,DUMMY_VAR__); - current_statement_begin__ = 13; - sig20 = double(0); - stan::math::fill(sig20,DUMMY_VAR__); - current_statement_begin__ = 14; - nu0 = double(0); - stan::math::fill(nu0,DUMMY_VAR__); - - current_statement_begin__ = 16; - stan::math::assign(initV, 0.0); - current_statement_begin__ = 17; - stan::math::assign(mu0, 10.0); - current_statement_begin__ = 18; - stan::math::assign(k0, 4.0); - current_statement_begin__ = 19; - stan::math::assign(sig20, 4.0); - current_statement_begin__ = 20; - stan::math::assign(nu0, 10.0); - - // validate transformed data - current_statement_begin__ = 10; - current_statement_begin__ = 11; - current_statement_begin__ = 12; - current_statement_begin__ = 13; - current_statement_begin__ = 14; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 26; - validate_non_negative_index("mu_p", "3", 
3); - num_params_r__ += 3; - current_statement_begin__ = 27; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 30; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 31; - validate_non_negative_index("Beta_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 32; - validate_non_negative_index("tau_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_ug_bayes() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = 
vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("Beta_pr"))) - throw std::runtime_error("variable Beta_pr missing"); - vals_r__ = context__.vals_r("Beta_pr"); - pos__ = 0U; - validate_non_negative_index("Beta_pr", "N", N); - context__.validate_dims("initialization", "Beta_pr", "vector_d", context__.to_vec(N)); - vector_d Beta_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - Beta_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(Beta_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable Beta_pr: ") + e.what()); - } - - if (!(context__.contains_r("tau_pr"))) - throw std::runtime_error("variable tau_pr missing"); - vals_r__ = context__.vals_r("tau_pr"); - pos__ = 0U; - validate_non_negative_index("tau_pr", "N", N); - context__.validate_dims("initialization", "tau_pr", "vector_d", context__.to_vec(N)); - vector_d tau_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - tau_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(tau_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau_pr: ") + e.what()); - } - - params_r__ = 
writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = in__.vector_constrain(N); - - Eigen::Matrix Beta_pr; - (void) Beta_pr; // dummy to suppress unused var warning - if (jacobian__) - Beta_pr = in__.vector_constrain(N,lp__); - else - Beta_pr = in__.vector_constrain(N); - - Eigen::Matrix tau_pr; - (void) tau_pr; // dummy to suppress unused var warning - if (jacobian__) - tau_pr = in__.vector_constrain(N,lp__); - else - tau_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 37; - validate_non_negative_index("alpha", "N", N); - vector alpha(N); - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - 
current_statement_begin__ = 38; - validate_non_negative_index("Beta", "N", N); - vector Beta(N); - stan::math::initialize(Beta, DUMMY_VAR__); - stan::math::fill(Beta,DUMMY_VAR__); - current_statement_begin__ = 39; - validate_non_negative_index("tau", "N", N); - vector tau(N); - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - current_statement_begin__ = 41; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 42; - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 20), - "assigning variable alpha"); - current_statement_begin__ = 43; - stan::model::assign(Beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(Beta_pr,i,"Beta_pr",1)))) * 10), - "assigning variable Beta"); - current_statement_begin__ = 44; - stan::model::assign(tau, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * 10), - "assigning variable tau"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(Beta[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: Beta" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(tau[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed 
parameter: tau" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 37; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"alpha[k0__]",alpha[k0__],0); - check_less_or_equal(function__,"alpha[k0__]",alpha[k0__],20); - } - current_statement_begin__ = 38; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Beta[k0__]",Beta[k0__],0); - check_less_or_equal(function__,"Beta[k0__]",Beta[k0__],10); - } - current_statement_begin__ = 39; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"tau[k0__]",tau[k0__],0); - check_less_or_equal(function__,"tau[k0__]",tau[k0__],10); - } - - // model body - - current_statement_begin__ = 50; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 51; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 54; - lp_accum__.add(normal_log(alpha_pr, 0, 1.0)); - current_statement_begin__ = 55; - lp_accum__.add(normal_log(Beta_pr, 0, 1.0)); - current_statement_begin__ = 56; - lp_accum__.add(normal_log(tau_pr, 0, 1.0)); - current_statement_begin__ = 58; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 60; - local_scalar_t__ util; - (void) util; // dummy to suppress unused var warning - - stan::math::initialize(util, DUMMY_VAR__); - stan::math::fill(util,DUMMY_VAR__); - current_statement_begin__ = 61; - local_scalar_t__ mu_old; - (void) mu_old; // dummy to suppress unused var warning - - stan::math::initialize(mu_old, DUMMY_VAR__); - stan::math::fill(mu_old,DUMMY_VAR__); - current_statement_begin__ = 62; - local_scalar_t__ mu_new; - (void) mu_new; // dummy to suppress unused var warning - - stan::math::initialize(mu_new, DUMMY_VAR__); - stan::math::fill(mu_new,DUMMY_VAR__); - current_statement_begin__ = 63; - local_scalar_t__ 
k_old; - (void) k_old; // dummy to suppress unused var warning - - stan::math::initialize(k_old, DUMMY_VAR__); - stan::math::fill(k_old,DUMMY_VAR__); - current_statement_begin__ = 64; - local_scalar_t__ k_new; - (void) k_new; // dummy to suppress unused var warning - - stan::math::initialize(k_new, DUMMY_VAR__); - stan::math::fill(k_new,DUMMY_VAR__); - current_statement_begin__ = 65; - local_scalar_t__ sig2_old; - (void) sig2_old; // dummy to suppress unused var warning - - stan::math::initialize(sig2_old, DUMMY_VAR__); - stan::math::fill(sig2_old,DUMMY_VAR__); - current_statement_begin__ = 66; - local_scalar_t__ sig2_new; - (void) sig2_new; // dummy to suppress unused var warning - - stan::math::initialize(sig2_new, DUMMY_VAR__); - stan::math::fill(sig2_new,DUMMY_VAR__); - current_statement_begin__ = 67; - local_scalar_t__ nu_old; - (void) nu_old; // dummy to suppress unused var warning - - stan::math::initialize(nu_old, DUMMY_VAR__); - stan::math::fill(nu_old,DUMMY_VAR__); - current_statement_begin__ = 68; - local_scalar_t__ nu_new; - (void) nu_new; // dummy to suppress unused var warning - - stan::math::initialize(nu_new, DUMMY_VAR__); - stan::math::fill(nu_new,DUMMY_VAR__); - current_statement_begin__ = 69; - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - - - current_statement_begin__ = 72; - stan::math::assign(mu_old, mu0); - current_statement_begin__ = 73; - stan::math::assign(k_old, k0); - current_statement_begin__ = 74; - stan::math::assign(sig2_old, sig20); - current_statement_begin__ = 75; - stan::math::assign(nu_old, nu0); - current_statement_begin__ = 77; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 78; - stan::math::assign(k_new, (k_old + 1)); - current_statement_begin__ = 79; - stan::math::assign(nu_new, (nu_old + 1)); - current_statement_begin__ = 80; - stan::math::assign(mu_new, (((k_old / 
k_new) * mu_old) + ((1 / k_new) * get_base1(get_base1(offer,i,"offer",1),t,"offer",2)))); - current_statement_begin__ = 81; - stan::math::assign(sig2_new, (((nu_old / nu_new) * sig2_old) + (((1 / nu_new) * (k_old / k_new)) * pow((get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - mu_old),2)))); - current_statement_begin__ = 83; - stan::math::assign(PE, (get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - mu_old)); - current_statement_begin__ = 84; - stan::math::assign(util, ((get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - (get_base1(alpha,i,"alpha",1) * stan::math::fmax((mu_new - get_base1(get_base1(offer,i,"offer",1),t,"offer",2)),0.0))) - (get_base1(Beta,i,"Beta",1) * stan::math::fmax((get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - mu_new),0.0)))); - current_statement_begin__ = 86; - lp_accum__.add(bernoulli_logit_log(get_base1(get_base1(accept,i,"accept",1),t,"accept",2), (util * get_base1(tau,i,"tau",1)))); - current_statement_begin__ = 89; - stan::math::assign(mu_old, mu_new); - current_statement_begin__ = 90; - stan::math::assign(sig2_old, sig2_new); - current_statement_begin__ = 91; - stan::math::assign(k_old, k_new); - current_statement_begin__ = 92; - stan::math::assign(nu_old, nu_new); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - 
names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("alpha_pr"); - names__.push_back("Beta_pr"); - names__.push_back("tau_pr"); - names__.push_back("alpha"); - names__.push_back("Beta"); - names__.push_back("tau"); - names__.push_back("mu_alpha"); - names__.push_back("mu_Beta"); - names__.push_back("mu_tau"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_ug_bayes_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - 
vector_d alpha_pr = in__.vector_constrain(N); - vector_d Beta_pr = in__.vector_constrain(N); - vector_d tau_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Beta_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 37; - validate_non_negative_index("alpha", "N", N); - vector alpha(N); - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 38; - validate_non_negative_index("Beta", "N", N); - vector Beta(N); - stan::math::initialize(Beta, DUMMY_VAR__); - stan::math::fill(Beta,DUMMY_VAR__); - current_statement_begin__ = 39; - validate_non_negative_index("tau", "N", N); - vector tau(N); - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - current_statement_begin__ = 41; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 42; - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 20), - "assigning variable alpha"); - current_statement_begin__ = 43; - stan::model::assign(Beta, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * 
get_base1(Beta_pr,i,"Beta_pr",1)))) * 10), - "assigning variable Beta"); - current_statement_begin__ = 44; - stan::model::assign(tau, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * 10), - "assigning variable tau"); - } - - // validate transformed parameters - current_statement_begin__ = 37; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"alpha[k0__]",alpha[k0__],0); - check_less_or_equal(function__,"alpha[k0__]",alpha[k0__],20); - } - current_statement_begin__ = 38; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Beta[k0__]",Beta[k0__],0); - check_less_or_equal(function__,"Beta[k0__]",Beta[k0__],10); - } - current_statement_begin__ = 39; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"tau[k0__]",tau[k0__],0); - check_less_or_equal(function__,"tau[k0__]",tau[k0__],10); - } - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(Beta[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 99; - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - current_statement_begin__ = 100; - local_scalar_t__ mu_Beta; - (void) mu_Beta; // dummy to suppress unused var warning - - stan::math::initialize(mu_Beta, DUMMY_VAR__); - stan::math::fill(mu_Beta,DUMMY_VAR__); - current_statement_begin__ = 101; - local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to suppress unused var warning - - stan::math::initialize(mu_tau, 
DUMMY_VAR__); - stan::math::fill(mu_tau,DUMMY_VAR__); - current_statement_begin__ = 104; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 107; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 110; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 111; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 112; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 116; - stan::math::assign(mu_alpha, (Phi_approx(get_base1(mu_p,1,"mu_p",1)) * 20)); - current_statement_begin__ = 117; - stan::math::assign(mu_Beta, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 10)); - current_statement_begin__ = 118; - stan::math::assign(mu_tau, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 10)); - - current_statement_begin__ = 121; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 123; - local_scalar_t__ util; - (void) util; // dummy to suppress unused var warning - - stan::math::initialize(util, DUMMY_VAR__); - stan::math::fill(util,DUMMY_VAR__); - current_statement_begin__ = 124; - local_scalar_t__ mu_old; - (void) mu_old; // dummy to suppress unused var warning - - stan::math::initialize(mu_old, DUMMY_VAR__); - stan::math::fill(mu_old,DUMMY_VAR__); - current_statement_begin__ = 125; - local_scalar_t__ mu_new; - (void) mu_new; // dummy to suppress unused var warning - - stan::math::initialize(mu_new, DUMMY_VAR__); - stan::math::fill(mu_new,DUMMY_VAR__); - current_statement_begin__ = 126; - local_scalar_t__ k_old; - (void) 
k_old; // dummy to suppress unused var warning - - stan::math::initialize(k_old, DUMMY_VAR__); - stan::math::fill(k_old,DUMMY_VAR__); - current_statement_begin__ = 127; - local_scalar_t__ k_new; - (void) k_new; // dummy to suppress unused var warning - - stan::math::initialize(k_new, DUMMY_VAR__); - stan::math::fill(k_new,DUMMY_VAR__); - current_statement_begin__ = 128; - local_scalar_t__ sig2_old; - (void) sig2_old; // dummy to suppress unused var warning - - stan::math::initialize(sig2_old, DUMMY_VAR__); - stan::math::fill(sig2_old,DUMMY_VAR__); - current_statement_begin__ = 129; - local_scalar_t__ sig2_new; - (void) sig2_new; // dummy to suppress unused var warning - - stan::math::initialize(sig2_new, DUMMY_VAR__); - stan::math::fill(sig2_new,DUMMY_VAR__); - current_statement_begin__ = 130; - local_scalar_t__ nu_old; - (void) nu_old; // dummy to suppress unused var warning - - stan::math::initialize(nu_old, DUMMY_VAR__); - stan::math::fill(nu_old,DUMMY_VAR__); - current_statement_begin__ = 131; - local_scalar_t__ nu_new; - (void) nu_new; // dummy to suppress unused var warning - - stan::math::initialize(nu_new, DUMMY_VAR__); - stan::math::fill(nu_new,DUMMY_VAR__); - current_statement_begin__ = 132; - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - - - current_statement_begin__ = 135; - stan::math::assign(mu_old, mu0); - current_statement_begin__ = 136; - stan::math::assign(k_old, k0); - current_statement_begin__ = 137; - stan::math::assign(sig2_old, sig20); - current_statement_begin__ = 138; - stan::math::assign(nu_old, nu0); - current_statement_begin__ = 140; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 142; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 143; - 
stan::math::assign(k_new, (k_old + 1)); - current_statement_begin__ = 144; - stan::math::assign(nu_new, (nu_old + 1)); - current_statement_begin__ = 145; - stan::math::assign(mu_new, (((k_old / k_new) * mu_old) + ((1 / k_new) * get_base1(get_base1(offer,i,"offer",1),t,"offer",2)))); - current_statement_begin__ = 146; - stan::math::assign(sig2_new, (((nu_old / nu_new) * sig2_old) + (((1 / nu_new) * (k_old / k_new)) * pow((get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - mu_old),2)))); - current_statement_begin__ = 148; - stan::math::assign(PE, (get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - mu_old)); - current_statement_begin__ = 149; - stan::math::assign(util, ((get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - (get_base1(alpha,i,"alpha",1) * stan::math::fmax((mu_new - get_base1(get_base1(offer,i,"offer",1),t,"offer",2)),0.0))) - (get_base1(Beta,i,"Beta",1) * stan::math::fmax((get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - mu_new),0.0)))); - current_statement_begin__ = 151; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_logit_log(get_base1(get_base1(accept,i,"accept",1),t,"accept",2),(util * get_base1(tau,i,"tau",1))))), - "assigning variable log_lik"); - current_statement_begin__ = 154; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(inv_logit((util * get_base1(tau,i,"tau",1))), base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 157; - stan::math::assign(mu_old, mu_new); - current_statement_begin__ = 158; - stan::math::assign(sig2_old, sig2_new); - current_statement_begin__ = 159; - stan::math::assign(k_old, k_new); - current_statement_begin__ = 160; - stan::math::assign(nu_old, nu_new); - } - } - } - - // validate generated quantities - 
current_statement_begin__ = 99; - check_greater_or_equal(function__,"mu_alpha",mu_alpha,0); - check_less_or_equal(function__,"mu_alpha",mu_alpha,20); - current_statement_begin__ = 100; - check_greater_or_equal(function__,"mu_Beta",mu_Beta,0); - check_less_or_equal(function__,"mu_Beta",mu_Beta,10); - current_statement_begin__ = 101; - check_greater_or_equal(function__,"mu_tau",mu_tau,0); - check_less_or_equal(function__,"mu_tau",mu_tau,10); - current_statement_begin__ = 104; - current_statement_begin__ = 107; - - // write generated quantities - vars__.push_back(mu_alpha); - vars__.push_back(mu_Beta); - vars__.push_back(mu_tau); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_ug_bayes"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - 
param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Beta_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Beta_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "Beta" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_Beta"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_ug_delta_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_ug_delta"); - reader.add_event(128, 126, "end", "model_ug_delta"); - return reader; -} - -class model_ug_delta : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > offer; - vector > accept; -public: - model_ug_delta(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_ug_delta(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_ug_delta_namespace::model_ug_delta"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ 
= context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 5; - validate_non_negative_index("offer", "N", N); - validate_non_negative_index("offer", "T", T); - context__.validate_dims("data initialization", "offer", "double", context__.to_vec(N,T)); - validate_non_negative_index("offer", "N", N); - validate_non_negative_index("offer", "T", T); - offer = std::vector >(N,std::vector(T,double(0))); - vals_r__ = context__.vals_r("offer"); - pos__ = 0; - size_t offer_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < offer_limit_1__; ++i_1__) { - size_t offer_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < offer_limit_0__; ++i_0__) { - offer[i_0__][i_1__] = vals_r__[pos__++]; - } - } - current_statement_begin__ = 6; - validate_non_negative_index("accept", "N", N); - validate_non_negative_index("accept", "T", T); - context__.validate_dims("data initialization", "accept", "int", context__.to_vec(N,T)); - validate_non_negative_index("accept", "N", N); - validate_non_negative_index("accept", "T", T); - accept = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("accept"); - pos__ = 0; - size_t accept_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < accept_limit_1__; ++i_1__) { - size_t accept_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < accept_limit_0__; ++i_0__) { - accept[i_0__][i_1__] = 
vals_i__[pos__++]; - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],1); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 5; - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"accept[k0__][k1__]",accept[k0__][k1__],-(1)); - check_less_or_equal(function__,"accept[k0__][k1__]",accept[k0__][k1__],1); - } - } - // initialize data variables - - - // validate transformed data - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 15; - validate_non_negative_index("mu_p", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 16; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 19; - validate_non_negative_index("ep_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 20; - validate_non_negative_index("alpha_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 21; - validate_non_negative_index("tau_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_ug_delta() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector 
vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_p"))) - throw std::runtime_error("variable mu_p missing"); - vals_r__ = context__.vals_r("mu_p"); - pos__ = 0U; - validate_non_negative_index("mu_p", "3", 3); - context__.validate_dims("initialization", "mu_p", "vector_d", context__.to_vec(3)); - vector_d mu_p(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_p(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_p); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_p: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("ep_pr"))) - throw std::runtime_error("variable ep_pr missing"); - vals_r__ = context__.vals_r("ep_pr"); - pos__ = 0U; - validate_non_negative_index("ep_pr", "N", N); - context__.validate_dims("initialization", "ep_pr", "vector_d", context__.to_vec(N)); - vector_d ep_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - ep_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(ep_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable ep_pr: ") + e.what()); - } - - if (!(context__.contains_r("alpha_pr"))) - throw std::runtime_error("variable alpha_pr missing"); - vals_r__ = context__.vals_r("alpha_pr"); - pos__ = 0U; - validate_non_negative_index("alpha_pr", "N", N); - 
context__.validate_dims("initialization", "alpha_pr", "vector_d", context__.to_vec(N)); - vector_d alpha_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - alpha_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(alpha_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable alpha_pr: ") + e.what()); - } - - if (!(context__.contains_r("tau_pr"))) - throw std::runtime_error("variable tau_pr missing"); - vals_r__ = context__.vals_r("tau_pr"); - pos__ = 0U; - validate_non_negative_index("tau_pr", "N", N); - context__.validate_dims("initialization", "tau_pr", "vector_d", context__.to_vec(N)); - vector_d tau_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - tau_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(tau_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable tau_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_p; - (void) mu_p; // dummy to suppress unused var warning - if (jacobian__) - mu_p = in__.vector_constrain(3,lp__); - else - mu_p = in__.vector_constrain(3); - - 
Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix ep_pr; - (void) ep_pr; // dummy to suppress unused var warning - if (jacobian__) - ep_pr = in__.vector_constrain(N,lp__); - else - ep_pr = in__.vector_constrain(N); - - Eigen::Matrix alpha_pr; - (void) alpha_pr; // dummy to suppress unused var warning - if (jacobian__) - alpha_pr = in__.vector_constrain(N,lp__); - else - alpha_pr = in__.vector_constrain(N); - - Eigen::Matrix tau_pr; - (void) tau_pr; // dummy to suppress unused var warning - if (jacobian__) - tau_pr = in__.vector_constrain(N,lp__); - else - tau_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 26; - validate_non_negative_index("ep", "N", N); - vector ep(N); - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - current_statement_begin__ = 27; - validate_non_negative_index("alpha", "N", N); - vector alpha(N); - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("tau", "N", N); - vector tau(N); - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - current_statement_begin__ = 30; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 31; - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - current_statement_begin__ = 32; - stan::model::assign(tau, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * 10), - "assigning variable tau"); - current_statement_begin__ = 
33; - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 20), - "assigning variable alpha"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(ep[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: ep" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(alpha[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: alpha" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(tau[i0__])) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: tau" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 26; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"ep[k0__]",ep[k0__],0); - check_less_or_equal(function__,"ep[k0__]",ep[k0__],1); - } - current_statement_begin__ = 27; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"alpha[k0__]",alpha[k0__],0); - check_less_or_equal(function__,"alpha[k0__]",alpha[k0__],20); - } - current_statement_begin__ = 28; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"tau[k0__]",tau[k0__],0); - check_less_or_equal(function__,"tau[k0__]",tau[k0__],10); - } - - // model body - - current_statement_begin__ = 39; - lp_accum__.add(normal_log(mu_p, 0, 1)); - current_statement_begin__ = 40; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 43; - 
lp_accum__.add(normal_log(ep_pr, 0, 1.0)); - current_statement_begin__ = 44; - lp_accum__.add(normal_log(alpha_pr, 0, 1.0)); - current_statement_begin__ = 45; - lp_accum__.add(normal_log(tau_pr, 0, 1.0)); - current_statement_begin__ = 47; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 49; - local_scalar_t__ f; - (void) f; // dummy to suppress unused var warning - - stan::math::initialize(f, DUMMY_VAR__); - stan::math::fill(f,DUMMY_VAR__); - current_statement_begin__ = 50; - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - stan::math::fill(PE,DUMMY_VAR__); - current_statement_begin__ = 51; - local_scalar_t__ util; - (void) util; // dummy to suppress unused var warning - - stan::math::initialize(util, DUMMY_VAR__); - stan::math::fill(util,DUMMY_VAR__); - - - current_statement_begin__ = 54; - stan::math::assign(f, 10.0); - current_statement_begin__ = 56; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 58; - stan::math::assign(PE, (get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - f)); - current_statement_begin__ = 61; - stan::math::assign(util, (get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - (get_base1(alpha,i,"alpha",1) * stan::math::fmax((f - get_base1(get_base1(offer,i,"offer",1),t,"offer",2)),0.0)))); - current_statement_begin__ = 64; - lp_accum__.add(bernoulli_logit_log(get_base1(get_base1(accept,i,"accept",1),t,"accept",2), (util * get_base1(tau,i,"tau",1)))); - current_statement_begin__ = 67; - stan::math::assign(f, stan::model::deep_copy((f + (get_base1(ep,i,"ep",1) * PE)))); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - 
template - T_ log_prob(Eigen::Matrix& params_r, - std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_p"); - names__.push_back("sigma"); - names__.push_back("ep_pr"); - names__.push_back("alpha_pr"); - names__.push_back("tau_pr"); - names__.push_back("ep"); - names__.push_back("alpha"); - names__.push_back("tau"); - names__.push_back("mu_ep"); - names__.push_back("mu_tau"); - names__.push_back("mu_alpha"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* 
pstream__ = 0) const { - typedef double local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_ug_delta_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_p = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d ep_pr = in__.vector_constrain(N); - vector_d alpha_pr = in__.vector_constrain(N); - vector_d tau_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_p[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 26; - validate_non_negative_index("ep", "N", N); - vector ep(N); - stan::math::initialize(ep, DUMMY_VAR__); - stan::math::fill(ep,DUMMY_VAR__); - current_statement_begin__ = 27; - validate_non_negative_index("alpha", "N", N); - vector alpha(N); - stan::math::initialize(alpha, DUMMY_VAR__); - stan::math::fill(alpha,DUMMY_VAR__); - current_statement_begin__ = 28; - validate_non_negative_index("tau", "N", N); - vector tau(N); - stan::math::initialize(tau, DUMMY_VAR__); - stan::math::fill(tau,DUMMY_VAR__); - - - current_statement_begin__ = 30; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 31; - stan::model::assign(ep, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 
Phi_approx((get_base1(mu_p,1,"mu_p",1) + (get_base1(sigma,1,"sigma",1) * get_base1(ep_pr,i,"ep_pr",1)))), - "assigning variable ep"); - current_statement_begin__ = 32; - stan::model::assign(tau, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,2,"mu_p",1) + (get_base1(sigma,2,"sigma",1) * get_base1(tau_pr,i,"tau_pr",1)))) * 10), - "assigning variable tau"); - current_statement_begin__ = 33; - stan::model::assign(alpha, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_p,3,"mu_p",1) + (get_base1(sigma,3,"sigma",1) * get_base1(alpha_pr,i,"alpha_pr",1)))) * 20), - "assigning variable alpha"); - } - - // validate transformed parameters - current_statement_begin__ = 26; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"ep[k0__]",ep[k0__],0); - check_less_or_equal(function__,"ep[k0__]",ep[k0__],1); - } - current_statement_begin__ = 27; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"alpha[k0__]",alpha[k0__],0); - check_less_or_equal(function__,"alpha[k0__]",alpha[k0__],20); - } - current_statement_begin__ = 28; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"tau[k0__]",tau[k0__],0); - check_less_or_equal(function__,"tau[k0__]",tau[k0__],10); - } - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(ep[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(alpha[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(tau[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 75; - local_scalar_t__ mu_ep; - (void) mu_ep; // dummy to suppress unused var warning - - stan::math::initialize(mu_ep, DUMMY_VAR__); - stan::math::fill(mu_ep,DUMMY_VAR__); - current_statement_begin__ = 76; - 
local_scalar_t__ mu_tau; - (void) mu_tau; // dummy to suppress unused var warning - - stan::math::initialize(mu_tau, DUMMY_VAR__); - stan::math::fill(mu_tau,DUMMY_VAR__); - current_statement_begin__ = 77; - local_scalar_t__ mu_alpha; - (void) mu_alpha; // dummy to suppress unused var warning - - stan::math::initialize(mu_alpha, DUMMY_VAR__); - stan::math::fill(mu_alpha,DUMMY_VAR__); - current_statement_begin__ = 80; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 83; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "T", T); - vector > y_pred(N, (vector(T))); - stan::math::initialize(y_pred, DUMMY_VAR__); - stan::math::fill(y_pred,DUMMY_VAR__); - - - current_statement_begin__ = 86; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 87; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 88; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - -(1), - "assigning variable y_pred"); - } - } - current_statement_begin__ = 92; - stan::math::assign(mu_ep, Phi_approx(get_base1(mu_p,1,"mu_p",1))); - current_statement_begin__ = 93; - stan::math::assign(mu_tau, (Phi_approx(get_base1(mu_p,2,"mu_p",1)) * 10)); - current_statement_begin__ = 94; - stan::math::assign(mu_alpha, (Phi_approx(get_base1(mu_p,3,"mu_p",1)) * 20)); - - current_statement_begin__ = 97; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 99; - local_scalar_t__ f; - (void) f; // dummy to suppress unused var warning - - stan::math::initialize(f, DUMMY_VAR__); - stan::math::fill(f,DUMMY_VAR__); - current_statement_begin__ = 100; - local_scalar_t__ PE; - (void) PE; // dummy to suppress unused var warning - - stan::math::initialize(PE, DUMMY_VAR__); - 
stan::math::fill(PE,DUMMY_VAR__); - current_statement_begin__ = 101; - local_scalar_t__ util; - (void) util; // dummy to suppress unused var warning - - stan::math::initialize(util, DUMMY_VAR__); - stan::math::fill(util,DUMMY_VAR__); - - - current_statement_begin__ = 104; - stan::math::assign(f, 10.0); - current_statement_begin__ = 105; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0.0, - "assigning variable log_lik"); - current_statement_begin__ = 107; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 109; - stan::math::assign(PE, (get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - f)); - current_statement_begin__ = 112; - stan::math::assign(util, (get_base1(get_base1(offer,i,"offer",1),t,"offer",2) - (get_base1(alpha,i,"alpha",1) * stan::math::fmax((f - get_base1(get_base1(offer,i,"offer",1),t,"offer",2)),0.0)))); - current_statement_begin__ = 115; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + bernoulli_logit_log(get_base1(get_base1(accept,i,"accept",1),t,"accept",2),(util * get_base1(tau,i,"tau",1))))), - "assigning variable log_lik"); - current_statement_begin__ = 118; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list())), - bernoulli_rng(inv_logit((util * get_base1(tau,i,"tau",1))), base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 121; - stan::math::assign(f, stan::model::deep_copy((f + (get_base1(ep,i,"ep",1) * PE)))); - } - } - } - - // validate generated quantities - current_statement_begin__ = 75; - check_greater_or_equal(function__,"mu_ep",mu_ep,0); - check_less_or_equal(function__,"mu_ep",mu_ep,1); - current_statement_begin__ = 76; - 
check_greater_or_equal(function__,"mu_tau",mu_tau,0); - check_less_or_equal(function__,"mu_tau",mu_tau,10); - current_statement_begin__ = 77; - check_greater_or_equal(function__,"mu_alpha",mu_alpha,0); - check_less_or_equal(function__,"mu_alpha",mu_alpha,20); - current_statement_begin__ = 80; - current_statement_begin__ = 83; - - // write generated quantities - vars__.push_back(mu_ep); - vars__.push_back(mu_tau); - vars__.push_back(mu_alpha); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_1__ = 0; k_1__ < T; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__]); - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_ug_delta"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "ep" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "alpha" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "tau" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_ep"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_tau"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_alpha"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_1__ = 1; k_1__ <= T; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' 
<< k_1__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - -}; // model - -} - - - - -// Code generated by Stan version 2.18.0 - -#include - -namespace model_wcs_sql_namespace { - -using std::istream; -using std::string; -using std::stringstream; -using std::vector; -using stan::io::dump; -using stan::math::lgamma; -using stan::model::prob_grad; -using namespace stan::math; - -static int current_statement_begin__; - -stan::io::program_reader prog_reader__() { - stan::io::program_reader reader; - reader.add_event(0, 0, "start", "model_wcs_sql"); - reader.add_event(168, 166, "end", "model_wcs_sql"); - return reader; -} - -class model_wcs_sql : public prob_grad { -private: - int N; - int T; - vector Tsubj; - vector > > choice; - vector > outcome; - vector > choice_match_att; - vector deck_match_rule; - matrix_d initAtt; - matrix_d unit; -public: - model_wcs_sql(stan::io::var_context& context__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, 0, pstream__); - } - - model_wcs_sql(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__ = 0) - : prob_grad(0) { - ctor_body(context__, random_seed__, pstream__); - } - - void ctor_body(stan::io::var_context& context__, - unsigned int random_seed__, - std::ostream* pstream__) { - typedef double local_scalar_t__; - - boost::ecuyer1988 base_rng__ = - stan::services::util::create_rng(random_seed__, 0); - (void) base_rng__; // suppress unused var warning - - current_statement_begin__ = -1; - - static const char* function__ = "model_wcs_sql_namespace::model_wcs_sql"; - (void) function__; // dummy to suppress unused var warning - size_t pos__; - (void) pos__; // dummy to suppress unused var warning - std::vector vals_i__; - std::vector vals_r__; - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - // initialize member variables - try { - current_statement_begin__ = 2; - 
context__.validate_dims("data initialization", "N", "int", context__.to_vec()); - N = int(0); - vals_i__ = context__.vals_i("N"); - pos__ = 0; - N = vals_i__[pos__++]; - current_statement_begin__ = 3; - context__.validate_dims("data initialization", "T", "int", context__.to_vec()); - T = int(0); - vals_i__ = context__.vals_i("T"); - pos__ = 0; - T = vals_i__[pos__++]; - current_statement_begin__ = 4; - validate_non_negative_index("Tsubj", "N", N); - context__.validate_dims("data initialization", "Tsubj", "int", context__.to_vec(N)); - validate_non_negative_index("Tsubj", "N", N); - Tsubj = std::vector(N,int(0)); - vals_i__ = context__.vals_i("Tsubj"); - pos__ = 0; - size_t Tsubj_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < Tsubj_limit_0__; ++i_0__) { - Tsubj[i_0__] = vals_i__[pos__++]; - } - current_statement_begin__ = 6; - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "4", 4); - validate_non_negative_index("choice", "T", T); - context__.validate_dims("data initialization", "choice", "int", context__.to_vec(N,4,T)); - validate_non_negative_index("choice", "N", N); - validate_non_negative_index("choice", "4", 4); - validate_non_negative_index("choice", "T", T); - choice = std::vector > >(N,std::vector >(4,std::vector(T,int(0)))); - vals_i__ = context__.vals_i("choice"); - pos__ = 0; - size_t choice_limit_2__ = T; - for (size_t i_2__ = 0; i_2__ < choice_limit_2__; ++i_2__) { - size_t choice_limit_1__ = 4; - for (size_t i_1__ = 0; i_1__ < choice_limit_1__; ++i_1__) { - size_t choice_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_limit_0__; ++i_0__) { - choice[i_0__][i_1__][i_2__] = vals_i__[pos__++]; - } - } - } - current_statement_begin__ = 7; - validate_non_negative_index("outcome", "N", N); - validate_non_negative_index("outcome", "T", T); - context__.validate_dims("data initialization", "outcome", "int", context__.to_vec(N,T)); - validate_non_negative_index("outcome", "N", N); - 
validate_non_negative_index("outcome", "T", T); - outcome = std::vector >(N,std::vector(T,int(0))); - vals_i__ = context__.vals_i("outcome"); - pos__ = 0; - size_t outcome_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < outcome_limit_1__; ++i_1__) { - size_t outcome_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < outcome_limit_0__; ++i_0__) { - outcome[i_0__][i_1__] = vals_i__[pos__++]; - } - } - current_statement_begin__ = 8; - validate_non_negative_index("choice_match_att", "N", N); - validate_non_negative_index("choice_match_att", "T", T); - validate_non_negative_index("choice_match_att", "1", 1); - validate_non_negative_index("choice_match_att", "3", 3); - context__.validate_dims("data initialization", "choice_match_att", "matrix_d", context__.to_vec(N,T,1,3)); - validate_non_negative_index("choice_match_att", "N", N); - validate_non_negative_index("choice_match_att", "T", T); - validate_non_negative_index("choice_match_att", "1", 1); - validate_non_negative_index("choice_match_att", "3", 3); - choice_match_att = std::vector >(N,std::vector(T,matrix_d(static_cast(1),static_cast(3)))); - vals_r__ = context__.vals_r("choice_match_att"); - pos__ = 0; - size_t choice_match_att_m_mat_lim__ = 1; - size_t choice_match_att_n_mat_lim__ = 3; - for (size_t n_mat__ = 0; n_mat__ < choice_match_att_n_mat_lim__; ++n_mat__) { - for (size_t m_mat__ = 0; m_mat__ < choice_match_att_m_mat_lim__; ++m_mat__) { - size_t choice_match_att_limit_1__ = T; - for (size_t i_1__ = 0; i_1__ < choice_match_att_limit_1__; ++i_1__) { - size_t choice_match_att_limit_0__ = N; - for (size_t i_0__ = 0; i_0__ < choice_match_att_limit_0__; ++i_0__) { - choice_match_att[i_0__][i_1__](m_mat__,n_mat__) = vals_r__[pos__++]; - } - } - } - } - current_statement_begin__ = 9; - validate_non_negative_index("deck_match_rule", "T", T); - validate_non_negative_index("deck_match_rule", "3", 3); - validate_non_negative_index("deck_match_rule", "4", 4); - context__.validate_dims("data initialization", 
"deck_match_rule", "matrix_d", context__.to_vec(T,3,4)); - validate_non_negative_index("deck_match_rule", "T", T); - validate_non_negative_index("deck_match_rule", "3", 3); - validate_non_negative_index("deck_match_rule", "4", 4); - deck_match_rule = std::vector(T,matrix_d(static_cast(3),static_cast(4))); - vals_r__ = context__.vals_r("deck_match_rule"); - pos__ = 0; - size_t deck_match_rule_m_mat_lim__ = 3; - size_t deck_match_rule_n_mat_lim__ = 4; - for (size_t n_mat__ = 0; n_mat__ < deck_match_rule_n_mat_lim__; ++n_mat__) { - for (size_t m_mat__ = 0; m_mat__ < deck_match_rule_m_mat_lim__; ++m_mat__) { - size_t deck_match_rule_limit_0__ = T; - for (size_t i_0__ = 0; i_0__ < deck_match_rule_limit_0__; ++i_0__) { - deck_match_rule[i_0__](m_mat__,n_mat__) = vals_r__[pos__++]; - } - } - } - - // validate, data variables - current_statement_begin__ = 2; - check_greater_or_equal(function__,"N",N,1); - current_statement_begin__ = 3; - check_greater_or_equal(function__,"T",T,1); - current_statement_begin__ = 4; - for (int k0__ = 0; k0__ < N; ++k0__) { - check_greater_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],40); - check_less_or_equal(function__,"Tsubj[k0__]",Tsubj[k0__],T); - } - current_statement_begin__ = 6; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < 4; ++k1__) { - for (int k2__ = 0; k2__ < T; ++k2__) { - check_greater_or_equal(function__,"choice[k0__][k1__][k2__]",choice[k0__][k1__][k2__],0); - check_less_or_equal(function__,"choice[k0__][k1__][k2__]",choice[k0__][k1__][k2__],4); - } - } - } - current_statement_begin__ = 7; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - check_greater_or_equal(function__,"outcome[k0__][k1__]",outcome[k0__][k1__],-(1)); - check_less_or_equal(function__,"outcome[k0__][k1__]",outcome[k0__][k1__],1); - } - } - current_statement_begin__ = 8; - for (int k0__ = 0; k0__ < N; ++k0__) { - for (int k1__ = 0; k1__ < T; ++k1__) { - 
check_greater_or_equal(function__,"choice_match_att[k0__][k1__]",choice_match_att[k0__][k1__],0); - check_less_or_equal(function__,"choice_match_att[k0__][k1__]",choice_match_att[k0__][k1__],1); - } - } - current_statement_begin__ = 9; - for (int k0__ = 0; k0__ < T; ++k0__) { - check_greater_or_equal(function__,"deck_match_rule[k0__]",deck_match_rule[k0__],0); - check_less_or_equal(function__,"deck_match_rule[k0__]",deck_match_rule[k0__],1); - } - // initialize data variables - current_statement_begin__ = 13; - validate_non_negative_index("initAtt", "1", 1); - validate_non_negative_index("initAtt", "3", 3); - initAtt = matrix_d(static_cast(1),static_cast(3)); - stan::math::fill(initAtt,DUMMY_VAR__); - current_statement_begin__ = 14; - validate_non_negative_index("unit", "1", 1); - validate_non_negative_index("unit", "3", 3); - unit = matrix_d(static_cast(1),static_cast(3)); - stan::math::fill(unit,DUMMY_VAR__); - - current_statement_begin__ = 16; - stan::math::assign(initAtt, rep_matrix((1.0 / 3.0),1,3)); - current_statement_begin__ = 17; - stan::math::assign(unit, rep_matrix(1.0,1,3)); - - // validate transformed data - current_statement_begin__ = 13; - current_statement_begin__ = 14; - - // validate, set parameter ranges - num_params_r__ = 0U; - param_ranges_i__.clear(); - current_statement_begin__ = 22; - validate_non_negative_index("mu_pr", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 23; - validate_non_negative_index("sigma", "3", 3); - num_params_r__ += 3; - current_statement_begin__ = 26; - validate_non_negative_index("r_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 27; - validate_non_negative_index("p_pr", "N", N); - num_params_r__ += N; - current_statement_begin__ = 28; - validate_non_negative_index("d_pr", "N", N); - num_params_r__ += N; - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw 
std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - ~model_wcs_sql() { } - - - void transform_inits(const stan::io::var_context& context__, - std::vector& params_i__, - std::vector& params_r__, - std::ostream* pstream__) const { - stan::io::writer writer__(params_r__,params_i__); - size_t pos__; - (void) pos__; // dummy call to supress warning - std::vector vals_r__; - std::vector vals_i__; - - if (!(context__.contains_r("mu_pr"))) - throw std::runtime_error("variable mu_pr missing"); - vals_r__ = context__.vals_r("mu_pr"); - pos__ = 0U; - validate_non_negative_index("mu_pr", "3", 3); - context__.validate_dims("initialization", "mu_pr", "vector_d", context__.to_vec(3)); - vector_d mu_pr(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - mu_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(mu_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable mu_pr: ") + e.what()); - } - - if (!(context__.contains_r("sigma"))) - throw std::runtime_error("variable sigma missing"); - vals_r__ = context__.vals_r("sigma"); - pos__ = 0U; - validate_non_negative_index("sigma", "3", 3); - context__.validate_dims("initialization", "sigma", "vector_d", context__.to_vec(3)); - vector_d sigma(static_cast(3)); - for (int j1__ = 0U; j1__ < 3; ++j1__) - sigma(j1__) = vals_r__[pos__++]; - try { - writer__.vector_lb_unconstrain(0,sigma); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable sigma: ") + e.what()); - } - - if (!(context__.contains_r("r_pr"))) - throw std::runtime_error("variable r_pr missing"); - vals_r__ = context__.vals_r("r_pr"); - pos__ = 0U; - validate_non_negative_index("r_pr", "N", N); - context__.validate_dims("initialization", "r_pr", "vector_d", context__.to_vec(N)); - vector_d r_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - r_pr(j1__) = vals_r__[pos__++]; - try { - 
writer__.vector_unconstrain(r_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable r_pr: ") + e.what()); - } - - if (!(context__.contains_r("p_pr"))) - throw std::runtime_error("variable p_pr missing"); - vals_r__ = context__.vals_r("p_pr"); - pos__ = 0U; - validate_non_negative_index("p_pr", "N", N); - context__.validate_dims("initialization", "p_pr", "vector_d", context__.to_vec(N)); - vector_d p_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - p_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(p_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable p_pr: ") + e.what()); - } - - if (!(context__.contains_r("d_pr"))) - throw std::runtime_error("variable d_pr missing"); - vals_r__ = context__.vals_r("d_pr"); - pos__ = 0U; - validate_non_negative_index("d_pr", "N", N); - context__.validate_dims("initialization", "d_pr", "vector_d", context__.to_vec(N)); - vector_d d_pr(static_cast(N)); - for (int j1__ = 0U; j1__ < N; ++j1__) - d_pr(j1__) = vals_r__[pos__++]; - try { - writer__.vector_unconstrain(d_pr); - } catch (const std::exception& e) { - throw std::runtime_error(std::string("Error transforming variable d_pr: ") + e.what()); - } - - params_r__ = writer__.data_r(); - params_i__ = writer__.data_i(); - } - - void transform_inits(const stan::io::var_context& context, - Eigen::Matrix& params_r, - std::ostream* pstream__) const { - std::vector params_r_vec; - std::vector params_i_vec; - transform_inits(context, params_i_vec, params_r_vec, pstream__); - params_r.resize(params_r_vec.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r(i) = params_r_vec[i]; - } - - - template - T__ log_prob(vector& params_r__, - vector& params_i__, - std::ostream* pstream__ = 0) const { - - typedef T__ local_scalar_t__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var 
warning - - T__ lp__(0.0); - stan::math::accumulator lp_accum__; - - try { - // model parameters - stan::io::reader in__(params_r__,params_i__); - - Eigen::Matrix mu_pr; - (void) mu_pr; // dummy to suppress unused var warning - if (jacobian__) - mu_pr = in__.vector_constrain(3,lp__); - else - mu_pr = in__.vector_constrain(3); - - Eigen::Matrix sigma; - (void) sigma; // dummy to suppress unused var warning - if (jacobian__) - sigma = in__.vector_lb_constrain(0,3,lp__); - else - sigma = in__.vector_lb_constrain(0,3); - - Eigen::Matrix r_pr; - (void) r_pr; // dummy to suppress unused var warning - if (jacobian__) - r_pr = in__.vector_constrain(N,lp__); - else - r_pr = in__.vector_constrain(N); - - Eigen::Matrix p_pr; - (void) p_pr; // dummy to suppress unused var warning - if (jacobian__) - p_pr = in__.vector_constrain(N,lp__); - else - p_pr = in__.vector_constrain(N); - - Eigen::Matrix d_pr; - (void) d_pr; // dummy to suppress unused var warning - if (jacobian__) - d_pr = in__.vector_constrain(N,lp__); - else - d_pr = in__.vector_constrain(N); - - - // transformed parameters - current_statement_begin__ = 33; - validate_non_negative_index("r", "N", N); - Eigen::Matrix r(static_cast(N)); - (void) r; // dummy to suppress unused var warning - - stan::math::initialize(r, DUMMY_VAR__); - stan::math::fill(r,DUMMY_VAR__); - current_statement_begin__ = 34; - validate_non_negative_index("p", "N", N); - Eigen::Matrix p(static_cast(N)); - (void) p; // dummy to suppress unused var warning - - stan::math::initialize(p, DUMMY_VAR__); - stan::math::fill(p,DUMMY_VAR__); - current_statement_begin__ = 35; - validate_non_negative_index("d", "N", N); - Eigen::Matrix d(static_cast(N)); - (void) d; // dummy to suppress unused var warning - - stan::math::initialize(d, DUMMY_VAR__); - stan::math::fill(d,DUMMY_VAR__); - - - current_statement_begin__ = 37; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 38; - stan::model::assign(r, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_pr,1,"mu_pr",1) + (get_base1(sigma,1,"sigma",1) * get_base1(r_pr,i,"r_pr",1)))), - "assigning variable r"); - current_statement_begin__ = 39; - stan::model::assign(p, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_pr,2,"mu_pr",1) + (get_base1(sigma,2,"sigma",1) * get_base1(p_pr,i,"p_pr",1)))), - "assigning variable p"); - current_statement_begin__ = 40; - stan::model::assign(d, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_pr,3,"mu_pr",1) + (get_base1(sigma,3,"sigma",1) * get_base1(d_pr,i,"d_pr",1)))) * 5), - "assigning variable d"); - } - - // validate transformed parameters - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(r(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: r" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(p(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: p" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - for (int i0__ = 0; i0__ < N; ++i0__) { - if (stan::math::is_uninitialized(d(i0__))) { - std::stringstream msg__; - msg__ << "Undefined transformed parameter: d" << '[' << i0__ << ']'; - throw std::runtime_error(msg__.str()); - } - } - - const char* function__ = "validate transformed params"; - (void) function__; // dummy to suppress unused var warning - current_statement_begin__ = 33; - check_greater_or_equal(function__,"r",r,0); - check_less_or_equal(function__,"r",r,1); - current_statement_begin__ = 34; - check_greater_or_equal(function__,"p",p,0); - check_less_or_equal(function__,"p",p,1); - current_statement_begin__ = 35; - check_greater_or_equal(function__,"d",d,0); - - // model body - - 
current_statement_begin__ = 46; - lp_accum__.add(normal_log(mu_pr, 0, 1)); - current_statement_begin__ = 47; - lp_accum__.add(normal_log(sigma, 0, 0.20000000000000001)); - current_statement_begin__ = 50; - lp_accum__.add(normal_log(r_pr, 0, 1)); - current_statement_begin__ = 51; - lp_accum__.add(normal_log(p_pr, 0, 1)); - current_statement_begin__ = 52; - lp_accum__.add(normal_log(d_pr, 0, 1)); - current_statement_begin__ = 54; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 56; - validate_non_negative_index("pred_prob_mat", "4", 4); - Eigen::Matrix pred_prob_mat(static_cast(4)); - (void) pred_prob_mat; // dummy to suppress unused var warning - - stan::math::initialize(pred_prob_mat, DUMMY_VAR__); - stan::math::fill(pred_prob_mat,DUMMY_VAR__); - current_statement_begin__ = 57; - validate_non_negative_index("subj_att", "1", 1); - validate_non_negative_index("subj_att", "3", 3); - Eigen::Matrix subj_att(static_cast(1),static_cast(3)); - (void) subj_att; // dummy to suppress unused var warning - - stan::math::initialize(subj_att, DUMMY_VAR__); - stan::math::fill(subj_att,DUMMY_VAR__); - current_statement_begin__ = 58; - validate_non_negative_index("att_signal", "1", 1); - validate_non_negative_index("att_signal", "3", 3); - Eigen::Matrix att_signal(static_cast(1),static_cast(3)); - (void) att_signal; // dummy to suppress unused var warning - - stan::math::initialize(att_signal, DUMMY_VAR__); - stan::math::fill(att_signal,DUMMY_VAR__); - current_statement_begin__ = 59; - validate_non_negative_index("tmpatt", "1", 1); - validate_non_negative_index("tmpatt", "3", 3); - Eigen::Matrix tmpatt(static_cast(1),static_cast(3)); - (void) tmpatt; // dummy to suppress unused var warning - - stan::math::initialize(tmpatt, DUMMY_VAR__); - stan::math::fill(tmpatt,DUMMY_VAR__); - current_statement_begin__ = 60; - validate_non_negative_index("tmpp", "4", 4); - Eigen::Matrix tmpp(static_cast(4)); - (void) tmpp; // dummy to suppress unused var warning - - 
stan::math::initialize(tmpp, DUMMY_VAR__); - stan::math::fill(tmpp,DUMMY_VAR__); - - - current_statement_begin__ = 63; - stan::math::assign(subj_att, initAtt); - current_statement_begin__ = 64; - stan::math::assign(pred_prob_mat, to_vector(multiply(subj_att,stan::model::rvalue(deck_match_rule, stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list()))), "deck_match_rule")))); - current_statement_begin__ = 66; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 68; - lp_accum__.add(multinomial_log(stan::model::rvalue(choice, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), "choice"), pred_prob_mat)); - current_statement_begin__ = 71; - if (as_bool(logical_eq(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),1))) { - - current_statement_begin__ = 72; - stan::math::assign(att_signal, elt_multiply(subj_att,get_base1(get_base1(choice_match_att,i,"choice_match_att",1),t,"choice_match_att",2))); - current_statement_begin__ = 73; - stan::math::assign(att_signal, stan::model::deep_copy(divide(att_signal,sum(att_signal)))); - current_statement_begin__ = 74; - stan::math::assign(tmpatt, add(multiply((1.0 - get_base1(r,i,"r",1)),subj_att),multiply(get_base1(r,i,"r",1),att_signal))); - } else { - - current_statement_begin__ = 76; - stan::math::assign(att_signal, elt_multiply(subj_att,subtract(unit,get_base1(get_base1(choice_match_att,i,"choice_match_att",1),t,"choice_match_att",2)))); - current_statement_begin__ = 77; - stan::math::assign(att_signal, stan::model::deep_copy(divide(att_signal,sum(att_signal)))); - current_statement_begin__ = 78; - stan::math::assign(tmpatt, add(multiply((1.0 - get_base1(p,i,"p",1)),subj_att),multiply(get_base1(p,i,"p",1),att_signal))); - } - 
current_statement_begin__ = 82; - stan::math::assign(subj_att, add(multiply(divide(tmpatt,sum(tmpatt)),0.99980000000000002),0.0001)); - current_statement_begin__ = 84; - stan::model::assign(tmpatt, - stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list())), - pow(get_base1(subj_att,1,1,"subj_att",1),get_base1(d,i,"d",1)), - "assigning variable tmpatt"); - current_statement_begin__ = 85; - stan::model::assign(tmpatt, - stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list())), - pow(get_base1(subj_att,1,2,"subj_att",1),get_base1(d,i,"d",1)), - "assigning variable tmpatt"); - current_statement_begin__ = 86; - stan::model::assign(tmpatt, - stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_uni(3), stan::model::nil_index_list())), - pow(get_base1(subj_att,1,3,"subj_att",1),get_base1(d,i,"d",1)), - "assigning variable tmpatt"); - current_statement_begin__ = 89; - if (as_bool(logical_lt(t,get_base1(Tsubj,i,"Tsubj",1)))) { - - current_statement_begin__ = 90; - stan::math::assign(tmpp, add(multiply(to_vector(multiply(tmpatt,stan::model::rvalue(deck_match_rule, stan::model::cons_list(stan::model::index_uni((t + 1)), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list()))), "deck_match_rule"))),0.99980000000000002),0.0001)); - current_statement_begin__ = 91; - stan::math::assign(pred_prob_mat, divide(tmpp,sum(tmpp))); - } - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - - lp_accum__.add(lp__); - return lp_accum__.sum(); - - } // log_prob() - - template - T_ log_prob(Eigen::Matrix& params_r, - 
std::ostream* pstream = 0) const { - std::vector vec_params_r; - vec_params_r.reserve(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - vec_params_r.push_back(params_r(i)); - std::vector vec_params_i; - return log_prob(vec_params_r, vec_params_i, pstream); - } - - - void get_param_names(std::vector& names__) const { - names__.resize(0); - names__.push_back("mu_pr"); - names__.push_back("sigma"); - names__.push_back("r_pr"); - names__.push_back("p_pr"); - names__.push_back("d_pr"); - names__.push_back("r"); - names__.push_back("p"); - names__.push_back("d"); - names__.push_back("mu_r"); - names__.push_back("mu_p"); - names__.push_back("mu_d"); - names__.push_back("log_lik"); - names__.push_back("y_pred"); - } - - - void get_dims(std::vector >& dimss__) const { - dimss__.resize(0); - std::vector dims__; - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(3); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dimss__.push_back(dims__); - dims__.resize(0); - dims__.push_back(N); - dims__.push_back(4); - dims__.push_back(T); - dimss__.push_back(dims__); - } - - template - void write_array(RNG& base_rng__, - std::vector& params_r__, - std::vector& params_i__, - std::vector& vars__, - bool include_tparams__ = true, - bool include_gqs__ = true, - std::ostream* pstream__ = 0) const { - typedef double 
local_scalar_t__; - - vars__.resize(0); - stan::io::reader in__(params_r__,params_i__); - static const char* function__ = "model_wcs_sql_namespace::write_array"; - (void) function__; // dummy to suppress unused var warning - // read-transform, write parameters - vector_d mu_pr = in__.vector_constrain(3); - vector_d sigma = in__.vector_lb_constrain(0,3); - vector_d r_pr = in__.vector_constrain(N); - vector_d p_pr = in__.vector_constrain(N); - vector_d d_pr = in__.vector_constrain(N); - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(mu_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < 3; ++k_0__) { - vars__.push_back(sigma[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(r_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(p_pr[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(d_pr[k_0__]); - } - - // declare and define transformed parameters - double lp__ = 0.0; - (void) lp__; // dummy to suppress unused var warning - stan::math::accumulator lp_accum__; - - local_scalar_t__ DUMMY_VAR__(std::numeric_limits::quiet_NaN()); - (void) DUMMY_VAR__; // suppress unused var warning - - try { - current_statement_begin__ = 33; - validate_non_negative_index("r", "N", N); - Eigen::Matrix r(static_cast(N)); - (void) r; // dummy to suppress unused var warning - - stan::math::initialize(r, DUMMY_VAR__); - stan::math::fill(r,DUMMY_VAR__); - current_statement_begin__ = 34; - validate_non_negative_index("p", "N", N); - Eigen::Matrix p(static_cast(N)); - (void) p; // dummy to suppress unused var warning - - stan::math::initialize(p, DUMMY_VAR__); - stan::math::fill(p,DUMMY_VAR__); - current_statement_begin__ = 35; - validate_non_negative_index("d", "N", N); - Eigen::Matrix d(static_cast(N)); - (void) d; // dummy to suppress unused var warning - - stan::math::initialize(d, DUMMY_VAR__); - stan::math::fill(d,DUMMY_VAR__); - - - current_statement_begin__ = 37; - for (int i = 1; i <= N; ++i) { - - 
current_statement_begin__ = 38; - stan::model::assign(r, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_pr,1,"mu_pr",1) + (get_base1(sigma,1,"sigma",1) * get_base1(r_pr,i,"r_pr",1)))), - "assigning variable r"); - current_statement_begin__ = 39; - stan::model::assign(p, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - Phi_approx((get_base1(mu_pr,2,"mu_pr",1) + (get_base1(sigma,2,"sigma",1) * get_base1(p_pr,i,"p_pr",1)))), - "assigning variable p"); - current_statement_begin__ = 40; - stan::model::assign(d, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - (Phi_approx((get_base1(mu_pr,3,"mu_pr",1) + (get_base1(sigma,3,"sigma",1) * get_base1(d_pr,i,"d_pr",1)))) * 5), - "assigning variable d"); - } - - // validate transformed parameters - current_statement_begin__ = 33; - check_greater_or_equal(function__,"r",r,0); - check_less_or_equal(function__,"r",r,1); - current_statement_begin__ = 34; - check_greater_or_equal(function__,"p",p,0); - check_less_or_equal(function__,"p",p,1); - current_statement_begin__ = 35; - check_greater_or_equal(function__,"d",d,0); - - // write transformed parameters - if (include_tparams__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(r[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(p[k_0__]); - } - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(d[k_0__]); - } - } - if (!include_gqs__) return; - // declare and define generated quantities - current_statement_begin__ = 99; - local_scalar_t__ mu_r; - (void) mu_r; // dummy to suppress unused var warning - - stan::math::initialize(mu_r, DUMMY_VAR__); - stan::math::fill(mu_r,DUMMY_VAR__); - current_statement_begin__ = 100; - local_scalar_t__ mu_p; - (void) mu_p; // dummy to suppress unused var warning - - stan::math::initialize(mu_p, DUMMY_VAR__); - stan::math::fill(mu_p,DUMMY_VAR__); - 
current_statement_begin__ = 101; - local_scalar_t__ mu_d; - (void) mu_d; // dummy to suppress unused var warning - - stan::math::initialize(mu_d, DUMMY_VAR__); - stan::math::fill(mu_d,DUMMY_VAR__); - current_statement_begin__ = 104; - validate_non_negative_index("log_lik", "N", N); - vector log_lik(N); - stan::math::initialize(log_lik, DUMMY_VAR__); - stan::math::fill(log_lik,DUMMY_VAR__); - current_statement_begin__ = 107; - validate_non_negative_index("y_pred", "N", N); - validate_non_negative_index("y_pred", "4", 4); - validate_non_negative_index("y_pred", "T", T); - vector > > y_pred(N, (vector >(4, (vector(T, 0))))); - stan::math::fill(y_pred, std::numeric_limits::min()); - - - current_statement_begin__ = 110; - for (int i = 1; i <= N; ++i) { - - current_statement_begin__ = 111; - for (int t = 1; t <= T; ++t) { - - current_statement_begin__ = 112; - for (int deck = 1; deck <= 4; ++deck) { - - current_statement_begin__ = 113; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_uni(deck), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - -(1), - "assigning variable y_pred"); - } - } - } - current_statement_begin__ = 118; - stan::math::assign(mu_r, Phi_approx(get_base1(mu_pr,1,"mu_pr",1))); - current_statement_begin__ = 119; - stan::math::assign(mu_p, Phi_approx(get_base1(mu_pr,2,"mu_pr",1))); - current_statement_begin__ = 120; - stan::math::assign(mu_d, (Phi_approx(get_base1(mu_pr,3,"mu_pr",1)) * 5)); - - current_statement_begin__ = 123; - for (int i = 1; i <= N; ++i) { - { - current_statement_begin__ = 124; - validate_non_negative_index("subj_att", "1", 1); - validate_non_negative_index("subj_att", "3", 3); - Eigen::Matrix subj_att(static_cast(1),static_cast(3)); - (void) subj_att; // dummy to suppress unused var warning - - stan::math::initialize(subj_att, DUMMY_VAR__); - stan::math::fill(subj_att,DUMMY_VAR__); - current_statement_begin__ = 125; - 
validate_non_negative_index("att_signal", "1", 1); - validate_non_negative_index("att_signal", "3", 3); - Eigen::Matrix att_signal(static_cast(1),static_cast(3)); - (void) att_signal; // dummy to suppress unused var warning - - stan::math::initialize(att_signal, DUMMY_VAR__); - stan::math::fill(att_signal,DUMMY_VAR__); - current_statement_begin__ = 126; - validate_non_negative_index("pred_prob_mat", "4", 4); - Eigen::Matrix pred_prob_mat(static_cast(4)); - (void) pred_prob_mat; // dummy to suppress unused var warning - - stan::math::initialize(pred_prob_mat, DUMMY_VAR__); - stan::math::fill(pred_prob_mat,DUMMY_VAR__); - current_statement_begin__ = 128; - validate_non_negative_index("tmpatt", "1", 1); - validate_non_negative_index("tmpatt", "3", 3); - Eigen::Matrix tmpatt(static_cast(1),static_cast(3)); - (void) tmpatt; // dummy to suppress unused var warning - - stan::math::initialize(tmpatt, DUMMY_VAR__); - stan::math::fill(tmpatt,DUMMY_VAR__); - current_statement_begin__ = 129; - validate_non_negative_index("tmpp", "4", 4); - Eigen::Matrix tmpp(static_cast(4)); - (void) tmpp; // dummy to suppress unused var warning - - stan::math::initialize(tmpp, DUMMY_VAR__); - stan::math::fill(tmpp,DUMMY_VAR__); - - - current_statement_begin__ = 131; - stan::math::assign(subj_att, initAtt); - current_statement_begin__ = 132; - stan::math::assign(pred_prob_mat, to_vector(multiply(subj_att,stan::model::rvalue(deck_match_rule, stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list()))), "deck_match_rule")))); - current_statement_begin__ = 134; - stan::model::assign(log_lik, - stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - 0, - "assigning variable log_lik"); - current_statement_begin__ = 136; - for (int t = 1; t <= get_base1(Tsubj,i,"Tsubj",1); ++t) { - - current_statement_begin__ = 138; - stan::model::assign(log_lik, - 
stan::model::cons_list(stan::model::index_uni(i), stan::model::nil_index_list()), - stan::model::deep_copy((get_base1(log_lik,i,"log_lik",1) + multinomial_log(stan::model::rvalue(choice, stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), "choice"),pred_prob_mat))), - "assigning variable log_lik"); - current_statement_begin__ = 140; - stan::model::assign(y_pred, - stan::model::cons_list(stan::model::index_uni(i), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_uni(t), stan::model::nil_index_list()))), - multinomial_rng(pred_prob_mat,1, base_rng__), - "assigning variable y_pred"); - current_statement_begin__ = 142; - if (as_bool(logical_eq(get_base1(get_base1(outcome,i,"outcome",1),t,"outcome",2),1))) { - - current_statement_begin__ = 143; - stan::math::assign(att_signal, elt_multiply(subj_att,get_base1(get_base1(choice_match_att,i,"choice_match_att",1),t,"choice_match_att",2))); - current_statement_begin__ = 144; - stan::math::assign(att_signal, stan::model::deep_copy(divide(att_signal,sum(att_signal)))); - current_statement_begin__ = 145; - stan::math::assign(tmpatt, add(multiply((1.0 - get_base1(r,i,"r",1)),subj_att),multiply(get_base1(r,i,"r",1),att_signal))); - } else { - - current_statement_begin__ = 147; - stan::math::assign(att_signal, elt_multiply(subj_att,subtract(unit,get_base1(get_base1(choice_match_att,i,"choice_match_att",1),t,"choice_match_att",2)))); - current_statement_begin__ = 148; - stan::math::assign(att_signal, stan::model::deep_copy(divide(att_signal,sum(att_signal)))); - current_statement_begin__ = 149; - stan::math::assign(tmpatt, add(multiply((1.0 - get_base1(p,i,"p",1)),subj_att),multiply(get_base1(p,i,"p",1),att_signal))); - } - current_statement_begin__ = 152; - stan::math::assign(subj_att, add(multiply(divide(tmpatt,sum(tmpatt)),0.99980000000000002),0.0001)); - 
current_statement_begin__ = 154; - stan::model::assign(tmpatt, - stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_uni(1), stan::model::nil_index_list())), - pow(get_base1(subj_att,1,1,"subj_att",1),get_base1(d,i,"d",1)), - "assigning variable tmpatt"); - current_statement_begin__ = 155; - stan::model::assign(tmpatt, - stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_uni(2), stan::model::nil_index_list())), - pow(get_base1(subj_att,1,2,"subj_att",1),get_base1(d,i,"d",1)), - "assigning variable tmpatt"); - current_statement_begin__ = 156; - stan::model::assign(tmpatt, - stan::model::cons_list(stan::model::index_uni(1), stan::model::cons_list(stan::model::index_uni(3), stan::model::nil_index_list())), - pow(get_base1(subj_att,1,3,"subj_att",1),get_base1(d,i,"d",1)), - "assigning variable tmpatt"); - current_statement_begin__ = 158; - if (as_bool(logical_lt(t,get_base1(Tsubj,i,"Tsubj",1)))) { - - current_statement_begin__ = 159; - stan::math::assign(tmpp, add(multiply(to_vector(multiply(tmpatt,stan::model::rvalue(deck_match_rule, stan::model::cons_list(stan::model::index_uni((t + 1)), stan::model::cons_list(stan::model::index_omni(), stan::model::cons_list(stan::model::index_omni(), stan::model::nil_index_list()))), "deck_match_rule"))),0.99980000000000002),0.0001)); - current_statement_begin__ = 160; - stan::math::assign(pred_prob_mat, divide(tmpp,sum(tmpp))); - } - } - } - } - - // validate generated quantities - current_statement_begin__ = 99; - check_greater_or_equal(function__,"mu_r",mu_r,0); - check_less_or_equal(function__,"mu_r",mu_r,1); - current_statement_begin__ = 100; - check_greater_or_equal(function__,"mu_p",mu_p,0); - check_less_or_equal(function__,"mu_p",mu_p,5); - current_statement_begin__ = 101; - check_greater_or_equal(function__,"mu_d",mu_d,0); - check_less_or_equal(function__,"mu_d",mu_d,5); - current_statement_begin__ = 104; - current_statement_begin__ = 107; - - 
// write generated quantities - vars__.push_back(mu_r); - vars__.push_back(mu_p); - vars__.push_back(mu_d); - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(log_lik[k_0__]); - } - for (int k_2__ = 0; k_2__ < T; ++k_2__) { - for (int k_1__ = 0; k_1__ < 4; ++k_1__) { - for (int k_0__ = 0; k_0__ < N; ++k_0__) { - vars__.push_back(y_pred[k_0__][k_1__][k_2__]); - } - } - } - - } catch (const std::exception& e) { - stan::lang::rethrow_located(e, current_statement_begin__, prog_reader__()); - // Next line prevents compiler griping about no return - throw std::runtime_error("*** IF YOU SEE THIS, PLEASE REPORT A BUG ***"); - } - } - - template - void write_array(RNG& base_rng, - Eigen::Matrix& params_r, - Eigen::Matrix& vars, - bool include_tparams = true, - bool include_gqs = true, - std::ostream* pstream = 0) const { - std::vector params_r_vec(params_r.size()); - for (int i = 0; i < params_r.size(); ++i) - params_r_vec[i] = params_r(i); - std::vector vars_vec; - std::vector params_i_vec; - write_array(base_rng,params_r_vec,params_i_vec,vars_vec,include_tparams,include_gqs,pstream); - vars.resize(vars_vec.size()); - for (int i = 0; i < vars.size(); ++i) - vars(i) = vars_vec[i]; - } - - static std::string model_name() { - return "model_wcs_sql"; - } - - - void constrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r_pr" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "p_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "d_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "d" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_r"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_d"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= 4; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' 
<< k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - - - void unconstrained_param_names(std::vector& param_names__, - bool include_tparams__ = true, - bool include_gqs__ = true) const { - std::stringstream param_name_stream__; - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= 3; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "sigma" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "p_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "d_pr" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - - if (!include_gqs__ && !include_tparams__) return; - - if (include_tparams__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "r" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "p" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "d" << '.' 
<< k_0__; - param_names__.push_back(param_name_stream__.str()); - } - } - - - if (!include_gqs__) return; - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_r"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_p"; - param_names__.push_back(param_name_stream__.str()); - param_name_stream__.str(std::string()); - param_name_stream__ << "mu_d"; - param_names__.push_back(param_name_stream__.str()); - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "log_lik" << '.' << k_0__; - param_names__.push_back(param_name_stream__.str()); - } - for (int k_2__ = 1; k_2__ <= T; ++k_2__) { - for (int k_1__ = 1; k_1__ <= 4; ++k_1__) { - for (int k_0__ = 1; k_0__ <= N; ++k_0__) { - param_name_stream__.str(std::string()); - param_name_stream__ << "y_pred" << '.' << k_0__ << '.' << k_1__ << '.' << k_2__; - param_names__.push_back(param_name_stream__.str()); - } - } - } - } - -}; // model - -} - - - - -#endif diff --git a/src/init.cpp b/src/init.cpp new file mode 100644 index 00000000..5023790d --- /dev/null +++ b/src/init.cpp @@ -0,0 +1,20 @@ +// Generated by the rstantools package + + +#include +#include +#include +#include +#include + + +static const R_CallMethodDef CallEntries[] = { + {NULL, NULL, 0} +}; + + +void attribute_visible R_init_hBayesDM(DllInfo *dll) { + // next line is necessary to avoid a NOTE from R CMD check + R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); + R_useDynamicSymbols(dll, TRUE); // necessary for .onLoad() to work +} diff --git a/tools/make_cc.R b/tools/make_cc.R new file mode 100644 index 00000000..d4817e7a --- /dev/null +++ b/tools/make_cc.R @@ -0,0 +1,48 @@ +# Part of the rstanarm package for estimating model parameters +# Copyright (C) 2015, 2016, 2017 Trustees of Columbia University +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU 
General Public License +# as published by the Free Software Foundation; either version 3 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +options(warn = 3L) +options("useFancyQuotes" = FALSE) + +make_cc <- function(file) { + file <- sub("\\.cc$", ".stan", file) + cppcode <- rstan::stanc(file, allow_undefined = TRUE, + obfuscate_model_name = FALSE)$cppcode + cppcode <- sub("(class[[:space:]]+[A-Za-z_][A-Za-z0-9_]*[[:space:]]*: public prob_grad \\{)", + paste("#include \n", "\\1"), cppcode) + + cat(readLines(file.path("..", "inst", "stan_files", "pre", "license.stan")), + "#ifndef MODELS_HPP", "#define MODELS_HPP", "#define STAN__SERVICES__COMMAND_HPP", + "#include ", + cppcode, "#endif", file = sub("\\.stan$", ".hpp", file), + sep = "\n", append = FALSE) + + f <- sub("\\.stan$", "", basename(file)) + Rcpp::exposeClass(class = paste0("model_", f), + constructors = list(c("SEXP", "SEXP", "SEXP")), fields = character(), + methods = c("call_sampler", + "param_names", "param_names_oi", "param_fnames_oi", + "param_dims", "param_dims_oi", "update_param_oi", "param_oi_tidx", + "grad_log_prob", "log_prob", + "unconstrain_pars", "constrain_pars", "num_pars_unconstrained", + "unconstrained_param_names", "constrained_param_names"), + file = file.path("..", "inst", "stan_files", paste0(f, ".cc")), + header = paste0('#include "', f, '.hpp"'), + module = paste0("stan_fit4", f, "_mod"), + CppClass = "rstan::stan_fit ", + Rfile = FALSE) + return(invisible(NULL)) +} diff --git a/tools/make_cpp.R b/tools/make_cpp.R 
deleted file mode 100755 index 4a9ea2d0..00000000 --- a/tools/make_cpp.R +++ /dev/null @@ -1,47 +0,0 @@ -# Part of the rstanarm package for estimating model parameters -# Copyright (C) 2015 Trustees of Columbia University -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 3 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -options(warn = 3L) -stan_files <- dir("exec", pattern = "stan$", full.names = TRUE) -cat(readLines(file.path("inst", "chunks", "license.stan")), - "#ifndef MODELS_HPP", "#define MODELS_HPP", - "#define STAN__SERVICES__COMMAND_HPP", "#include ", - sapply(stan_files, FUN = function(f) { - cppcode <- rstan::stanc_builder(f, - isystem = file.path("inst", "chunks"))$cppcode - cppcode <- gsub("typedef.*stan_model.*;", "", cppcode, perl = TRUE) - return(cppcode) - }), "#endif", file = file.path("src", "include", "models.hpp"), - sep = "\n", append = FALSE) - -options("useFancyQuotes" = FALSE) - -sapply(sub(".stan", "", basename(stan_files), fixed = TRUE), function(f) { - Rcpp::exposeClass(class = paste0("model_", f), - constructors = list(c("SEXP", "SEXP")), fields = character(), - methods = c("call_sampler", - "param_names", "param_names_oi", "param_fnames_oi", - "param_dims", "param_dims_oi", "update_param_oi", "param_oi_tidx", - "grad_log_prob", "log_prob", - "unconstrain_pars", "constrain_pars", "num_pars_unconstrained", - 
"unconstrained_param_names", "constrained_param_names"), - file = paste0(f, "Module.cc"), header = '#include "include/models.hpp"', - module = paste0("stan_fit4", f, "_mod"), - CppClass = paste0("rstan::stan_fit "), - Rfile = FALSE) - return(invisible(NULL)) -})