Circular Regression Module

circular_regression

RegressionParent

Parent Regression Class: Parent class for custom circular regressions.

RegressionParent

Specifies the instance variables and methods common to circular regression analyses.

Source code in rbmpy/circular_regression/RegressionParent.py
class RegressionParent:
    """Specifies the instance variables and methods of the common methods of
    circular regression analyses.
    """

    def __init__(self, reg_vars: "RegVars"):
        """Defines the instance variables unique to each instance.

        See project-specific RegVars in child class for documentation.

        Parameters
        ----------
        reg_vars : RegVars
            Regression-variables-object instance defined in your project.
        """

        # Extract free parameters
        self.which_vars = reg_vars.which_vars

        # Extract fixed parameter values
        self.fixed_coeffs_reg = reg_vars.fixed_coeffs_reg

        # Extract prior mean and width
        self.prior_mean = reg_vars.prior_mean
        self.prior_width = reg_vars.prior_width

        # Extract other attributes
        self.n_subj = reg_vars.n_subj
        self.n_ker = reg_vars.n_ker
        self.seed = reg_vars.seed
        self.show_ind_prog = reg_vars.show_ind_prog
        self.rand_sp = reg_vars.rand_sp
        self.use_prior = reg_vars.use_prior
        self.n_sp = reg_vars.n_sp
        self.bnds = reg_vars.bnds
        self.which_update_regressors = reg_vars.which_update_regressors

    def parallel_estimation(
        self, df: pd.DataFrame, prior_columns: list
    ) -> pd.DataFrame:
        """Manages the parallel estimation of the regression models.

        Parameters
        ----------
        df : pd.DataFrame
            Data frame containing the data.
        prior_columns : list
            Selected parameters for regression.

        Returns
        -------
        pd.DataFrame
            Data frame containing regression results.
        """

        # Inform user about progress
        pbar = None
        if self.show_ind_prog:
            # Inform user
            sleep(0.1)
            print("\nRegression model estimation:")
            sleep(0.1)

            # Initialize progress bar
            pbar = tqdm(total=self.n_subj)

        # Initialize pool object for parallel processing
        pool = Pool(processes=self.n_ker)

        # Estimate parameters in parallel
        results = [
            pool.apply_async(
                self.estimation,
                args=(df[(df["subj_num"] == i + 1)].copy(),),
                callback=lambda _: callback(self.show_ind_prog, pbar),
            )
            for i in range(0, self.n_subj)
        ]

        output = [p.get() for p in results]
        pool.close()
        pool.join()

        # Close progress bar
        if self.show_ind_prog and pbar:
            pbar.close()

        # Put all results in data frame
        values = self.which_vars.values()
        columns = list(compress(prior_columns, values))
        columns.append("llh")
        columns.append("BIC")
        columns.append("group")
        columns.append("subj_num")
        columns.append("ID")
        results_df = pd.DataFrame(output, columns=columns)

        return results_df

    def estimation(self, df_subj_input: pd.DataFrame) -> list:
        """Estimates the coefficients of the circular regression model.

        Parameters
        ----------
        df_subj_input : pd.DataFrame
            Data frame containing subject-specific subset of data.

        Returns
        -------
        list
            A list with the regression results.
        """

        # Control random number generator for reproducible results
        np.random.seed(self.seed)

        # Get data matrix required for the model from child class
        df_subj = self.get_datamat(df_subj_input)

        # Reset the index of this subset and keep the old index as a trial counter
        df_subj = df_subj.reset_index().rename(columns={"index": "trial"})

        # Select starting points and boundaries
        # -------------------------------------

        # Extract free parameters
        values = self.which_vars.values()

        # Select boundaries according to free parameters
        bnds = list(compress(self.bnds, values))

        # Initialize with unrealistically high negative log-likelihood and no estimate
        min_llh = 100000
        min_x = np.nan

        # Cycle over starting points
        for _ in range(0, self.n_sp):

            # Get project-specific starting points for child class
            x0 = self.get_starting_point()

            # Select starting points according to free parameters
            x0 = np.array(list(compress(x0, values)))

            # Estimate parameters
            res = minimize(
                self.llh,
                x0,
                args=(df_subj,),
                method="L-BFGS-B",
                options={"disp": False, "maxiter": 500},
                bounds=bnds,
            )

            # Parameter values
            x = res.x

            # Negative log-likelihood
            llh_sum = res.fun

        # Keep the parameter estimate with the lowest cumulative negative
        # log-likelihood across starting points
            if llh_sum < min_llh:
                min_llh = llh_sum
                min_x = x

        # Compute BIC
        bic = compute_bic(min_llh, sum(values), len(df_subj))

        # Add results to list
        results_list = list()
        for i in range(len(min_x)):
            results_list.append(float(min_x[i]))

        # Extract group and ID for output
        group = int(pd.unique(df_subj["group"])[0])
        subj_num = int(pd.unique(df_subj["subj_num"])[0])
        subj_id = pd.unique(df_subj["ID"])[0]

        # Add log-likelihood, BIC, group, subject number, and ID to output
        results_list.append(float(min_llh))
        results_list.append(float(bic))
        results_list.append(group)
        results_list.append(subj_num)
        results_list.append(subj_id)

        return results_list

    def llh(self, coeffs: np.ndarray, df: pd.DataFrame) -> float:
        """Computes the likelihood of participant updates, given the specified parameters.

        Parameters
        ----------
        coeffs : np.ndarray
            Regression coefficients.
        df : pd.DataFrame
            Data frame containing subset of data.

        Returns
        -------
        float
            Summed negative log-likelihood.
        """

        # Initialize small value that replaces zero probabilities for numerical stability
        corrected_0_p = 1e-10

        # Extract parameters
        sel_coeffs = get_sel_coeffs(
            self.which_vars.items(), self.fixed_coeffs_reg, coeffs
        )

        # Linear regression component
        # ---------------------------

        # Create linear regression matrix
        lr_mat = df[self.which_update_regressors].to_numpy()

        # Linear regression parameters
        update_regressors = [
            value
            for key, value in sel_coeffs.items()
            if key not in ["omikron_0", "omikron_1", "lambda_0", "lambda_1"]
        ]

        # Compute predicted update
        a_t_hat = lr_mat @ np.array(update_regressors)  # matrix multiplication

        # Ensure value is in range [-pi, pi]
        a_t_hat = normalize_angle(a_t_hat)

        # Residuals
        if self.which_vars["omikron_1"]:

            # Compute updating noise
            concentration = residual_fun(
                abs(a_t_hat), sel_coeffs["omikron_0"], sel_coeffs["omikron_1"]
            )

        else:
            # Motor noise only
            concentration = np.repeat(sel_coeffs["omikron_0"], len(a_t_hat))

        # Compute probability density of update
        p_a_t = vonmises.pdf(df["a_t"], loc=a_t_hat, kappa=concentration)
        p_a_t[p_a_t == 0] = (
            corrected_0_p  # adjust zeros to small value for numerical stability
        )

        # Check for inf and nan
        if sum(np.isinf(p_a_t)) > 0:
            sys.exit("\np_a_t contains infs")
        elif sum(np.isnan(p_a_t)) > 0:
            sys.exit("\np_a_t contains nans")

        # Compute log-likelihood of linear regression
        llh_reg = np.log(p_a_t)

        # Check for inf and nan
        if sum(np.isinf(llh_reg)) > 0:
            sys.exit("llh_reg contains infs")
        elif sum(np.isnan(llh_reg)) > 0:
            sys.exit("llh_reg contains nans")

        # Identify perseveration trials
        pers = df["a_t"] == 0

        # Adjust for probabilities on the edge
        delta_fun = np.full(len(pers), np.nan)
        delta_fun[pers == 1] = 1 - corrected_0_p
        delta_fun[pers == 0] = corrected_0_p

        # Note: this has not been systematically validated with participant data.
        # The setup works for simulated data based on a uniform distribution, which
        # has more large-scale PEs than actual data. For actual data, adjustments
        # might be necessary, such as parameter ranges, parameter combinations, or
        # working in degrees instead of radians.
        lambda_t = None
        if self.which_vars["lambda_0"] and not self.which_vars["lambda_1"]:

            # Single average perseveration parameter lambda_0
            lambda_t = np.repeat(sel_coeffs["lambda_0"], len(pers))

        elif (not self.which_vars["lambda_0"] and self.which_vars["lambda_1"]) or (
            self.which_vars["lambda_0"] and self.which_vars["lambda_1"]
        ):

            # Logistic function combining both parameters
            lambda_t = compute_persprob(
                sel_coeffs["lambda_0"], sel_coeffs["lambda_1"], abs(a_t_hat)
            )

        if self.which_vars["lambda_0"] or self.which_vars["lambda_1"]:

            lambda_t[lambda_t == 0] = corrected_0_p
            lambda_t[lambda_t == 1] = 1 - corrected_0_p

            # Compute mixture between linear regression and perseveration model using lambda as weight
            llh_mix = logsumexp(
                [
                    np.log(delta_fun) + np.log(lambda_t),
                    np.log((1 - lambda_t)) + llh_reg,
                ],
                axis=0,
            )

            # Check for inf and nan
            if sum(np.isinf(llh_mix)) > 0:
                sys.exit("llh_mix contains infs")
            elif sum(np.isnan(llh_mix)) > 0:
                sys.exit("llh_mix contains nans")

            # Compute negative log-likelihood
            llh_sum = -1 * np.sum(llh_mix)

        else:
            # Compute negative log-likelihood
            llh_sum = -1 * np.sum(llh_reg)

        # Check for inf and nan
        if np.isinf(llh_sum) or np.isnan(llh_sum):
            sys.exit("\nllh incorrect")

        if self.use_prior:

            # Extract free parameters
            values = self.which_vars.values()

            # Prior mean and variance
            prior_mean = np.array(list(compress(self.prior_mean, values)))
            prior_width = np.array(list(compress(self.prior_width, values)))

        # Compute coefficient probabilities
            prior_prob = norm.pdf(coeffs, loc=prior_mean, scale=prior_width)

            # Set a minimum prior probability threshold before taking log
            prior_prob = np.maximum(prior_prob, corrected_0_p)

            # Adjust the negative log-likelihood
            llh_sum -= np.sum(np.log(prior_prob))

        return llh_sum

    @staticmethod
    def get_datamat(df_subj_input):
        """Raises an error if the get_datamat function is undefined in the
        project-specific regression.
        """
        raise NotImplementedError("Subclass needs to define this.")

    def get_starting_point(self):
        """Raises an error if the get_starting_point function is undefined in the
        project-specific regression.
        """
        raise NotImplementedError("Subclass needs to define this.")

    def sample_data(self, df_params, n_trials, all_sub_data=None):
        """Raises an error if the sample_data function is undefined in the
        project-specific regression.
        """
        raise NotImplementedError("Subclass needs to define this.")
__init__(reg_vars)

Defines the instance variables unique to each instance.

See project-specific RegVars in child class for documentation.

Parameters:

reg_vars : RegVars (required)
    Regression-variables-object instance defined in your project.

Source code in rbmpy/circular_regression/RegressionParent.py
def __init__(self, reg_vars: "RegVars"):
    """Defines the instance variables unique to each instance.

    See project-specific RegVars in child class for documentation.

    Parameters
    ----------
    reg_vars : RegVars
        Regression-variables-object instance defined in your project.
    """

    # Extract free parameters
    self.which_vars = reg_vars.which_vars

    # Extract fixed parameter values
    self.fixed_coeffs_reg = reg_vars.fixed_coeffs_reg

    # Extract prior mean and width
    self.prior_mean = reg_vars.prior_mean
    self.prior_width = reg_vars.prior_width

    # Extract other attributes
    self.n_subj = reg_vars.n_subj
    self.n_ker = reg_vars.n_ker
    self.seed = reg_vars.seed
    self.show_ind_prog = reg_vars.show_ind_prog
    self.rand_sp = reg_vars.rand_sp
    self.use_prior = reg_vars.use_prior
    self.n_sp = reg_vars.n_sp
    self.bnds = reg_vars.bnds
    self.which_update_regressors = reg_vars.which_update_regressors
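
Since the constructor only copies attributes from reg_vars, any object exposing these fields works as a configuration. Below is a minimal, hypothetical sketch of such an object; the field values are illustrative only, and the project-specific RegVars subclass defines the actual parameter names, priors, and bounds.

from dataclasses import dataclass, field

@dataclass
class MinimalRegVars:
    # Hypothetical values for illustration; one entry per model parameter,
    # True marks a free (estimated) parameter.
    which_vars: dict = field(default_factory=lambda: {
        "beta_0": True,      # example update regressor coefficient
        "omikron_0": True,   # motor-noise concentration
        "omikron_1": False,  # updating noise (held fixed here)
        "lambda_0": False,   # perseveration intercept (held fixed here)
        "lambda_1": False,   # perseveration slope (held fixed here)
    })
    # Values used for the parameters that are held fixed
    fixed_coeffs_reg: dict = field(default_factory=lambda: {
        "omikron_1": 0.0, "lambda_0": 0.0, "lambda_1": 0.0,
    })
    # Prior means/widths and optimization bounds, in which_vars order
    prior_mean: list = field(default_factory=lambda: [0.0, 10.0, 0.0, 0.0, 0.0])
    prior_width: list = field(default_factory=lambda: [5.0, 5.0, 5.0, 5.0, 5.0])
    bnds: list = field(default_factory=lambda: [
        (-5, 5), (0.01, 100), (0, 10), (0, 1), (-10, 10),
    ])
    # Data-frame columns used as update regressors; must line up with the
    # non-noise, non-lambda coefficients in which_vars
    which_update_regressors: list = field(default_factory=lambda: ["delta_t"])
    n_subj: int = 10            # number of participants in the data frame
    n_ker: int = 4              # parallel worker processes
    n_sp: int = 5               # optimization starting points per subject
    seed: int = 1               # RNG seed for reproducibility
    rand_sp: bool = True        # randomize starting points
    use_prior: bool = False     # add a Gaussian prior term to the objective
    show_ind_prog: bool = True  # display a tqdm progress bar
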
parallel_estimation(df, prior_columns)

Manages the parallel estimation of the regression models.

Parameters:

df : pd.DataFrame (required)
    Data frame containing the data.
prior_columns : list (required)
    Selected parameters for regression.

Returns:

pd.DataFrame
    Data frame containing regression results.

Source code in rbmpy/circular_regression/RegressionParent.py
def parallel_estimation(
    self, df: pd.DataFrame, prior_columns: list
) -> pd.DataFrame:
    """Manages the parallel estimation of the regression models.

    Parameters
    ----------
    df : pd.DataFrame
        Data frame containing the data.
    prior_columns : list
        Selected parameters for regression.

    Returns
    -------
    pd.DataFrame
        Data frame containing regression results.
    """

    # Inform user about progress
    pbar = None
    if self.show_ind_prog:
        # Inform user
        sleep(0.1)
        print("\nRegression model estimation:")
        sleep(0.1)

        # Initialize progress bar
        pbar = tqdm(total=self.n_subj)

    # Initialize pool object for parallel processing
    pool = Pool(processes=self.n_ker)

    # Estimate parameters in parallel
    results = [
        pool.apply_async(
            self.estimation,
            args=(df[(df["subj_num"] == i + 1)].copy(),),
            callback=lambda _: callback(self.show_ind_prog, pbar),
        )
        for i in range(0, self.n_subj)
    ]

    output = [p.get() for p in results]
    pool.close()
    pool.join()

    # Close progress bar
    if self.show_ind_prog and pbar:
        pbar.close()

    # Put all results in data frame
    values = self.which_vars.values()
    columns = list(compress(prior_columns, values))
    columns.append("llh")
    columns.append("BIC")
    columns.append("group")
    columns.append("subj_num")
    columns.append("ID")
    results_df = pd.DataFrame(output, columns=columns)

    return results_df
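
As a usage sketch, assuming a project-specific child class (here called MyRegression, a hypothetical name) and the MinimalRegVars sketch above: the data frame needs one block of rows per participant, keyed by a subj_num column running from 1 to n_subj, plus whatever columns the child's get_datamat expects (a_t, group, ID, and the update regressors).

import pandas as pd

# Hypothetical data file and child class; both are assumptions for this sketch
df = pd.read_csv("my_study_data.csv")
model = MyRegression(MinimalRegVars())

# One entry per parameter in which_vars, in the same order; only the free
# parameters appear as columns in the result
prior_columns = ["beta_0", "omikron_0", "omikron_1", "lambda_0", "lambda_1"]
results_df = model.parallel_estimation(df, prior_columns)

print(results_df[["subj_num", "llh", "BIC"]])
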
estimation(df_subj_input)

Estimates the coefficients of the circular regression model.

Parameters:

df_subj_input : pd.DataFrame (required)
    Data frame containing subject-specific subset of data.

Returns:

list
    A list with the regression results.

Source code in rbmpy/circular_regression/RegressionParent.py
def estimation(self, df_subj_input: pd.DataFrame) -> list:
    """Estimates the coefficients of the circular regression model.

    Parameters
    ----------
    df_subj_input : pd.DataFrame
        Data frame containing subject-specific subset of data.

    Returns
    -------
    list
        A list with the regression results.
    """

    # Control random number generator for reproducible results
    np.random.seed(self.seed)

    # Get data matrix required for the model from child class
    df_subj = self.get_datamat(df_subj_input)

    # Reset the index of this subset and keep the old index as a trial counter
    df_subj = df_subj.reset_index().rename(columns={"index": "trial"})

    # Select starting points and boundaries
    # -------------------------------------

    # Extract free parameters
    values = self.which_vars.values()

    # Select boundaries according to free parameters
    bnds = list(compress(self.bnds, values))

    # Initialize with unrealistically high negative log-likelihood and no estimate
    min_llh = 100000
    min_x = np.nan

    # Cycle over starting points
    for _ in range(0, self.n_sp):

        # Get project-specific starting points for child class
        x0 = self.get_starting_point()

        # Select starting points according to free parameters
        x0 = np.array(list(compress(x0, values)))

        # Estimate parameters
        res = minimize(
            self.llh,
            x0,
            args=(df_subj,),
            method="L-BFGS-B",
            options={"disp": False, "maxiter": 500},
            bounds=bnds,
        )

        # Parameter values
        x = res.x

        # Negative log-likelihood
        llh_sum = res.fun

    # Keep the parameter estimate with the lowest cumulative negative
    # log-likelihood across starting points
        if llh_sum < min_llh:
            min_llh = llh_sum
            min_x = x

    # Compute BIC
    bic = compute_bic(min_llh, sum(values), len(df_subj))

    # Add results to list
    results_list = list()
    for i in range(len(min_x)):
        results_list.append(float(min_x[i]))

    # Extract group and ID for output
    group = int(pd.unique(df_subj["group"])[0])
    subj_num = int(pd.unique(df_subj["subj_num"])[0])
    subj_id = pd.unique(df_subj["ID"])[0]

    # Add log-likelihood, BIC, group, subject number, and ID to output
    results_list.append(float(min_llh))
    results_list.append(float(bic))
    results_list.append(group)
    results_list.append(subj_num)
    results_list.append(subj_id)

    return results_list
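
The BIC is computed by compute_bic from the minimized negative log-likelihood, the number of free parameters, and the number of trials. As a reference point only, here is a sketch assuming the standard definition; the actual compute_bic in rbmpy may differ in detail.

import numpy as np

def compute_bic_sketch(nll: float, n_params: int, n_trials: int) -> float:
    # Standard BIC with a summed negative log-likelihood nll:
    # BIC = 2 * nll + n_params * ln(n_trials)
    return 2 * nll + n_params * np.log(n_trials)
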
llh(coeffs, df)

Computes the likelihood of participant updates, given the specified parameters.

Parameters:

coeffs : np.ndarray (required)
    Regression coefficients.
df : pd.DataFrame (required)
    Data frame containing subset of data.

Returns:

float
    Summed negative log-likelihood.

Source code in rbmpy/circular_regression/RegressionParent.py
def llh(self, coeffs: np.ndarray, df: pd.DataFrame) -> float:
    """Computes the likelihood of participant updates, given the specified parameters.

    Parameters
    ----------
    coeffs : np.ndarray
        Regression coefficients.
    df : pd.DataFrame
        Data frame containing subset of data.

    Returns
    -------
    float
        Summed negative log-likelihood.
    """

    # Initialize small value that replaces zero probabilities for numerical stability
    corrected_0_p = 1e-10

    # Extract parameters
    sel_coeffs = get_sel_coeffs(
        self.which_vars.items(), self.fixed_coeffs_reg, coeffs
    )

    # Linear regression component
    # ---------------------------

    # Create linear regression matrix
    lr_mat = df[self.which_update_regressors].to_numpy()

    # Linear regression parameters
    update_regressors = [
        value
        for key, value in sel_coeffs.items()
        if key not in ["omikron_0", "omikron_1", "lambda_0", "lambda_1"]
    ]

    # Compute predicted update
    a_t_hat = lr_mat @ np.array(update_regressors)  # matrix multiplication

    # Ensure value is in range [-pi, pi]
    a_t_hat = normalize_angle(a_t_hat)

    # Residuals
    if self.which_vars["omikron_1"]:

        # Compute updating noise
        concentration = residual_fun(
            abs(a_t_hat), sel_coeffs["omikron_0"], sel_coeffs["omikron_1"]
        )

    else:
        # Motor noise only
        concentration = np.repeat(sel_coeffs["omikron_0"], len(a_t_hat))

    # Compute probability density of update
    p_a_t = vonmises.pdf(df["a_t"], loc=a_t_hat, kappa=concentration)
    p_a_t[p_a_t == 0] = (
        corrected_0_p  # adjust zeros to small value for numerical stability
    )

    # Check for inf and nan
    if sum(np.isinf(p_a_t)) > 0:
        sys.exit("\np_a_t contains infs")
    elif sum(np.isnan(p_a_t)) > 0:
        sys.exit("\np_a_t contains nans")

    # Compute log-likelihood of linear regression
    llh_reg = np.log(p_a_t)

    # Check for inf and nan
    if sum(np.isinf(llh_reg)) > 0:
        sys.exit("llh_reg contains infs")
    elif sum(np.isnan(llh_reg)) > 0:
        sys.exit("llh_reg contains nans")

    # Identify perseveration trials
    pers = df["a_t"] == 0

    # Adjust for probabilities on the edge
    delta_fun = np.full(len(pers), np.nan)
    delta_fun[pers == 1] = 1 - corrected_0_p
    delta_fun[pers == 0] = corrected_0_p

    # Note: this has not been systematically validated with participant data.
    # The setup works for simulated data based on a uniform distribution, which
    # has more large-scale PEs than actual data. For actual data, adjustments
    # might be necessary, such as parameter ranges, parameter combinations, or
    # working in degrees instead of radians.
    lambda_t = None
    if self.which_vars["lambda_0"] and not self.which_vars["lambda_1"]:

        # Single average perseveration parameter lambda_0
        lambda_t = np.repeat(sel_coeffs["lambda_0"], len(pers))

    elif (not self.which_vars["lambda_0"] and self.which_vars["lambda_1"]) or (
        self.which_vars["lambda_0"] and self.which_vars["lambda_1"]
    ):

        # Logistic function combining both parameters
        lambda_t = compute_persprob(
            sel_coeffs["lambda_0"], sel_coeffs["lambda_1"], abs(a_t_hat)
        )

    if self.which_vars["lambda_0"] or self.which_vars["lambda_1"]:

        lambda_t[lambda_t == 0] = corrected_0_p
        lambda_t[lambda_t == 1] = 1 - corrected_0_p

        # Compute mixture between linear regression and perseveration model using lambda as weight
        llh_mix = logsumexp(
            [
                np.log(delta_fun) + np.log(lambda_t),
                np.log((1 - lambda_t)) + llh_reg,
            ],
            axis=0,
        )

        # Check for inf and nan
        if sum(np.isinf(llh_mix)) > 0:
            sys.exit("llh_mix contains infs")
        elif sum(np.isnan(llh_mix)) > 0:
            sys.exit("llh_mix contains nans")

        # Compute negative log-likelihood
        llh_sum = -1 * np.sum(llh_mix)

    else:
        # Compute negative log-likelihood
        llh_sum = -1 * np.sum(llh_reg)

    # Check for inf and nan
    if np.isinf(llh_sum) or np.isnan(llh_sum):
        sys.exit("\nllh incorrect")

    if self.use_prior:

        # Extract free parameters
        values = self.which_vars.values()

        # Prior mean and variance
        prior_mean = np.array(list(compress(self.prior_mean, values)))
        prior_width = np.array(list(compress(self.prior_width, values)))

    # Compute coefficient probabilities
        prior_prob = norm.pdf(coeffs, loc=prior_mean, scale=prior_width)

        # Set a minimum prior probability threshold before taking log
        prior_prob = np.maximum(prior_prob, corrected_0_p)

        # Adjust the negative log-likelihood
        llh_sum -= np.sum(np.log(prior_prob))

    return llh_sum
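
The mixture step above evaluates log(lambda_t * delta_fun + (1 - lambda_t) * p_reg) per trial without leaving log space, which avoids underflow for very small densities. A standalone numeric sketch of that logsumexp identity, with made-up values:

import numpy as np
from scipy.special import logsumexp

# Made-up per-trial values for illustration
lambda_t = np.array([0.2, 0.8])           # perseveration probability
delta_fun = np.array([1e-10, 1 - 1e-10])  # edge-adjusted point mass at a_t == 0
llh_reg = np.log(np.array([0.5, 0.3]))    # log density under the regression model

# Stable computation in log space, as in llh above
llh_mix = logsumexp(
    [np.log(delta_fun) + np.log(lambda_t),
     np.log(1 - lambda_t) + llh_reg],
    axis=0,
)

# The direct computation agrees up to floating-point error
direct = np.log(lambda_t * delta_fun + (1 - lambda_t) * np.exp(llh_reg))
assert np.allclose(llh_mix, direct)
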
get_datamat(df_subj_input) staticmethod

Raises an error if the get_datamat function is undefined in the project-specific regression.

Source code in rbmpy/circular_regression/RegressionParent.py
@staticmethod
def get_datamat(df_subj_input):
    """Raises an error if the get_datamat function is undefined in the
    project-specific regression.
    """
    raise NotImplementedError("Subclass needs to define this.")
get_starting_point()

Raises an error if the get_starting_point function is undefined in the project-specific regression.

Source code in rbmpy/circular_regression/RegressionParent.py
def get_starting_point(self):
    """Raises an error if the get_starting_point function is undefined in the
    project-specific regression.
    """
    raise NotImplementedError("Subclass needs to define this.")
sample_data(df_params, n_trials, all_sub_data=None)

Raises an error if the sample_data function is undefined in the project-specific regression.

Source code in rbmpy/circular_regression/RegressionParent.py
def sample_data(self, df_params, n_trials, all_sub_data=None):
    """Raises an error if the sample_data function is undefined in the
    project-specific regression.
    """
    raise NotImplementedError("Subclass needs to define this.")
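
Taken together, a concrete child class has to supply the three methods above. A bare-bones, hypothetical sketch follows; the column names, starting values, and import path are assumptions based on the source path shown above, and the real child classes in rbmpy define their own data matrices and starting points.

import numpy as np
import pandas as pd

from rbmpy.circular_regression.RegressionParent import RegressionParent

class SketchRegression(RegressionParent):
    """Hypothetical minimal child class for illustration."""

    @staticmethod
    def get_datamat(df_subj_input):
        # Assemble the trial-by-trial data matrix the model expects: the
        # participant update, one update regressor, and bookkeeping columns
        df_subj = pd.DataFrame()
        df_subj["a_t"] = df_subj_input["a_t"]
        df_subj["delta_t"] = df_subj_input["delta_t"]
        for col in ["group", "subj_num", "ID"]:
            df_subj[col] = df_subj_input[col]
        return df_subj

    def get_starting_point(self):
        # One starting value per parameter in which_vars order,
        # optionally randomized when rand_sp is set
        if self.rand_sp:
            return list(np.random.uniform(0.1, 1.0, size=len(self.which_vars)))
        return [0.5] * len(self.which_vars)

    def sample_data(self, df_params, n_trials, all_sub_data=None):
        # Data simulation is project specific and omitted in this sketch
        raise NotImplementedError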