lass="p">; i < tries; i++) { thr_init_sts.value = ioread32be(MTM_THR_INIT_STS(cpu)); if (thr_init_sts.thr_id == thr_init.thr_id) { if (thr_init_sts.bsy) continue; else if (thr_init_sts.err) pr_warn("Failed to thread init cpu %u\n", cpu); break; } pr_warn("Wrong thread id in thread init for cpu %u\n", cpu); break; } if (i == tries) pr_warn("Got thread init timeout for cpu %u\n", cpu); } int mtm_enable_thread(int cpu) { struct nps_host_reg_mtm_cfg mtm_cfg; if (NPS_CPU_TO_THREAD_NUM(cpu) == 0) return 1; /* Enable thread in mtm */ mtm_cfg.value = ioread32be(MTM_CFG(cpu)); mtm_cfg.ten |= (1 << (NPS_CPU_TO_THREAD_NUM(cpu))); iowrite32be(mtm_cfg.value, MTM_CFG(cpu)); return 0; } void mtm_enable_core(unsigned int cpu) { int i; struct nps_host_reg_aux_mt_ctrl mt_ctrl; struct nps_host_reg_mtm_cfg mtm_cfg; if (NPS_CPU_TO_THREAD_NUM(cpu) != 0) return; /* Initialize Number of Active Threads */ mtm_init_nat(cpu); /* Initialize mtm_cfg */ mtm_cfg.value = ioread32be(MTM_CFG(cpu)); mtm_cfg.ten = 1; iowrite32be(mtm_cfg.value, MTM_CFG(cpu)); /* Initialize all other threads in core */ for (i = 1; i < NPS_NUM_HW_THREADS; i++) mtm_init_thread(cpu + i); /* Enable HW schedule, stall counter, mtm */ mt_ctrl.value = 0; mt_ctrl.hsen = 1; mt_ctrl.hs_cnt = MT_CTRL_HS_CNT; mt_ctrl.sten = 1; mt_ctrl.st_cnt = MT_CTRL_ST_CNT; mt_ctrl.mten = 1; write_aux_reg(CTOP_AUX_MT_CTRL, mt_ctrl.value); /* * HW scheduling mechanism will start working * Only after call to instruction "schd.rw". * cpu_relax() calls "schd.rw" instruction. */ cpu_relax(); }