/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *   Atish Patra
 */

#include <sbi/riscv_asm.h>
#include <sbi/riscv_atomic.h>
#include <sbi/riscv_barrier.h>
#include <sbi/riscv_encoding.h>
#include <sbi/sbi_bitops.h>
#include <sbi/sbi_console.h>
#include <sbi/sbi_domain.h>
#include <sbi/sbi_ecall_interface.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h>
#include <sbi/sbi_hartmask.h>
#include <sbi/sbi_hsm.h>
#include <sbi/sbi_init.h>
#include <sbi/sbi_ipi.h>
#include <sbi/sbi_scratch.h>
#include <sbi/sbi_system.h>
#include <sbi/sbi_timer.h>

#define __sbi_hsm_hart_change_state(hdata, oldstate, newstate)		\
({									\
	long state = atomic_cmpxchg(&(hdata)->state, oldstate, newstate); \
	if (state != (oldstate))					\
		sbi_printf("%s: ERR: The hart is in invalid state [%lu]\n", \
			   __func__, state);				\
	state == (oldstate);						\
})

static const struct sbi_hsm_device *hsm_dev = NULL;
static unsigned long hart_data_offset;

/** Per hart specific data to manage state transition */
struct sbi_hsm_data {
	atomic_t state;
	unsigned long suspend_type;
	unsigned long saved_mie;
	unsigned long saved_mip;
};

bool sbi_hsm_hart_change_state(struct sbi_scratch *scratch, long oldstate,
			       long newstate)
{
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	return __sbi_hsm_hart_change_state(hdata, oldstate, newstate);
}

int __sbi_hsm_hart_get_state(u32 hartid)
{
	struct sbi_hsm_data *hdata;
	struct sbi_scratch *scratch;

	scratch = sbi_hartid_to_scratch(hartid);
	if (!scratch)
		return SBI_EINVAL;

	hdata = sbi_scratch_offset_ptr(scratch, hart_data_offset);

	return atomic_read(&hdata->state);
}

int sbi_hsm_hart_get_state(const struct sbi_domain *dom, u32 hartid)
{
	if (!sbi_domain_is_assigned_hart(dom, hartid))
		return SBI_EINVAL;

	return __sbi_hsm_hart_get_state(hartid);
}

/**
 * Get ulong HART mask for given HART base ID
 * @param dom the domain to be used for output HART mask
 * @param hbase the HART base ID
 * @param out_hmask the output ulong HART mask
 * @return 0 on success and SBI_Exxx (< 0) on failure
 * Note: the output HART mask will be set to zero on failure as well.
 */
int sbi_hsm_hart_interruptible_mask(const struct sbi_domain *dom,
				    ulong hbase, ulong *out_hmask)
{
	int hstate;
	ulong i, hmask, dmask;
	ulong hend = sbi_scratch_last_hartid() + 1;

	*out_hmask = 0;
	if (hend <= hbase)
		return SBI_EINVAL;
	if (BITS_PER_LONG < (hend - hbase))
		hend = hbase + BITS_PER_LONG;

	dmask = sbi_domain_get_assigned_hartmask(dom, hbase);
	for (i = hbase; i < hend; i++) {
		hmask = 1UL << (i - hbase);
		if (dmask & hmask) {
			hstate = __sbi_hsm_hart_get_state(i);
			if (hstate == SBI_HSM_STATE_STARTED ||
			    hstate == SBI_HSM_STATE_SUSPENDED)
				*out_hmask |= hmask;
		}
	}

	return 0;
}

void __noreturn sbi_hsm_hart_start_finish(struct sbi_scratch *scratch,
					  u32 hartid)
{
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_START_PENDING,
					 SBI_HSM_STATE_STARTED))
		sbi_hart_hang();

	sbi_hart_switch_mode(hartid, scratch->next_arg1, scratch->next_addr,
			     scratch->next_mode, false);
}

static void sbi_hsm_hart_wait(struct sbi_scratch *scratch, u32 hartid)
{
	unsigned long saved_mie;
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	/* Save MIE CSR */
	saved_mie = csr_read(CSR_MIE);

	/* Set MSIE and MEIE bits to receive IPI */
	csr_set(CSR_MIE, MIP_MSIP | MIP_MEIP);

	/* Wait for state transition requested by sbi_hsm_hart_start() */
	while (atomic_read(&hdata->state) != SBI_HSM_STATE_START_PENDING) {
		wfi();
	}

	/* Restore MIE CSR */
	csr_write(CSR_MIE, saved_mie);

	/*
	 * No need to clear IPI here because the sbi_ipi_init() will
	 * clear it for current HART via sbi_platform_ipi_init().
	 */
}

const struct sbi_hsm_device *sbi_hsm_get_device(void)
{
	return hsm_dev;
}

void sbi_hsm_set_device(const struct sbi_hsm_device *dev)
{
	if (!dev || hsm_dev)
		return;

	hsm_dev = dev;
}

static bool hsm_device_has_hart_hotplug(void)
{
	if (hsm_dev && hsm_dev->hart_start && hsm_dev->hart_stop)
		return true;
	return false;
}

static bool hsm_device_has_hart_secondary_boot(void)
{
	if (hsm_dev && hsm_dev->hart_start && !hsm_dev->hart_stop)
		return true;
	return false;
}

static int hsm_device_hart_start(u32 hartid, ulong saddr)
{
	if (hsm_dev && hsm_dev->hart_start)
		return hsm_dev->hart_start(hartid, saddr);
	return SBI_ENOTSUPP;
}

static int hsm_device_hart_stop(void)
{
	if (hsm_dev && hsm_dev->hart_stop)
		return hsm_dev->hart_stop();
	return SBI_ENOTSUPP;
}

static int hsm_device_hart_suspend(u32 suspend_type)
{
	if (hsm_dev && hsm_dev->hart_suspend)
		return hsm_dev->hart_suspend(suspend_type);
	return SBI_ENOTSUPP;
}

static void hsm_device_hart_resume(void)
{
	if (hsm_dev && hsm_dev->hart_resume)
		hsm_dev->hart_resume();
}

int sbi_hsm_init(struct sbi_scratch *scratch, u32 hartid, bool cold_boot)
{
	u32 i;
	struct sbi_scratch *rscratch;
	struct sbi_hsm_data *hdata;

	if (cold_boot) {
		hart_data_offset = sbi_scratch_alloc_offset(sizeof(*hdata));
		if (!hart_data_offset)
			return SBI_ENOMEM;

		/* Initialize hart state data for every hart */
		for (i = 0; i <= sbi_scratch_last_hartid(); i++) {
			rscratch = sbi_hartid_to_scratch(i);
			if (!rscratch)
				continue;

			hdata = sbi_scratch_offset_ptr(rscratch,
						       hart_data_offset);
			ATOMIC_INIT(&hdata->state,
				    (i == hartid) ?
				    SBI_HSM_STATE_START_PENDING :
				    SBI_HSM_STATE_STOPPED);
		}
	} else {
		sbi_hsm_hart_wait(scratch, hartid);
	}

	return 0;
}

void __noreturn sbi_hsm_exit(struct sbi_scratch *scratch)
{
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);
	void (*jump_warmboot)(void) =
		(void (*)(void))scratch->warmboot_addr;

	if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_STOP_PENDING,
					 SBI_HSM_STATE_STOPPED))
		goto fail_exit;

	if (hsm_device_has_hart_hotplug()) {
		if (hsm_device_hart_stop() != SBI_ENOTSUPP)
			goto fail_exit;
	}

	/**
	 * As the platform lacks support for hotplug, directly jump to
	 * warmboot and wait for interrupts there. We do it preemptively
	 * in order to preserve the hart states and reuse the code path
	 * for hotplug.
	 */
	jump_warmboot();

fail_exit:
	/* It should never reach here */
	sbi_printf("ERR: Failed to stop hart [%u]\n", current_hartid());
	sbi_hart_hang();
}

int sbi_hsm_hart_start(struct sbi_scratch *scratch,
		       const struct sbi_domain *dom,
		       u32 hartid, ulong saddr, ulong smode, ulong arg1)
{
	unsigned long init_count;
	unsigned int hstate;
	struct sbi_scratch *rscratch;
	struct sbi_hsm_data *hdata;

	/* For now, we only allow start mode to be S-mode or U-mode. */
	if (smode != PRV_S && smode != PRV_U)
		return SBI_EINVAL;
	if (dom && !sbi_domain_is_assigned_hart(dom, hartid))
		return SBI_EINVAL;
	if (dom && !sbi_domain_check_addr(dom, saddr, smode,
					  SBI_DOMAIN_EXECUTE))
		return SBI_EINVALID_ADDR;

	rscratch = sbi_hartid_to_scratch(hartid);
	if (!rscratch)
		return SBI_EINVAL;

	hdata = sbi_scratch_offset_ptr(rscratch, hart_data_offset);
	hstate = atomic_cmpxchg(&hdata->state, SBI_HSM_STATE_STOPPED,
				SBI_HSM_STATE_START_PENDING);
	if (hstate == SBI_HSM_STATE_STARTED)
		return SBI_EALREADY;

	/**
	 * If a hart is already transitioning to start or stop, another
	 * start call is considered an invalid request.
	 */
	if (hstate != SBI_HSM_STATE_STOPPED)
		return SBI_EINVAL;

	init_count = sbi_init_count(hartid);
	rscratch->next_arg1 = arg1;
	rscratch->next_addr = saddr;
	rscratch->next_mode = smode;

	if (hsm_device_has_hart_hotplug() ||
	    (hsm_device_has_hart_secondary_boot() && !init_count)) {
		return hsm_device_hart_start(hartid, scratch->warmboot_addr);
	} else {
		int rc = sbi_ipi_raw_send(hartid);

		if (rc)
			return rc;
	}

	return 0;
}

int sbi_hsm_hart_stop(struct sbi_scratch *scratch, bool exitnow)
{
	const struct sbi_domain *dom = sbi_domain_thishart_ptr();
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	if (!dom)
		return SBI_EFAIL;

	if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_STARTED,
					 SBI_HSM_STATE_STOP_PENDING))
		return SBI_EFAIL;

	if (exitnow)
		sbi_exit(scratch);

	return 0;
}

static int __sbi_hsm_suspend_default(struct sbi_scratch *scratch)
{
	/* Wait for interrupt */
	wfi();

	return 0;
}

void __sbi_hsm_suspend_non_ret_save(struct sbi_scratch *scratch)
{
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	/*
	 * We will be resuming in the warm-boot path so the MIE and MIP
	 * CSRs will be back to their initial state. It is possible that
	 * the HART has configured a timer event before going to suspend
	 * state so we should save the MIE and MIP CSRs and restore them
	 * after resuming.
	 *
	 * Further, the M-mode bits in MIP CSR are read-only and set by
	 * external devices (such as interrupt controller) whereas all
	 * VS-mode bits in MIP are read-only alias of bits in HVIP CSR.
	 *
	 * This means we should only save/restore S-mode bits of MIP CSR
	 * such as MIP.SSIP and MIP.STIP.
	 */

	hdata->saved_mie = csr_read(CSR_MIE);
	hdata->saved_mip = csr_read(CSR_MIP) & (MIP_SSIP | MIP_STIP);
}

static void __sbi_hsm_suspend_non_ret_restore(struct sbi_scratch *scratch)
{
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	csr_write(CSR_MIE, hdata->saved_mie);
	csr_set(CSR_MIP, (hdata->saved_mip & (MIP_SSIP | MIP_STIP)));
}

void sbi_hsm_hart_resume_start(struct sbi_scratch *scratch)
{
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	/* If current HART was SUSPENDED then set RESUME_PENDING state */
	if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_SUSPENDED,
					 SBI_HSM_STATE_RESUME_PENDING))
		sbi_hart_hang();

	hsm_device_hart_resume();
}

void __noreturn sbi_hsm_hart_resume_finish(struct sbi_scratch *scratch,
					   u32 hartid)
{
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	/* If current HART was RESUME_PENDING then set STARTED state */
	if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_RESUME_PENDING,
					 SBI_HSM_STATE_STARTED))
		sbi_hart_hang();

	/*
	 * Restore some of the M-mode CSRs which were re-configured by
	 * the warm-boot sequence.
	 */
	__sbi_hsm_suspend_non_ret_restore(scratch);

	sbi_hart_switch_mode(hartid, scratch->next_arg1, scratch->next_addr,
			     scratch->next_mode, false);
}

int sbi_hsm_hart_suspend(struct sbi_scratch *scratch, u32 suspend_type,
			 ulong raddr, ulong rmode, ulong arg1)
{
	int ret;
	const struct sbi_domain *dom = sbi_domain_thishart_ptr();
	struct sbi_hsm_data *hdata = sbi_scratch_offset_ptr(scratch,
							    hart_data_offset);

	/* Sanity check on domain assigned to current HART */
	if (!dom)
		return SBI_EFAIL;

	/* Sanity check on suspend type */
	if (SBI_HSM_SUSPEND_RET_DEFAULT < suspend_type &&
	    suspend_type < SBI_HSM_SUSPEND_RET_PLATFORM)
		return SBI_EINVAL;
	if (SBI_HSM_SUSPEND_NON_RET_DEFAULT < suspend_type &&
	    suspend_type < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
		return SBI_EINVAL;

	/* Additional sanity check for non-retentive suspend */
	if (suspend_type & SBI_HSM_SUSP_NON_RET_BIT) {
		/*
		 * For now, we only allow non-retentive suspend from
		 * S-mode or U-mode.
		 */
		if (rmode != PRV_S && rmode != PRV_U)
			return SBI_EFAIL;
		if (dom && !sbi_domain_check_addr(dom, raddr, rmode,
						  SBI_DOMAIN_EXECUTE))
			return SBI_EINVALID_ADDR;
	}

	/* Save the resume address and resume mode */
	scratch->next_arg1 = arg1;
	scratch->next_addr = raddr;
	scratch->next_mode = rmode;

	/* Directly move from STARTED to SUSPENDED state */
	if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_STARTED,
					 SBI_HSM_STATE_SUSPENDED))
		return SBI_EFAIL;

	/* Save the suspend type */
	hdata->suspend_type = suspend_type;

	/*
	 * Save context which will be restored after resuming from
	 * non-retentive suspend.
	 */
	if (suspend_type & SBI_HSM_SUSP_NON_RET_BIT)
		__sbi_hsm_suspend_non_ret_save(scratch);

	/* Try platform specific suspend */
	ret = hsm_device_hart_suspend(suspend_type);
	if (ret == SBI_ENOTSUPP) {
		/* Try generic implementation of default suspend types */
		if (suspend_type == SBI_HSM_SUSPEND_RET_DEFAULT ||
		    suspend_type == SBI_HSM_SUSPEND_NON_RET_DEFAULT) {
			ret = __sbi_hsm_suspend_default(scratch);
		}
	}

	/*
	 * The platform may have coordinated a retentive suspend, or it may
	 * have exited early from a non-retentive suspend. Either way, the
	 * caller is not expecting a successful return, so jump to the warm
	 * boot entry point to simulate resume from a non-retentive suspend.
	 */
	if (ret == 0 && (suspend_type & SBI_HSM_SUSP_NON_RET_BIT)) {
		void (*jump_warmboot)(void) =
			(void (*)(void))scratch->warmboot_addr;

		jump_warmboot();
	}

	/*
	 * We might have successfully resumed from retentive suspend
	 * or suspend failed. In both cases, we restore the state of
	 * the hart.
	 */
	if (!__sbi_hsm_hart_change_state(hdata, SBI_HSM_STATE_SUSPENDED,
					 SBI_HSM_STATE_STARTED))
		sbi_hart_hang();

	return ret;
}