* ufshcd_suspend() re-enabling regulators while vreg is still
* in low-power mode.
*/
- ufs_mtk_vreg_set_lpm(hba, true);
err = ufs_mtk_mphy_power_on(hba, false);
if (err)
goto fail;
{
int err;
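
+	/*
+	 * The normal system/runtime resume path already takes the regulators
+	 * out of low-power mode before ufshcd resumes the host. If resume
+	 * runs while the host is not operational (e.g. during error
+	 * recovery), the regulators may still be in LPM, so restore them
+	 * here before powering on the M-PHY.
+	 */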
+ if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+ ufs_mtk_vreg_set_lpm(hba, false);
+
err = ufs_mtk_mphy_power_on(hba, true);
if (err)
goto fail;
- ufs_mtk_vreg_set_lpm(hba, false);
-
	if (ufshcd_is_link_hibern8(hba)) {
		err = ufs_mtk_link_set_hpm(hba);
		if (err)
			goto fail;
	}

	return 0;
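
+/*
+ * System PM: switch the device regulators to low-power mode only after
+ * ufshcd_system_suspend() has completed, so they are not put into LPM
+ * while the host is still suspending the device.
+ */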
+int ufs_mtk_system_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ufshcd_system_suspend(dev);
+ if (ret)
+ return ret;
+
+ ufs_mtk_vreg_set_lpm(hba, true);
+
+ return 0;
+}
+
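+/*
+ * System PM: take the regulators out of low-power mode before
+ * ufshcd_system_resume() starts accessing the device again.
+ */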
+int ufs_mtk_system_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ufs_mtk_vreg_set_lpm(hba, false);
+
+ return ufshcd_system_resume(dev);
+}
+
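+/* Runtime PM: same ordering as ufs_mtk_system_suspend() above. */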
+int ufs_mtk_runtime_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = ufshcd_runtime_suspend(dev);
+ if (ret)
+ return ret;
+
+ ufs_mtk_vreg_set_lpm(hba, true);
+
+ return 0;
+}
+
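+/* Runtime PM: same ordering as ufs_mtk_system_resume() above. */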
+int ufs_mtk_runtime_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ufs_mtk_vreg_set_lpm(hba, false);
+
+ return ufshcd_runtime_resume(dev);
+}
+
static const struct dev_pm_ops ufs_mtk_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
- SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
+ ufs_mtk_system_resume)
+ SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
+ ufs_mtk_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};