From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Tomas Winkler
Date: Wed, 28 Jan 2015 17:01:34 +0200
Subject: [PATCH] mmc: block: register RPMB partition with the RPMB subsystem

Register the eMMC RPMB partition with the RPMB subsystem and provide an
implementation of the RPMB access operations, abstracting the actual
multi-step access process.

V2: resend
V3: commit message fix
V4: Kconfig: use select RPMB to ensure valid configuration
    Switch back to main area after RPMB access
V5: Revamp code using new sequence command
    Support for 8K packets in e.MMC v5.1
V6: Resend.
V7: Resend.
V8: Rebase after block.c was moved under core/
    Rebase for 4.14
V9: Rebase for 4.16 and 4.17
    Build RPMB connection above ioctl layer
    Supply RPMB capabilities.

Change-Id: I6de67f475ef738e30dc3b8c78185a1bee24595b2
Signed-off-by: Tomas Winkler
Signed-off-by: Alexander Usyskin
Tested-by: Avri Altman
---
 drivers/mmc/core/Kconfig |   1 +
 drivers/mmc/core/block.c | 216 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 217 insertions(+)

diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 42e89060cd41..96c7ff63178c 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -36,6 +36,7 @@ config PWRSEQ_SIMPLE
 config MMC_BLOCK
 	tristate "MMC block device driver"
 	depends on BLOCK
+	select RPMB
 	default y
 	help
 	  Say Y here to enable the MMC block device driver support.
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index c2c45c148ae7..9696ecb97927 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -44,6 +44,7 @@
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
 #include <linux/mmc/sd.h>
+#include <linux/rpmb.h>
 
 #include <linux/uaccess.h>
 
@@ -1096,6 +1097,217 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
 	blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
 }
 
+static int mmc_blk_rpmb_process(struct mmc_blk_data *md,
+				struct mmc_blk_ioc_data *idata[],
+				u64 num_of_cmds)
+{
+	struct mmc_card *card;
+	struct mmc_queue *mq;
+	int err = 0;
+	struct request *req;
+	int op_mode;
+
+	card = md->queue.card;
+	if (IS_ERR(card)) {
+		err = PTR_ERR(card);
+		goto cmd_err;
+	}
+
+	/*
+	 * Dispatch the ioctl()s into the block request queue.
+	 */
+	mq = &md->queue;
+	op_mode = idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
+	req = blk_get_request(mq->queue, op_mode, 0);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto cmd_err;
+	}
+
+	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL_RPMB;
+	req_to_mmc_queue_req(req)->drv_op_data = idata;
+	req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
+
+	blk_execute_rq(mq->queue, NULL, req, 0);
+
+	err = req_to_mmc_queue_req(req)->drv_op_result;
+
+	blk_put_request(req);
+
+cmd_err:
+	return err;
+}
+
+static
+struct mmc_blk_ioc_data *mmc_blk_rpmb_cmd_to_ioc_data(struct rpmb_cmd *cmd)
+{
+	struct mmc_blk_ioc_data *idata;
+	int err;
+
+	idata = kzalloc(sizeof(*idata), GFP_KERNEL);
+	if (!idata) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	if (cmd->flags & RPMB_F_WRITE) {
+		idata->ic.opcode = MMC_WRITE_MULTIPLE_BLOCK;
+		idata->ic.write_flag = 1;
+		if (cmd->flags & RPMB_F_REL_WRITE)
+			idata->ic.write_flag |= 1 << 31;
+	} else {
+		idata->ic.opcode = MMC_READ_MULTIPLE_BLOCK;
+	}
+
+	/* nframes == 0 in case there is only meta data in the frame */
+	idata->ic.blocks = cmd->nframes ?: 1;
+	idata->ic.blksz = 512;
+
+	idata->buf_bytes = (u64)idata->ic.blksz * idata->ic.blocks;
+	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
+		err = -EOVERFLOW;
+		goto out;
+	}
+
+	idata->buf = (unsigned char *)cmd->frames;
+
+	return idata;
+out:
+	kfree(idata);
+	return ERR_PTR(err);
+}
+
+static int mmc_blk_rpmb_cmd_seq(struct device *dev, u8 target,
+				struct rpmb_cmd *cmds,
+				u32 num_of_cmds)
+{
+	struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
+	struct mmc_blk_ioc_data **idata;
+	int err = 0;
+	u32 i;
+
+	if (!rpmb)
+		return -ENODEV;
+
+	idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
+	if (!idata)
+		return -ENOMEM;
+
+	for (i = 0; i < num_of_cmds; i++) {
+		idata[i] = mmc_blk_rpmb_cmd_to_ioc_data(&cmds[i]);
+		if (IS_ERR(idata[i])) {
+			err = PTR_ERR(idata[i]);
+			num_of_cmds = i;
+			goto cmd_err;
+		}
+		idata[i]->rpmb = rpmb;
+	}
+
+	get_device(&rpmb->dev);
+	mmc_blk_get(rpmb->md->disk);
+
+	err = mmc_blk_rpmb_process(rpmb->md, idata, num_of_cmds);
+
+	put_device(&rpmb->dev);
+	mmc_blk_put(rpmb->md);
+
+cmd_err:
+	for (i = 0; i < num_of_cmds; i++)
+		kfree(idata[i]);
+
+	kfree(idata);
+
+	return err;
+}
+
+static int mmc_blk_rpmb_get_capacity(struct device *dev, u8 target)
+{
+	struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev);
+	struct mmc_card *card;
+
+	card = rpmb->md->queue.card;
+	return card->ext_csd.raw_rpmb_size_mult;
+}
+
+static struct rpmb_ops mmc_rpmb_dev_ops = {
+	.cmd_seq = mmc_blk_rpmb_cmd_seq,
+	.get_capacity = mmc_blk_rpmb_get_capacity,
+	.type = RPMB_TYPE_EMMC,
+	.auth_method = RPMB_HMAC_ALGO_SHA_256,
+};
+
+static void mmc_blk_rpmb_unset_dev_id(struct rpmb_ops *ops)
+{
+	kfree(ops->dev_id);
+	ops->dev_id = NULL;
+}
+
+static int mmc_blk_rpmb_set_dev_id(struct rpmb_ops *ops, struct mmc_card *card)
+{
+	char *id;
+
+	id = kmalloc(sizeof(card->raw_cid), GFP_KERNEL);
+	if (!id)
+		return -ENOMEM;
+
+	memcpy(id, card->raw_cid, sizeof(card->raw_cid));
+	ops->dev_id = id;
+	ops->dev_id_len = sizeof(card->raw_cid);
+
+	return 0;
+}
+
+static void mmc_blk_rpmb_set_cap(struct rpmb_ops *ops,
+				 struct mmc_card *card)
+{
+	u16 rel_wr_cnt;
+
+	/* RPMB blocks are written in half sectors hence '* 2' */
+	rel_wr_cnt = card->ext_csd.rel_sectors * 2;
+	/* eMMC 5.1 may support RPMB 8K (32) frames */
+	if (card->ext_csd.rev >= 8) {
+		if (card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)
+			rel_wr_cnt = 32;
+		else
+			rel_wr_cnt = 2;
+	}
+	ops->wr_cnt_max = rel_wr_cnt;
+	ops->rd_cnt_max = card->host->max_blk_count;
+	ops->block_size = 1; /* 256B */
+}
+
+static void mmc_blk_rpmb_add(struct mmc_card *card)
+{
+	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
+	struct rpmb_dev *rdev;
+	struct mmc_rpmb_data *rpmb;
+	u8 i = 0;
+
+	mmc_blk_rpmb_set_dev_id(&mmc_rpmb_dev_ops, card);
+	mmc_blk_rpmb_set_cap(&mmc_rpmb_dev_ops, card);
+
+	/* Add RPMB partitions */
+	list_for_each_entry(rpmb, &md->rpmbs, node) {
+		rdev = rpmb_dev_register(&rpmb->dev, i++, &mmc_rpmb_dev_ops);
+		if (IS_ERR(rdev)) {
+			pr_warn("%s: cannot register to rpmb %ld\n",
+				dev_name(&rpmb->dev), PTR_ERR(rdev));
+		}
+	}
+}
+
+static void mmc_blk_rpmb_remove(struct mmc_card *card)
+{
+	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
+	struct mmc_rpmb_data *rpmb;
+	u8 i = 0;
+
+	list_for_each_entry(rpmb, &md->rpmbs, node)
+		rpmb_dev_unregister_by_device(&rpmb->dev, i++);
+
+	mmc_blk_rpmb_unset_dev_id(&mmc_rpmb_dev_ops);
+}
+
 static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->blkdata;
@@ -2932,6 +3144,9 @@ static int mmc_blk_probe(struct mmc_card *card)
 		goto out;
 	}
 
+	/* Add rpmb layer */
+	mmc_blk_rpmb_add(card);
+
 	/* Add two debugfs entries */
 	mmc_blk_add_debugfs(card, md);
 
@@ -2960,6 +3175,7 @@ static void mmc_blk_remove(struct mmc_card *card)
 	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
 
 	mmc_blk_remove_debugfs(card, md);
+	mmc_blk_rpmb_remove(card);
 	mmc_blk_remove_parts(card, md);
 	pm_runtime_get_sync(&card->dev);
 	if (md->part_curr != md->part_type) {
-- 
https://clearlinux.org
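
Note (not part of the patch): for readers unfamiliar with the "multi step process"
the cmd_seq op abstracts, below is a minimal sketch of how an in-kernel RPMB client
could describe the two-step "read write counter" exchange that eventually lands in
mmc_blk_rpmb_cmd_seq() above. struct rpmb_cmd, its flags/nframes/frames fields,
RPMB_F_WRITE and struct rpmb_dev are taken from this patch's usage of <linux/rpmb.h>;
rpmb_cmd_seq() is assumed to be the subsystem's client-side entry point (only the
provider-side rpmb_dev_register()/rpmb_dev_unregister_by_device() calls appear here),
and struct rpmb_frame_sketch is a simplified local stand-in for the JEDEC eMMC RPMB
data frame, not the subsystem's real frame type.

/* Illustrative sketch only; see assumptions in the note above. */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rpmb.h>

/* Simplified stand-in for the 512-byte JEDEC RPMB data frame. */
struct rpmb_frame_sketch {
	u8     stuff[196];
	u8     key_mac[32];
	u8     data[256];
	u8     nonce[16];
	__be32 write_counter;
	__be16 addr;
	__be16 block_count;
	__be16 result;
	__be16 req_resp;
} __packed;

/* JEDEC request type for "read write counter". */
#define RPMB_SKETCH_REQ_GET_WRITE_COUNTER	0x0002

static int example_read_write_counter(struct rpmb_dev *rdev)
{
	struct rpmb_frame_sketch req = {}, resp = {};
	struct rpmb_cmd cmds[2] = {};

	/* Step 1: a WRITE command carrying the counter-read request frame. */
	req.req_resp    = cpu_to_be16(RPMB_SKETCH_REQ_GET_WRITE_COUNTER);
	cmds[0].flags   = RPMB_F_WRITE;
	cmds[0].nframes = 1;
	cmds[0].frames  = (void *)&req;

	/* Step 2: a READ command fetching the response frame with the counter. */
	cmds[1].flags   = 0;
	cmds[1].nframes = 1;
	cmds[1].frames  = (void *)&resp;

	/* Assumed subsystem helper that fans the sequence out to ops->cmd_seq(). */
	return rpmb_cmd_seq(rdev, cmds, ARRAY_SIZE(cmds));
}

On eMMC this maps onto MMC_WRITE_MULTIPLE_BLOCK followed by MMC_READ_MULTIPLE_BLOCK
against the RPMB partition, which is exactly the translation mmc_blk_rpmb_cmd_to_ioc_data()
performs per command before the whole sequence is queued by mmc_blk_rpmb_process().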