blob: ab1c332c3856ce41e9a6c55e401760069873fff2 [file] [log] [blame]
Andrew Jeffery1e531af2018-08-07 13:32:57 +09301// SPDX-License-Identifier: Apache-2.0
2// Copyright (C) 2018 IBM Corp.
3#include "config.h"
4
5#include <errno.h>
6#include <stdint.h>
Evan Lojewskif1e547c2019-03-14 14:34:33 +10307#include <stdio.h>
Stewart Smithef0c8362018-11-19 13:49:46 +11008#include <unistd.h>
Andrew Jeffery1e531af2018-08-07 13:32:57 +09309
Evan Lojewskif1e547c2019-03-14 14:34:33 +103010#include "backend.h"
Andrew Jeffery5335f092018-08-09 14:56:08 +093011#include "common.h"
Andrew Jeffery1e531af2018-08-07 13:32:57 +093012#include "lpc.h"
Andrew Jefferycb935042019-03-15 09:54:33 +103013#include "mboxd.h"
14#include "protocol.h"
Andrew Jeffery1e531af2018-08-07 13:32:57 +093015#include "windows.h"
16
Stewart Smithef0c8362018-11-19 13:49:46 +110017
Andrew Jeffery26558db2018-08-10 00:22:38 +093018#define BLOCK_SIZE_SHIFT_V1 12 /* 4K */
19
Andrew Jeffery0453aa42018-08-21 08:25:46 +093020static inline uint8_t protocol_get_bmc_event_mask(struct mbox_context *context)
21{
22 if (context->version == API_VERSION_1) {
23 return BMC_EVENT_V1_MASK;
24 }
25
26 return BMC_EVENT_V2_MASK;
27}
28
Andrew Jeffery5335f092018-08-09 14:56:08 +093029/*
Andrew Jefferyfe0c9e82018-11-01 14:02:17 +103030 * protocol_events_put() - Push the full set/cleared state of BMC events on the
31 * provided transport
32 * @context: The mbox context pointer
33 * @ops: The operations struct for the transport of interest
34 *
35 * Return: 0 on success otherwise negative error code
36 */
37int protocol_events_put(struct mbox_context *context,
38 const struct transport_ops *ops)
39{
40 const uint8_t mask = protocol_get_bmc_event_mask(context);
41
42 return ops->put_events(context, mask);
43}
44
45/*
46 * protocol_events_set() - Update the set BMC events on the active transport
Andrew Jeffery5335f092018-08-09 14:56:08 +093047 * @context: The mbox context pointer
48 * @bmc_event: The bits to set
Andrew Jeffery5335f092018-08-09 14:56:08 +093049 *
50 * Return: 0 on success otherwise negative error code
51 */
Andrew Jeffery2ebfd202018-08-20 11:46:28 +093052int protocol_events_set(struct mbox_context *context, uint8_t bmc_event)
Andrew Jeffery5335f092018-08-09 14:56:08 +093053{
Andrew Jeffery0453aa42018-08-21 08:25:46 +093054 const uint8_t mask = protocol_get_bmc_event_mask(context);
Andrew Jeffery5335f092018-08-09 14:56:08 +093055
Andrew Jeffery0453aa42018-08-21 08:25:46 +093056 /*
57 * Store the raw value, as we may up- or down- grade the protocol
58 * version and subsequently need to flush the appropriate set. Instead
59 * we pass the masked value through to the transport
60 */
61 context->bmc_events |= bmc_event;
Andrew Jeffery5335f092018-08-09 14:56:08 +093062
Andrew Jefferyf62601b2018-11-01 13:44:25 +103063 return context->transport->set_events(context, bmc_event, mask);
Andrew Jeffery5335f092018-08-09 14:56:08 +093064}
65
66/*
Andrew Jefferyfe0c9e82018-11-01 14:02:17 +103067 * protocol_events_clear() - Update the cleared BMC events on the active
68 * transport
Andrew Jeffery5335f092018-08-09 14:56:08 +093069 * @context: The mbox context pointer
70 * @bmc_event: The bits to clear
Andrew Jeffery5335f092018-08-09 14:56:08 +093071 *
72 * Return: 0 on success otherwise negative error code
73 */
Andrew Jeffery2ebfd202018-08-20 11:46:28 +093074int protocol_events_clear(struct mbox_context *context, uint8_t bmc_event)
Andrew Jeffery5335f092018-08-09 14:56:08 +093075{
Andrew Jeffery0453aa42018-08-21 08:25:46 +093076 const uint8_t mask = protocol_get_bmc_event_mask(context);
77
78 context->bmc_events &= ~bmc_event;
79
Andrew Jefferyf62601b2018-11-01 13:44:25 +103080 return context->transport->clear_events(context, bmc_event, mask);
Andrew Jeffery5335f092018-08-09 14:56:08 +093081}
82
Evan Lojewskif1e547c2019-03-14 14:34:33 +103083static int protocol_negotiate_version(struct mbox_context *context,
84 uint8_t requested);
85
Andrew Jefferycb935042019-03-15 09:54:33 +103086static int protocol_v1_reset(struct mbox_context *context)
Andrew Jefferyab666a52018-08-07 14:28:09 +093087{
Evan Lojewskif1e547c2019-03-14 14:34:33 +103088 return __protocol_reset(context);
Andrew Jefferyab666a52018-08-07 14:28:09 +093089}
90
Andrew Jefferycb935042019-03-15 09:54:33 +103091static int protocol_negotiate_version(struct mbox_context *context,
92 uint8_t requested);
93
94static int protocol_v1_get_info(struct mbox_context *context,
95 struct protocol_get_info *io)
Andrew Jeffery1e531af2018-08-07 13:32:57 +093096{
97 uint8_t old_version = context->version;
98 int rc;
99
100 /* Bootstrap protocol version. This may involve {up,down}grading */
101 rc = protocol_negotiate_version(context, io->req.api_version);
102 if (rc < 0)
103 return rc;
104
105 /* Do the {up,down}grade if necessary*/
106 if (rc != old_version) {
Andrew Jeffery2ebfd202018-08-20 11:46:28 +0930107 /* Doing version negotiation, don't alert host to reset */
108 windows_reset_all(context);
Andrew Jeffery1e531af2018-08-07 13:32:57 +0930109 return context->protocol->get_info(context, io);
110 }
111
112 /* Record the negotiated version for the response */
113 io->resp.api_version = rc;
114
115 /* Now do all required intialisation for v1 */
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030116 context->backend.block_size_shift = BLOCK_SIZE_SHIFT_V1;
Andrew Jeffery1e531af2018-08-07 13:32:57 +0930117 MSG_INFO("Block Size: 0x%.8x (shift: %u)\n",
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030118 1 << context->backend.block_size_shift, context->backend.block_size_shift);
Andrew Jeffery1e531af2018-08-07 13:32:57 +0930119
120 /* Knowing blocksize we can allocate the window dirty_bytemap */
121 windows_alloc_dirty_bytemap(context);
122
123 io->resp.v1.read_window_size =
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030124 context->windows.default_size >> context->backend.block_size_shift;
Andrew Jeffery1e531af2018-08-07 13:32:57 +0930125 io->resp.v1.write_window_size =
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030126 context->windows.default_size >> context->backend.block_size_shift;
Andrew Jeffery1e531af2018-08-07 13:32:57 +0930127
128 return lpc_map_memory(context);
129}
130
Andrew Jefferycb935042019-03-15 09:54:33 +1030131static int protocol_v1_get_flash_info(struct mbox_context *context,
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030132 struct protocol_get_flash_info *io)
Andrew Jeffery91a87452018-08-07 14:54:14 +0930133{
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030134 io->resp.v1.flash_size = context->backend.flash_size;
135 io->resp.v1.erase_size = 1 << context->backend.erase_size_shift;
Andrew Jeffery91a87452018-08-07 14:54:14 +0930136
137 return 0;
138}
139
Andrew Jeffery1e531af2018-08-07 13:32:57 +0930140/*
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930141 * get_lpc_addr_shifted() - Get lpc address of the current window
142 * @context: The mbox context pointer
143 *
144 * Return: The lpc address to access that offset shifted by block size
145 */
146static inline uint16_t get_lpc_addr_shifted(struct mbox_context *context)
147{
148 uint32_t lpc_addr, mem_offset;
149
150 /* Offset of the current window in the reserved memory region */
151 mem_offset = context->current->mem - context->mem;
152 /* Total LPC Address */
153 lpc_addr = context->lpc_base + mem_offset;
154
155 MSG_DBG("LPC address of current window: 0x%.8x\n", lpc_addr);
156
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030157 return lpc_addr >> context->backend.block_size_shift;
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930158}
159
Stewart Smithef0c8362018-11-19 13:49:46 +1100160static inline int64_t blktrace_gettime(void)
161{
162 struct timespec ts;
163 int64_t n;
164
165 clock_gettime(CLOCK_REALTIME, &ts);
166 n = (int64_t)(ts.tv_sec) * (int64_t)1000000000 + (int64_t)(ts.tv_nsec);
167
168 return n;
169}
170
171static void blktrace_flush_start(struct mbox_context *context)
172{
173 struct blk_io_trace *trace = &context->trace;
174 struct timespec now;
175
176 if (!context->blktracefd)
177 return;
178
179 if (!context->blktrace_start) {
180 clock_gettime(CLOCK_REALTIME, &now);
181 context->blktrace_start = blktrace_gettime();
182 }
183
184 trace->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
185 trace->sequence++;
186 trace->time = blktrace_gettime() - context->blktrace_start;
187 trace->sector = context->current->flash_offset / 512;
188 trace->bytes = context->current->size;
189 if (context->current_is_write)
190 trace->action = BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_WRITE);
191 else
192 trace->action = BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_READ);
193 trace->pid = 0;
194 trace->device = 0;
195 trace->cpu = 0;
196 trace->error = 0;
197 trace->pdu_len = 0;
198 write(context->blktracefd, trace, sizeof(*trace));
199 trace->sequence++;
200 trace->time = blktrace_gettime() - context->blktrace_start;
201 trace->action &= ~BLK_TA_QUEUE;
202 trace->action |= BLK_TA_ISSUE;
203 write(context->blktracefd, trace, sizeof(*trace));
204}
205
206static void blktrace_flush_done(struct mbox_context *context)
207{
208 struct blk_io_trace *trace = &context->trace;
209
210 if (!context->blktracefd)
211 return;
212
213 trace->sequence++;
214 trace->time = blktrace_gettime() - context->blktrace_start;
215 trace->action &= ~BLK_TA_ISSUE;
216 trace->action |= BLK_TA_COMPLETE;
217 write(context->blktracefd, trace, sizeof(*trace));
218}
219
220static void blktrace_window_start(struct mbox_context *context)
221{
222 struct blk_io_trace *trace = &context->trace;
223
224 if (!context->blktracefd)
225 return;
226
227 if (!context->blktrace_start)
228 context->blktrace_start = blktrace_gettime();
229
230 trace->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
231 trace->sequence++;
232 trace->time = blktrace_gettime() - context->blktrace_start;
233 trace->action = BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_READ);
234 trace->pid = 0;
235 trace->device = 0;
236 trace->cpu = 0;
237 trace->error = 0;
238 trace->pdu_len = 0;
239}
240
/*
 * blktrace_window_done() - Emit QUEUE/ISSUE/COMPLETE records for a window
 *                          mapping started by blktrace_window_start()
 * @context: The mbox context pointer
 *
 * The QUEUE and ISSUE records deliberately reuse the timestamp captured in
 * blktrace_window_start() so the pair brackets the mapping work; only the
 * COMPLETE record takes a fresh timestamp. Trace writes are best-effort.
 */
static void blktrace_window_done(struct mbox_context *context)
{
	struct blk_io_trace *trace = &context->trace;

	if (!context->blktracefd)
		return;

	/* blktrace sectors are fixed 512-byte units */
	trace->sector = context->current->flash_offset / 512;
	trace->bytes = context->current->size;
	write(context->blktracefd, trace, sizeof(*trace));
	trace->sequence++;
	trace->action &= ~BLK_TA_QUEUE;
	trace->action |= BLK_TA_ISSUE;
	write(context->blktracefd, trace, sizeof(*trace));

	trace->sequence++;
	trace->time = blktrace_gettime() - context->blktrace_start;
	trace->action &= ~BLK_TA_ISSUE;
	trace->action |= BLK_TA_COMPLETE;
	write(context->blktracefd, trace, sizeof(*trace));
}
262
Andrew Jefferycb935042019-03-15 09:54:33 +1030263static int protocol_v1_create_window(struct mbox_context *context,
264 struct protocol_create_window *io)
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930265{
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030266 struct backend *backend = &context->backend;
267 uint32_t offset;
268 uint32_t size;
Andrew Jefferycb935042019-03-15 09:54:33 +1030269 int rc;
270
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030271 offset = io->req.offset << backend->block_size_shift;
272 size = io->req.size << backend->block_size_shift;
273 rc = backend_validate(backend, offset, size, io->req.ro);
Andrew Jefferycb935042019-03-15 09:54:33 +1030274 if (rc < 0) {
275 /* Backend does not allow window to be created. */
276 return rc;
277 }
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930278
279 /* Close the current window if there is one */
280 if (context->current) {
Andrew Jefferyf21c81c2018-08-09 13:57:46 +0930281 /* There is an implicit flush if it was a write window
282 *
283 * protocol_v2_create_window() calls
284 * protocol_v1_create_window(), so use indirect call to
285 * write_flush() to make sure we pick the right one.
286 */
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930287 if (context->current_is_write) {
Stewart Smithef0c8362018-11-19 13:49:46 +1100288 blktrace_flush_start(context);
Andrew Jefferyf21c81c2018-08-09 13:57:46 +0930289 rc = context->protocol->flush(context, NULL);
Stewart Smithef0c8362018-11-19 13:49:46 +1100290 blktrace_flush_done(context);
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930291 if (rc < 0) {
292 MSG_ERR("Couldn't Flush Write Window\n");
293 return rc;
294 }
295 }
Andrew Jeffery2ebfd202018-08-20 11:46:28 +0930296 windows_close_current(context, FLAGS_NONE);
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930297 }
298
299 /* Offset the host has requested */
300 MSG_INFO("Host requested flash @ 0x%.8x\n", offset);
301 /* Check if we have an existing window */
Stewart Smithef0c8362018-11-19 13:49:46 +1100302 blktrace_window_start(context);
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930303 context->current = windows_search(context, offset,
304 context->version == API_VERSION_1);
305
306 if (!context->current) { /* No existing window */
307 MSG_DBG("No existing window which maps that flash offset\n");
308 rc = windows_create_map(context, &context->current,
309 offset,
310 context->version == API_VERSION_1);
311 if (rc < 0) { /* Unable to map offset */
312 MSG_ERR("Couldn't create window mapping for offset 0x%.8x\n",
Andrew Jeffery4bcec8e2018-08-07 15:33:41 +0930313 offset);
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930314 return rc;
315 }
316 }
Stewart Smithef0c8362018-11-19 13:49:46 +1100317 blktrace_window_done(context);
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930318
Andrew Jeffery4bcec8e2018-08-07 15:33:41 +0930319 context->current_is_write = !io->req.ro;
320
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930321 MSG_INFO("Window @ %p for size 0x%.8x maps flash offset 0x%.8x\n",
322 context->current->mem, context->current->size,
323 context->current->flash_offset);
324
325 io->resp.lpc_address = get_lpc_addr_shifted(context);
326
327 return 0;
328}
329
Andrew Jefferycb935042019-03-15 09:54:33 +1030330static int protocol_v1_mark_dirty(struct mbox_context *context,
331 struct protocol_mark_dirty *io)
Andrew Jefferya336e432018-08-07 16:00:40 +0930332{
333 uint32_t offset = io->req.v1.offset;
334 uint32_t size = io->req.v1.size;
335 uint32_t off;
336
337 if (!(context->current && context->current_is_write)) {
338 MSG_ERR("Tried to call mark dirty without open write window\n");
339 return -EPERM;
340 }
341
342 /* For V1 offset given relative to flash - we want the window */
343 off = offset - ((context->current->flash_offset) >>
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030344 context->backend.block_size_shift);
Andrew Jefferya336e432018-08-07 16:00:40 +0930345 if (off > offset) { /* Underflow - before current window */
346 MSG_ERR("Tried to mark dirty before start of window\n");
347 MSG_ERR("requested offset: 0x%x window start: 0x%x\n",
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030348 offset << context->backend.block_size_shift,
Andrew Jefferya336e432018-08-07 16:00:40 +0930349 context->current->flash_offset);
350 return -EINVAL;
351 }
352 offset = off;
353 /*
354 * We only track dirty at the block level.
355 * For protocol V1 we can get away with just marking the whole
356 * block dirty.
357 */
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030358 size = align_up(size, 1 << context->backend.block_size_shift);
359 size >>= context->backend.block_size_shift;
Andrew Jefferya336e432018-08-07 16:00:40 +0930360
361 MSG_INFO("Dirty window @ 0x%.8x for 0x%.8x\n",
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030362 offset << context->backend.block_size_shift,
363 size << context->backend.block_size_shift);
Andrew Jefferya336e432018-08-07 16:00:40 +0930364
365 return window_set_bytemap(context, context->current, offset, size,
366 WINDOW_DIRTY);
367}
368
Andrew Jeffery9b920cf2018-08-07 22:49:19 +0930369static int generic_flush(struct mbox_context *context)
370{
371 int rc, i, offset, count;
372 uint8_t prev;
373
374 offset = 0;
375 count = 0;
376 prev = WINDOW_CLEAN;
377
378 MSG_INFO("Flush window @ %p for size 0x%.8x which maps flash @ 0x%.8x\n",
379 context->current->mem, context->current->size,
380 context->current->flash_offset);
381
382 /*
383 * We look for streaks of the same type and keep a count, when the type
384 * (dirty/erased) changes we perform the required action on the backing
385 * store and update the current streak-type
386 */
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030387 for (i = 0; i < (context->current->size >> context->backend.block_size_shift);
Andrew Jeffery9b920cf2018-08-07 22:49:19 +0930388 i++) {
389 uint8_t cur = context->current->dirty_bmap[i];
390 if (cur != WINDOW_CLEAN) {
391 if (cur == prev) { /* Same as previous block, incrmnt */
392 count++;
393 } else if (prev == WINDOW_CLEAN) { /* Start of run */
394 offset = i;
395 count++;
396 } else { /* Change in streak type */
397 rc = window_flush(context, offset, count,
398 prev);
399 if (rc < 0) {
400 return rc;
401 }
402 offset = i;
403 count = 1;
404 }
405 } else {
406 if (prev != WINDOW_CLEAN) { /* End of a streak */
407 rc = window_flush(context, offset, count,
408 prev);
409 if (rc < 0) {
410 return rc;
411 }
412 offset = 0;
413 count = 0;
414 }
415 }
416 prev = cur;
417 }
418
419 if (prev != WINDOW_CLEAN) { /* Still the last streak to write */
420 rc = window_flush(context, offset, count, prev);
421 if (rc < 0) {
422 return rc;
423 }
424 }
425
426 /* Clear the dirty bytemap since we have written back all changes */
427 return window_set_bytemap(context, context->current, 0,
428 context->current->size >>
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030429 context->backend.block_size_shift,
Andrew Jeffery9b920cf2018-08-07 22:49:19 +0930430 WINDOW_CLEAN);
431}
432
Andrew Jefferycb935042019-03-15 09:54:33 +1030433static int protocol_v1_flush(struct mbox_context *context,
434 struct protocol_flush *io)
Andrew Jeffery9b920cf2018-08-07 22:49:19 +0930435{
436 int rc;
437
438 if (!(context->current && context->current_is_write)) {
439 MSG_ERR("Tried to call flush without open write window\n");
440 return -EPERM;
441 }
442
443 /*
444 * For V1 the Flush command acts much the same as the dirty command
445 * except with a flush as well. Only do this on an actual flush
446 * command not when we call flush because we've implicitly closed a
447 * window because we might not have the required args in req.
448 */
Andrew Jeffery093eda52018-08-07 23:10:43 +0930449 if (io) {
450 struct protocol_mark_dirty *mdio = (void *)io;
451 rc = protocol_v1_mark_dirty(context, mdio);
452 if (rc < 0) {
453 return rc;
454 }
Andrew Jeffery9b920cf2018-08-07 22:49:19 +0930455 }
456
457 return generic_flush(context);
458}
459
Andrew Jefferycb935042019-03-15 09:54:33 +1030460static int protocol_v1_close(struct mbox_context *context,
461 struct protocol_close *io)
Andrew Jeffery093eda52018-08-07 23:10:43 +0930462{
463 int rc;
464
465 /* Close the current window if there is one */
466 if (!context->current) {
467 return 0;
468 }
469
470 /* There is an implicit flush if it was a write window */
471 if (context->current_is_write) {
472 rc = protocol_v1_flush(context, NULL);
473 if (rc < 0) {
474 MSG_ERR("Couldn't Flush Write Window\n");
475 return rc;
476 }
477 }
478
479 /* Host asked for it -> Don't set the BMC Event */
Andrew Jeffery2ebfd202018-08-20 11:46:28 +0930480 windows_close_current(context, io->req.flags);
Andrew Jeffery093eda52018-08-07 23:10:43 +0930481
482 return 0;
483}
484
Andrew Jefferycb935042019-03-15 09:54:33 +1030485static int protocol_v1_ack(struct mbox_context *context,
486 struct protocol_ack *io)
Andrew Jefferyc5c83042018-08-07 23:22:05 +0930487{
Andrew Jeffery2ebfd202018-08-20 11:46:28 +0930488 return protocol_events_clear(context,
489 (io->req.flags & BMC_EVENT_ACK_MASK));
Andrew Jefferyc5c83042018-08-07 23:22:05 +0930490}
491
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930492/*
Andrew Jeffery1e531af2018-08-07 13:32:57 +0930493 * get_suggested_timeout() - get the suggested timeout value in seconds
494 * @context: The mbox context pointer
495 *
496 * Return: Suggested timeout in seconds
497 */
498static uint16_t get_suggested_timeout(struct mbox_context *context)
499{
500 struct window_context *window = windows_find_largest(context);
501 uint32_t max_size_mb = window ? (window->size >> 20) : 0;
502 uint16_t ret;
503
504 ret = align_up(max_size_mb * FLASH_ACCESS_MS_PER_MB, 1000) / 1000;
505
506 MSG_DBG("Suggested Timeout: %us, max window size: %uMB, for %dms/MB\n",
507 ret, max_size_mb, FLASH_ACCESS_MS_PER_MB);
508 return ret;
509}
510
Andrew Jefferycb935042019-03-15 09:54:33 +1030511static int protocol_v2_get_info(struct mbox_context *context,
512 struct protocol_get_info *io)
Andrew Jeffery1e531af2018-08-07 13:32:57 +0930513{
514 uint8_t old_version = context->version;
515 int rc;
516
517 /* Bootstrap protocol version. This may involve {up,down}grading */
518 rc = protocol_negotiate_version(context, io->req.api_version);
519 if (rc < 0)
520 return rc;
521
522 /* Do the {up,down}grade if necessary*/
523 if (rc != old_version) {
Andrew Jeffery2ebfd202018-08-20 11:46:28 +0930524 /* Doing version negotiation, don't alert host to reset */
525 windows_reset_all(context);
Andrew Jeffery1e531af2018-08-07 13:32:57 +0930526 return context->protocol->get_info(context, io);
527 }
528
529 /* Record the negotiated version for the response */
530 io->resp.api_version = rc;
531
532 /* Now do all required intialisation for v2 */
Andrew Jeffery1e531af2018-08-07 13:32:57 +0930533
534 /* Knowing blocksize we can allocate the window dirty_bytemap */
535 windows_alloc_dirty_bytemap(context);
536
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030537 io->resp.v2.block_size_shift = context->backend.block_size_shift;
538 MSG_INFO("Block Size: 0x%.8x (shift: %u)\n",
539 1 << context->backend.block_size_shift, context->backend.block_size_shift);
540
Andrew Jeffery1e531af2018-08-07 13:32:57 +0930541 io->resp.v2.timeout = get_suggested_timeout(context);
542
543 return lpc_map_memory(context);
544}
545
Andrew Jefferycb935042019-03-15 09:54:33 +1030546static int protocol_v2_get_flash_info(struct mbox_context *context,
547 struct protocol_get_flash_info *io)
Andrew Jeffery91a87452018-08-07 14:54:14 +0930548{
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030549 struct backend *backend = &context->backend;
550
Andrew Jeffery91a87452018-08-07 14:54:14 +0930551 io->resp.v2.flash_size =
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030552 backend->flash_size >> backend->block_size_shift;
Andrew Jeffery91a87452018-08-07 14:54:14 +0930553 io->resp.v2.erase_size =
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030554 ((1 << backend->erase_size_shift) >> backend->block_size_shift);
Andrew Jeffery91a87452018-08-07 14:54:14 +0930555
556 return 0;
557}
558
Andrew Jefferycb935042019-03-15 09:54:33 +1030559static int protocol_v2_create_window(struct mbox_context *context,
560 struct protocol_create_window *io)
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930561{
562 int rc;
563
Andrew Jeffery4bcec8e2018-08-07 15:33:41 +0930564 rc = protocol_v1_create_window(context, io);
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930565 if (rc < 0)
566 return rc;
567
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030568 io->resp.size = context->current->size >> context->backend.block_size_shift;
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930569 io->resp.offset = context->current->flash_offset >>
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030570 context->backend.block_size_shift;
Andrew Jeffery22fa5002018-08-07 15:22:50 +0930571
572 return 0;
573}
574
Andrew Jefferycb935042019-03-15 09:54:33 +1030575static int protocol_v2_mark_dirty(struct mbox_context *context,
576 struct protocol_mark_dirty *io)
Andrew Jefferya336e432018-08-07 16:00:40 +0930577{
578 if (!(context->current && context->current_is_write)) {
579 MSG_ERR("Tried to call mark dirty without open write window\n");
580 return -EPERM;
581 }
582
583 MSG_INFO("Dirty window @ 0x%.8x for 0x%.8x\n",
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030584 io->req.v2.offset << context->backend.block_size_shift,
585 io->req.v2.size << context->backend.block_size_shift);
Andrew Jefferya336e432018-08-07 16:00:40 +0930586
587 return window_set_bytemap(context, context->current, io->req.v2.offset,
588 io->req.v2.size, WINDOW_DIRTY);
589}
590
Andrew Jefferycb935042019-03-15 09:54:33 +1030591static int protocol_v2_erase(struct mbox_context *context,
592 struct protocol_erase *io)
Andrew Jeffery62a3daa2018-08-07 22:30:32 +0930593{
594 size_t start, len;
595 int rc;
596
597 if (!(context->current && context->current_is_write)) {
598 MSG_ERR("Tried to call erase without open write window\n");
599 return -EPERM;
600 }
601
602 MSG_INFO("Erase window @ 0x%.8x for 0x%.8x\n",
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030603 io->req.offset << context->backend.block_size_shift,
604 io->req.size << context->backend.block_size_shift);
Andrew Jeffery62a3daa2018-08-07 22:30:32 +0930605
606 rc = window_set_bytemap(context, context->current, io->req.offset,
607 io->req.size, WINDOW_ERASED);
608 if (rc < 0) {
609 return rc;
610 }
611
612 /* Write 0xFF to mem -> This ensures consistency between flash & ram */
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030613 start = io->req.offset << context->backend.block_size_shift;
614 len = io->req.size << context->backend.block_size_shift;
Andrew Jeffery62a3daa2018-08-07 22:30:32 +0930615 memset(context->current->mem + start, 0xFF, len);
616
617 return 0;
618}
619
Andrew Jefferycb935042019-03-15 09:54:33 +1030620static int protocol_v2_flush(struct mbox_context *context,
621 struct protocol_flush *io)
Andrew Jeffery9b920cf2018-08-07 22:49:19 +0930622{
623 if (!(context->current && context->current_is_write)) {
624 MSG_ERR("Tried to call flush without open write window\n");
625 return -EPERM;
626 }
627
628 return generic_flush(context);
629}
630
Andrew Jefferycb935042019-03-15 09:54:33 +1030631static int protocol_v2_close(struct mbox_context *context,
632 struct protocol_close *io)
Andrew Jeffery093eda52018-08-07 23:10:43 +0930633{
634 int rc;
635
636 /* Close the current window if there is one */
637 if (!context->current) {
638 return 0;
639 }
640
641 /* There is an implicit flush if it was a write window */
642 if (context->current_is_write) {
643 rc = protocol_v2_flush(context, NULL);
644 if (rc < 0) {
645 MSG_ERR("Couldn't Flush Write Window\n");
646 return rc;
647 }
648 }
649
650 /* Host asked for it -> Don't set the BMC Event */
Andrew Jeffery2ebfd202018-08-20 11:46:28 +0930651 windows_close_current(context, io->req.flags);
Andrew Jeffery093eda52018-08-07 23:10:43 +0930652
653 return 0;
654}
655
Andrew Jefferycb935042019-03-15 09:54:33 +1030656static const struct protocol_ops protocol_ops_v1 = {
657 .reset = protocol_v1_reset,
658 .get_info = protocol_v1_get_info,
659 .get_flash_info = protocol_v1_get_flash_info,
660 .create_window = protocol_v1_create_window,
661 .mark_dirty = protocol_v1_mark_dirty,
662 .erase = NULL,
663 .flush = protocol_v1_flush,
664 .close = protocol_v1_close,
665 .ack = protocol_v1_ack,
666};
667
668static const struct protocol_ops protocol_ops_v2 = {
669 .reset = protocol_v1_reset,
670 .get_info = protocol_v2_get_info,
671 .get_flash_info = protocol_v2_get_flash_info,
672 .create_window = protocol_v2_create_window,
673 .mark_dirty = protocol_v2_mark_dirty,
674 .erase = protocol_v2_erase,
675 .flush = protocol_v2_flush,
676 .close = protocol_v2_close,
677 .ack = protocol_v1_ack,
678};
679
680static const struct protocol_ops *protocol_ops_map[] = {
681 [0] = NULL,
682 [1] = &protocol_ops_v1,
683 [2] = &protocol_ops_v2,
684};
685
686static int protocol_negotiate_version(struct mbox_context *context,
687 uint8_t requested)
688{
689 /* Check we support the version requested */
690 if (requested < API_MIN_VERSION)
691 return -EINVAL;
692
693 context->version = (requested > API_MAX_VERSION) ?
694 API_MAX_VERSION : requested;
695
696 context->protocol = protocol_ops_map[context->version];
697
698 return context->version;
699}
700
Andrew Jeffery1e531af2018-08-07 13:32:57 +0930701int protocol_init(struct mbox_context *context)
702{
Andrew Jefferyc7d19472018-08-08 11:43:08 +0930703 protocol_negotiate_version(context, API_MAX_VERSION);
Andrew Jeffery1e531af2018-08-07 13:32:57 +0930704
705 return 0;
706}
707
/* protocol_init() allocates nothing, so there is nothing to release */
void protocol_free(struct mbox_context *context)
{
}
Andrew Jefferyf69760d2019-03-14 16:54:13 +1030712
713/* Don't do any state manipulation, just perform the reset */
714int __protocol_reset(struct mbox_context *context)
715{
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030716 enum backend_reset_mode mode;
717 int rc;
718
Andrew Jefferyf69760d2019-03-14 16:54:13 +1030719 windows_reset_all(context);
720
Evan Lojewskif1e547c2019-03-14 14:34:33 +1030721 rc = backend_reset(&context->backend, context->mem, context->mem_size);
722 if (rc < 0)
723 return rc;
724
725 mode = rc;
726 if (!(mode == reset_lpc_flash || mode == reset_lpc_memory))
727 return -EINVAL;
728
729 if (mode == reset_lpc_flash)
730 return lpc_map_flash(context);
731
732 assert(mode == reset_lpc_memory);
733 return lpc_map_memory(context);
Andrew Jefferyf69760d2019-03-14 16:54:13 +1030734}
735
/* Prevent the host from performing actions whilst reset takes place */
int protocol_reset(struct mbox_context *context)
{
	int rc;

	/* Drop the ready event first so the host stops issuing commands */
	rc = protocol_events_clear(context, BMC_EVENT_DAEMON_READY);
	if (rc < 0) {
		MSG_ERR("Failed to clear daemon ready state, reset failed\n");
		return rc;
	}

	rc = __protocol_reset(context);
	if (rc < 0) {
		MSG_ERR("Failed to reset protocol, daemon remains not ready\n");
		return rc;
	}

	/* Signal ready again, flagging to the host that a reset occurred */
	rc = protocol_events_set(context,
			BMC_EVENT_DAEMON_READY | BMC_EVENT_PROTOCOL_RESET);
	if (rc < 0) {
		MSG_ERR("Failed to set daemon ready state, daemon remains not ready\n");
		return rc;
	}

	return 0;
}