// SPDX-License-Identifier: Apache-2.0
// Copyright (C) 2018 IBM Corp.
#include "config.h"

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "backend.h"
#include "common.h"
#include "lpc.h"
#include "mboxd.h"
#include "protocol.h"
#include "windows.h"

#define BLOCK_SIZE_SHIFT_V1	12 /* 4K */

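/* Select the BMC event mask that matches the negotiated protocol version */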
static inline uint8_t protocol_get_bmc_event_mask(struct mbox_context *context)
{
	if (context->version == API_VERSION_1) {
		return BMC_EVENT_V1_MASK;
	}

	return BMC_EVENT_V2_MASK;
}

/*
 * protocol_events_put() - Push the full set/cleared state of BMC events on the
 *                         provided transport
 * @context:	The mbox context pointer
 * @ops:	The operations struct for the transport of interest
 *
 * Return:	0 on success otherwise negative error code
 */
int protocol_events_put(struct mbox_context *context,
			const struct transport_ops *ops)
{
	const uint8_t mask = protocol_get_bmc_event_mask(context);

	return ops->put_events(context, mask);
}

/*
 * protocol_events_set() - Update the set BMC events on the active transport
 * @context:	The mbox context pointer
 * @bmc_event:	The bits to set
 *
 * Return:	0 on success otherwise negative error code
 */
int protocol_events_set(struct mbox_context *context, uint8_t bmc_event)
{
	const uint8_t mask = protocol_get_bmc_event_mask(context);

	/*
	 * Store the raw value, as we may up- or down-grade the protocol
	 * version and subsequently need to flush the appropriate set. Instead
	 * we pass the masked value through to the transport.
	 */
	context->bmc_events |= bmc_event;

	return context->transport->set_events(context, bmc_event, mask);
}

/*
 * protocol_events_clear() - Update the cleared BMC events on the active
 *                           transport
 * @context:	The mbox context pointer
 * @bmc_event:	The bits to clear
 *
 * Return:	0 on success otherwise negative error code
 */
int protocol_events_clear(struct mbox_context *context, uint8_t bmc_event)
{
	const uint8_t mask = protocol_get_bmc_event_mask(context);

	context->bmc_events &= ~bmc_event;

	return context->transport->clear_events(context, bmc_event, mask);
}

static int protocol_negotiate_version(struct mbox_context *context,
				      uint8_t requested);

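/* v1 and v2 share the same reset path: drop all windows and reset the backend */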
static int protocol_v1_reset(struct mbox_context *context)
{
	return __protocol_reset(context);
}

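/*
 * get_info handler for protocol v1: negotiate the version (which may switch
 * the ops table and re-enter via the new handler), then set the fixed v1
 * block size, size the dirty bytemap, report the window geometry and map the
 * reserved memory range over LPC.
 */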
static int protocol_v1_get_info(struct mbox_context *context,
				struct protocol_get_info *io)
{
	uint8_t old_version = context->version;
	int rc;

	/* Bootstrap protocol version. This may involve {up,down}grading */
	rc = protocol_negotiate_version(context, io->req.api_version);
	if (rc < 0)
		return rc;

	/* Do the {up,down}grade if necessary */
	if (rc != old_version) {
		/* Doing version negotiation, don't alert host to reset */
		windows_reset_all(context);
		return context->protocol->get_info(context, io);
	}

	/* Record the negotiated version for the response */
	io->resp.api_version = rc;

	/* Now do all required initialisation for v1 */
	context->backend.block_size_shift = BLOCK_SIZE_SHIFT_V1;
	MSG_INFO("Block Size: 0x%.8x (shift: %u)\n",
		 1 << context->backend.block_size_shift,
		 context->backend.block_size_shift);

	/* Knowing the block size we can allocate the window dirty_bytemap */
	windows_alloc_dirty_bytemap(context);

	io->resp.v1.read_window_size =
		context->windows.default_size >> context->backend.block_size_shift;
	io->resp.v1.write_window_size =
		context->windows.default_size >> context->backend.block_size_shift;

	return lpc_map_memory(context);
}

static int protocol_v1_get_flash_info(struct mbox_context *context,
				      struct protocol_get_flash_info *io)
{
	io->resp.v1.flash_size = context->backend.flash_size;
	io->resp.v1.erase_size = 1 << context->backend.erase_size_shift;

	return 0;
}

/*
 * get_lpc_addr_shifted() - Get the LPC address of the current window
 * @context:	The mbox context pointer
 *
 * Return:	The LPC address of the current window, shifted by the block size
 */
static inline uint16_t get_lpc_addr_shifted(struct mbox_context *context)
{
	uint32_t lpc_addr, mem_offset;

	/* Offset of the current window in the reserved memory region */
	mem_offset = context->current->mem - context->mem;
	/* Total LPC Address */
	lpc_addr = context->lpc_base + mem_offset;

	MSG_DBG("LPC address of current window: 0x%.8x\n", lpc_addr);

	return lpc_addr >> context->backend.block_size_shift;
}

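/*
 * create_window handler for protocol v1: validate the request against the
 * backend, flush and close any currently open window, then reuse or create a
 * window covering the requested flash offset and report its LPC address.
 */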
static int protocol_v1_create_window(struct mbox_context *context,
				     struct protocol_create_window *io)
{
	struct backend *backend = &context->backend;
	uint32_t offset;
	uint32_t size;
	int rc;

	offset = io->req.offset << backend->block_size_shift;
	size = io->req.size << backend->block_size_shift;
	rc = backend_validate(backend, offset, size, io->req.ro);
	if (rc < 0) {
		/* Backend does not allow window to be created. */
		return rc;
	}

	/* Close the current window if there is one */
	if (context->current) {
		/* There is an implicit flush if it was a write window
		 *
		 * protocol_v2_create_window() calls
		 * protocol_v1_create_window(), so use the indirect call to
		 * write_flush() to make sure we pick the right one.
		 */
		if (context->current_is_write) {
			rc = context->protocol->flush(context, NULL);
			if (rc < 0) {
				MSG_ERR("Couldn't Flush Write Window\n");
				return rc;
			}
		}
		windows_close_current(context, FLAGS_NONE);
	}

	/* Offset the host has requested */
	MSG_INFO("Host requested flash @ 0x%.8x\n", offset);
	/* Check if we have an existing window */
	context->current = windows_search(context, offset,
					  context->version == API_VERSION_1);

	if (!context->current) { /* No existing window */
		MSG_DBG("No existing window which maps that flash offset\n");
		rc = windows_create_map(context, &context->current,
					offset,
					context->version == API_VERSION_1);
		if (rc < 0) { /* Unable to map offset */
			MSG_ERR("Couldn't create window mapping for offset 0x%.8x\n",
				offset);
			return rc;
		}
	}

	context->current_is_write = !io->req.ro;

	MSG_INFO("Window @ %p for size 0x%.8x maps flash offset 0x%.8x\n",
		 context->current->mem, context->current->size,
		 context->current->flash_offset);

	io->resp.lpc_address = get_lpc_addr_shifted(context);

	return 0;
}

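/*
 * mark_dirty handler for protocol v1: the host supplies a flash-relative
 * offset, so translate it to a window-relative block offset before updating
 * the dirty bytemap.
 */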
static int protocol_v1_mark_dirty(struct mbox_context *context,
				  struct protocol_mark_dirty *io)
{
	uint32_t offset = io->req.v1.offset;
	uint32_t size = io->req.v1.size;
	uint32_t off;

	if (!(context->current && context->current_is_write)) {
		MSG_ERR("Tried to call mark dirty without open write window\n");
		return -EPERM;
	}

	/* For V1 the offset is given relative to flash - we want it relative
	 * to the window */
	off = offset - ((context->current->flash_offset) >>
			context->backend.block_size_shift);
	if (off > offset) { /* Underflow - before current window */
		MSG_ERR("Tried to mark dirty before start of window\n");
		MSG_ERR("requested offset: 0x%x window start: 0x%x\n",
			offset << context->backend.block_size_shift,
			context->current->flash_offset);
		return -EINVAL;
	}
	offset = off;
	/*
	 * We only track dirty at the block level.
	 * For protocol V1 we can get away with just marking the whole
	 * block dirty.
	 */
	size = align_up(size, 1 << context->backend.block_size_shift);
	size >>= context->backend.block_size_shift;

	MSG_INFO("Dirty window @ 0x%.8x for 0x%.8x\n",
		 offset << context->backend.block_size_shift,
		 size << context->backend.block_size_shift);

	return window_set_bytemap(context, context->current, offset, size,
				  WINDOW_DIRTY);
}

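/*
 * Write back the current window: walk the dirty bytemap, coalescing runs of
 * blocks in the same state (dirty or erased), flush each run to the backend,
 * then mark the whole window clean.
 */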
static int generic_flush(struct mbox_context *context)
{
	int rc, i, offset, count;
	uint8_t prev;

	offset = 0;
	count = 0;
	prev = WINDOW_CLEAN;

	MSG_INFO("Flush window @ %p for size 0x%.8x which maps flash @ 0x%.8x\n",
		 context->current->mem, context->current->size,
		 context->current->flash_offset);

	/*
	 * We look for streaks of the same type and keep a count; when the type
	 * (dirty/erased) changes we perform the required action on the backing
	 * store and update the current streak-type.
	 */
	for (i = 0;
	     i < (context->current->size >> context->backend.block_size_shift);
	     i++) {
		uint8_t cur = context->current->dirty_bmap[i];
		if (cur != WINDOW_CLEAN) {
			if (cur == prev) { /* Same as previous block, increment */
				count++;
			} else if (prev == WINDOW_CLEAN) { /* Start of run */
				offset = i;
				count++;
			} else { /* Change in streak type */
				rc = window_flush(context, offset, count,
						  prev);
				if (rc < 0) {
					return rc;
				}
				offset = i;
				count = 1;
			}
		} else {
			if (prev != WINDOW_CLEAN) { /* End of a streak */
				rc = window_flush(context, offset, count,
						  prev);
				if (rc < 0) {
					return rc;
				}
				offset = 0;
				count = 0;
			}
		}
		prev = cur;
	}

	if (prev != WINDOW_CLEAN) { /* Still the last streak to write */
		rc = window_flush(context, offset, count, prev);
		if (rc < 0) {
			return rc;
		}
	}

	/* Clear the dirty bytemap since we have written back all changes */
	return window_set_bytemap(context, context->current, 0,
				  context->current->size >>
					context->backend.block_size_shift,
				  WINDOW_CLEAN);
}

static int protocol_v1_flush(struct mbox_context *context,
			     struct protocol_flush *io)
{
	int rc;

	if (!(context->current && context->current_is_write)) {
		MSG_ERR("Tried to call flush without open write window\n");
		return -EPERM;
	}

	/*
	 * For V1 the Flush command acts much the same as the dirty command
	 * except with a flush as well. Only do this on an actual flush
	 * command, not when we call flush because we've implicitly closed a
	 * window, because we might not have the required args in req.
	 */
	if (io) {
		struct protocol_mark_dirty *mdio = (void *)io;
		rc = protocol_v1_mark_dirty(context, mdio);
		if (rc < 0) {
			return rc;
		}
	}

	return generic_flush(context);
}

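/*
 * close handler for protocol v1: flush the window if it was writable, then
 * close it without raising a BMC event, since the host asked for the close.
 */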
static int protocol_v1_close(struct mbox_context *context,
			     struct protocol_close *io)
{
	int rc;

	/* Close the current window if there is one */
	if (!context->current) {
		return 0;
	}

	/* There is an implicit flush if it was a write window */
	if (context->current_is_write) {
		rc = protocol_v1_flush(context, NULL);
		if (rc < 0) {
			MSG_ERR("Couldn't Flush Write Window\n");
			return rc;
		}
	}

	/* Host asked for it -> Don't set the BMC Event */
	windows_close_current(context, io->req.flags);

	return 0;
}

static int protocol_v1_ack(struct mbox_context *context,
			   struct protocol_ack *io)
{
	return protocol_events_clear(context,
				     (io->req.flags & BMC_EVENT_ACK_MASK));
}

/*
 * get_suggested_timeout() - get the suggested timeout value in seconds
 * @context:	The mbox context pointer
 *
 * Return:	Suggested timeout in seconds
 */
static uint16_t get_suggested_timeout(struct mbox_context *context)
{
	struct window_context *window = windows_find_largest(context);
	uint32_t max_size_mb = window ? (window->size >> 20) : 0;
	uint16_t ret;

	ret = align_up(max_size_mb * FLASH_ACCESS_MS_PER_MB, 1000) / 1000;

	MSG_DBG("Suggested Timeout: %us, max window size: %uMB, for %dms/MB\n",
		ret, max_size_mb, FLASH_ACCESS_MS_PER_MB);
	return ret;
}

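/*
 * get_info handler for protocol v2: version negotiation is shared with v1,
 * but the block size comes from the backend and the response carries the
 * block size shift and a suggested access timeout instead of window sizes.
 */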
static int protocol_v2_get_info(struct mbox_context *context,
				struct protocol_get_info *io)
{
	uint8_t old_version = context->version;
	int rc;

	/* Bootstrap protocol version. This may involve {up,down}grading */
	rc = protocol_negotiate_version(context, io->req.api_version);
	if (rc < 0)
		return rc;

	/* Do the {up,down}grade if necessary */
	if (rc != old_version) {
		/* Doing version negotiation, don't alert host to reset */
		windows_reset_all(context);
		return context->protocol->get_info(context, io);
	}

	/* Record the negotiated version for the response */
	io->resp.api_version = rc;

	/* Now do all required initialisation for v2 */

	/* Knowing the block size we can allocate the window dirty_bytemap */
	windows_alloc_dirty_bytemap(context);

	io->resp.v2.block_size_shift = context->backend.block_size_shift;
	MSG_INFO("Block Size: 0x%.8x (shift: %u)\n",
		 1 << context->backend.block_size_shift,
		 context->backend.block_size_shift);

	io->resp.v2.timeout = get_suggested_timeout(context);

	return lpc_map_memory(context);
}

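/* v2 reports the flash and erase sizes in block-size units rather than bytes */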
static int protocol_v2_get_flash_info(struct mbox_context *context,
				      struct protocol_get_flash_info *io)
{
	struct backend *backend = &context->backend;

	io->resp.v2.flash_size =
		backend->flash_size >> backend->block_size_shift;
	io->resp.v2.erase_size =
		((1 << backend->erase_size_shift) >> backend->block_size_shift);

	return 0;
}

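/*
 * v2 window creation reuses the v1 handler and additionally reports the
 * window's size and flash offset, in blocks, back to the host.
 */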
static int protocol_v2_create_window(struct mbox_context *context,
				     struct protocol_create_window *io)
{
	int rc;

	rc = protocol_v1_create_window(context, io);
	if (rc < 0)
		return rc;

	io->resp.size = context->current->size >> context->backend.block_size_shift;
	io->resp.offset = context->current->flash_offset >>
				context->backend.block_size_shift;

	return 0;
}

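/*
 * In v2 the dirty range is already window-relative and expressed in blocks,
 * so it maps directly onto the dirty bytemap.
 */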
static int protocol_v2_mark_dirty(struct mbox_context *context,
				  struct protocol_mark_dirty *io)
{
	if (!(context->current && context->current_is_write)) {
		MSG_ERR("Tried to call mark dirty without open write window\n");
		return -EPERM;
	}

	MSG_INFO("Dirty window @ 0x%.8x for 0x%.8x\n",
		 io->req.v2.offset << context->backend.block_size_shift,
		 io->req.v2.size << context->backend.block_size_shift);

	return window_set_bytemap(context, context->current, io->req.v2.offset,
				  io->req.v2.size, WINDOW_DIRTY);
}

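/*
 * erase handler (v2 only): record the range as erased in the bytemap and fill
 * the corresponding window memory with 0xFF so the host's view stays
 * consistent with what the flush will write to flash.
 */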
static int protocol_v2_erase(struct mbox_context *context,
			     struct protocol_erase *io)
{
	size_t start, len;
	int rc;

	if (!(context->current && context->current_is_write)) {
		MSG_ERR("Tried to call erase without open write window\n");
		return -EPERM;
	}

	MSG_INFO("Erase window @ 0x%.8x for 0x%.8x\n",
		 io->req.offset << context->backend.block_size_shift,
		 io->req.size << context->backend.block_size_shift);

	rc = window_set_bytemap(context, context->current, io->req.offset,
				io->req.size, WINDOW_ERASED);
	if (rc < 0) {
		return rc;
	}

	/* Write 0xFF to mem -> This ensures consistency between flash & ram */
	start = io->req.offset << context->backend.block_size_shift;
	len = io->req.size << context->backend.block_size_shift;
	memset(context->current->mem + start, 0xFF, len);

	return 0;
}

static int protocol_v2_flush(struct mbox_context *context,
			     struct protocol_flush *io)
{
	if (!(context->current && context->current_is_write)) {
		MSG_ERR("Tried to call flush without open write window\n");
		return -EPERM;
	}

	return generic_flush(context);
}

static int protocol_v2_close(struct mbox_context *context,
			     struct protocol_close *io)
{
	int rc;

	/* Close the current window if there is one */
	if (!context->current) {
		return 0;
	}

	/* There is an implicit flush if it was a write window */
	if (context->current_is_write) {
		rc = protocol_v2_flush(context, NULL);
		if (rc < 0) {
			MSG_ERR("Couldn't Flush Write Window\n");
			return rc;
		}
	}

	/* Host asked for it -> Don't set the BMC Event */
	windows_close_current(context, io->req.flags);

	return 0;
}

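/*
 * Per-version dispatch tables: v2 reuses the v1 handlers where the behaviour
 * is unchanged (reset, ack), and v1 has no erase command.
 */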
static const struct protocol_ops protocol_ops_v1 = {
	.reset = protocol_v1_reset,
	.get_info = protocol_v1_get_info,
	.get_flash_info = protocol_v1_get_flash_info,
	.create_window = protocol_v1_create_window,
	.mark_dirty = protocol_v1_mark_dirty,
	.erase = NULL,
	.flush = protocol_v1_flush,
	.close = protocol_v1_close,
	.ack = protocol_v1_ack,
};

static const struct protocol_ops protocol_ops_v2 = {
	.reset = protocol_v1_reset,
	.get_info = protocol_v2_get_info,
	.get_flash_info = protocol_v2_get_flash_info,
	.create_window = protocol_v2_create_window,
	.mark_dirty = protocol_v2_mark_dirty,
	.erase = protocol_v2_erase,
	.flush = protocol_v2_flush,
	.close = protocol_v2_close,
	.ack = protocol_v1_ack,
};

static const struct protocol_ops *protocol_ops_map[] = {
	[0] = NULL,
	[1] = &protocol_ops_v1,
	[2] = &protocol_ops_v2,
};

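/*
 * Clamp the requested version to the highest we support, reject anything
 * below the minimum, and install the matching dispatch table.
 */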
static int protocol_negotiate_version(struct mbox_context *context,
				      uint8_t requested)
{
	/* Check we support the version requested */
	if (requested < API_MIN_VERSION)
		return -EINVAL;

	context->version = (requested > API_MAX_VERSION) ?
				API_MAX_VERSION : requested;

	context->protocol = protocol_ops_map[context->version];

	return context->version;
}

int protocol_init(struct mbox_context *context)
{
	protocol_negotiate_version(context, API_MAX_VERSION);

	return 0;
}

void protocol_free(struct mbox_context *context)
{
	return;
}

/* Don't do any state manipulation, just perform the reset */
int __protocol_reset(struct mbox_context *context)
{
	enum backend_reset_mode mode;
	int rc;

	windows_reset_all(context);

	rc = backend_reset(&context->backend, context->mem, context->mem_size);
	if (rc < 0)
		return rc;

	mode = rc;
	if (!(mode == reset_lpc_flash || mode == reset_lpc_memory))
		return -EINVAL;

	if (mode == reset_lpc_flash)
		return lpc_map_flash(context);

	assert(mode == reset_lpc_memory);
	return lpc_map_memory(context);
}

/* Prevent the host from performing actions whilst reset takes place */
int protocol_reset(struct mbox_context *context)
{
	int rc;

	rc = protocol_events_clear(context, BMC_EVENT_DAEMON_READY);
	if (rc < 0) {
		MSG_ERR("Failed to clear daemon ready state, reset failed\n");
		return rc;
	}

	rc = __protocol_reset(context);
	if (rc < 0) {
		MSG_ERR("Failed to reset protocol, daemon remains not ready\n");
		return rc;
	}

	rc = protocol_events_set(context,
				 BMC_EVENT_DAEMON_READY | BMC_EVENT_PROTOCOL_RESET);
	if (rc < 0) {
		MSG_ERR("Failed to set daemon ready state, daemon remains not ready\n");
		return rc;
	}

	return 0;
}