aura  0.1
 All Data Structures Functions Variables Modules Pages
transport-nmc.c
#include <aura/aura.h>
#include <aura/private.h>
#include <linux/easynmc.h>
#include <easynmc.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/mman.h>
#include <termios.h>
#include <fcntl.h>
#include <unistd.h>
#include <ion/ion.h>
11 
#define MAGIC 0xbeefbabe              /* NOTE(review): not referenced in this file — TODO confirm it is used elsewhere */
#define AURA_MAGIC_HDR 0xdeadf00d    /* expected value of nmc_aura_header.magic */
#define AURA_OBJECT_EVENT 0xdeadc0de /* nmc_aura_object.type marker: event */
#define AURA_OBJECT_METHOD 0xdeadbeaf /* nmc_aura_object.type marker: method */
#define AURA_STRLEN 16               /* fixed length, in 32-bit words, of NMC-side strings */
17 
/*
 * One entry of the RPC export table published by the NMC-side program.
 * Strings are stored NMC-style: one character per 32-bit word,
 * NUL-terminated (see nmc_fetch_str()).
 * Layout is shared with the NMC core — do not reorder fields.
 */
struct nmc_aura_object {
	uint32_t type;                /* AURA_OBJECT_METHOD or AURA_OBJECT_EVENT; 0 terminates the table */
	uint32_t id;                  /* NOTE(review): not read by the parser here — presumably consumed NMC-side; confirm */
	uint32_t name[AURA_STRLEN];   /* object name */
	uint32_t argfmt[AURA_STRLEN]; /* argument format string */
	uint32_t retfmt[AURA_STRLEN]; /* return format string */
	uint32_t ptr;                 /* NOTE(review): unused in this file — presumably the NMC-side handler address; confirm */
};
26 
27 
/* Header preceding the export table in the .aura_rpc_exports section. */
struct nmc_aura_header {
	uint32_t magic;  /* must equal AURA_MAGIC_HDR */
	uint32_t strlen; /* must equal AURA_STRLEN: string field width the NMC side was built with */
};
32 
/* States of nmc_aura_syncbuffer.state (shared handshake with the NMC core). */
enum {
	SYNCBUF_IDLE = 0,   /* core is free: host may issue the next call */
	SYNCBUF_ARGOUT = 1, /* host posted arguments; core is processing */
	SYNCBUF_RETIN = 2   /* core posted results; host must collect them */
};
38 
/*
 * Call handshake area, located in core memory via the .aura_rpc_syncbuf
 * ELF section. Polled by nmc_loop(); layout shared with the NMC core.
 */
struct nmc_aura_syncbuffer {
	uint32_t state;               /* one of SYNCBUF_* above */
	uint32_t id;                  /* id of the object being called */
	uint32_t inbound_buffer_ptr;  /* NMC address of the result buffer */
	uint32_t outbound_buffer_ptr; /* NMC address of the argument buffer */
};
45 
/* The .aura_rpc_exports section: header plus a 0-type-terminated object list. */
struct nmc_export_table {
	struct nmc_aura_header hdr;
	struct nmc_aura_object objs[]; /* flexible array; terminated by objs[i].type == 0 */
};
50 
/* Event source flags. NOTE(review): not referenced in this file — TODO confirm external users. */
enum {
	NMC_EVENT_STDIO = 1 << 0,
	NMC_EVENT_TRANSPORT = 1 << 1
};
55 
/*
 * Bookkeeping wrapper around an ion-backed aura_buffer.
 * Recovered from the embedded aura_buffer via container_of().
 */
struct ion_buffer_descriptor {
	int map_fd;             /* fd returned by ion_map(); closed on release */
	int share_fd;           /* fd from ion_share(); -1 until first shared (see aura_buffer_to_nmc()) */
	int size;               /* mapped size in bytes, needed for munmap() */
	ion_user_handle_t hndl; /* ion allocation handle */
	struct aura_buffer buf; /* the buffer handed to the aura core (must stay last is NOT required — offset taken via container_of) */
};
63 
64 
/* Per-node transport state, stored as both the node's and the easynmc handle's userdata. */
struct nmc_private {
	struct easynmc_handle *h;         /* easynmc core handle */
	uint32_t eaddr;                   /* byte offset of .aura_rpc_exports within core memory */
	uint32_t esz;                     /* size of the export section (bytes) */
	uint32_t ep;                      /* program entry point, filled in by easynmc_load_abs() */
	struct aura_node *node;           /* back pointer to the owning node */
	int is_online;                    /* nonzero once the core has been seen RUNNING */
	uint32_t flags;                   /* NOTE(review): never touched in this file — TODO confirm use */
	struct aura_buffer *writing;      /* NOTE(review): unused here — TODO confirm */
	struct aura_buffer *reading;      /* NOTE(review): unused here — TODO confirm */
	int inbufsize;                    /* largest retlen across all exported objects */
	struct nmc_aura_syncbuffer *sbuf; /* handshake area mapped from core memory (may be NULL if section absent) */
	int ion_fd;                       /* ion allocator fd; NOTE(review): never closed in nmc_close() — possible fd leak */
	struct aura_buffer *current_in;   /* result buffer of the in-flight call, or NULL */
	struct aura_buffer *current_out;  /* argument buffer of the in-flight call, or NULL */
	struct aura_export_table *etbl;   /* table built by parse_and_register_exports() */
};
82 
/*
 * Convert an NMC-side string (one character per 32-bit word,
 * NUL-terminated) into a freshly allocated C string.
 *
 * @nmstr: pointer to the 32-bit-per-char source string
 * Returns a malloc()ed string the caller must free(), or NULL on OOM.
 *
 * Fix: the old do/while copy loop tested the terminator only after
 * advancing, so an empty source string copied its NUL as data and the
 * final `*tmp = 0` wrote one byte past the malloc(len) allocation.
 */
static char *nmc_fetch_str(void *nmstr)
{
	const uint32_t *n = nmstr;
	size_t len = 0;

	while (n[len])
		len++;

	char *ret = malloc(len + 1);
	if (!ret)
		return NULL;

	for (size_t i = 0; i < len; i++)
		ret[i] = (char)(n[i] & 0xff); /* keep only the low byte of each word */
	ret[len] = '\0';
	return ret;
}
98 
99 static int handle_aura_rpc_section(struct easynmc_handle *h, char* name, FILE *rfd, GElf_Shdr shdr)
100 {
101  struct nmc_private *pv = easynmc_userdata_get(h);
102 
103  if (strcmp(name, ".aura_rpc_exports") == 0) {
104  uint32_t addr = shdr.sh_addr << 2;
105  uint32_t size = shdr.sh_size;
106 
107  slog(4, SLOG_DEBUG, "transport-nmc: Found RPC export section: %s at offset %u len %u",
108  name, addr, size);
109 
110  if (!size) {
111  slog(4, SLOG_ERROR, "transport-nmc: RPC export section is empty!",
112  name, pv->eaddr, pv->esz);
113  return 1;
114  }
115  pv->eaddr = addr;
116  pv->esz = size;
117  return 1;
118  }
119 
120  if (strcmp(name, ".aura_rpc_syncbuf") == 0) {
121  uint32_t addr = shdr.sh_addr << 2;
122  uint32_t size = shdr.sh_size;
123 
124  slog(4, SLOG_DEBUG, "transport-nmc: Found sync buffer: %s at offset %u len %u",
125  name, addr, size);
126  if (!size) {
127  slog(4, SLOG_ERROR, "transport-nmc: RPC export section is empty!",
128  name, pv->eaddr, pv->esz);
129  return 1;
130  }
131 
132  pv->sbuf = (struct nmc_aura_syncbuffer*) &pv->h->imem[addr];
133  }
134 
135  return 0;
136 }
137 
/*
 * Validate the export table the NMC program placed at pv->eaddr and build
 * the matching aura export table for the node. Also records the largest
 * return-buffer size in pv->inbufsize so callers know the worst case.
 *
 * Returns 0 on success, -EIO if the header magic or string width does not
 * match what this transport was compiled against.
 */
static int parse_and_register_exports(struct nmc_private *pv)
{
	struct nmc_aura_object *cur;
	/* The table lives in (mapped) core memory, not in host heap */
	struct nmc_export_table *tbl = (struct nmc_export_table *)&pv->h->imem[pv->eaddr];
	struct aura_export_table *etbl;
	struct aura_node *node = pv->node;
	int count = 0;
	int i;
	int max_buf_sz = 0;

	if (tbl->hdr.magic != AURA_MAGIC_HDR) {
		slog(0, SLOG_ERROR, "transport-nmc: Bad rpc magic");
		return -EIO;
	}

	/* Both sides must agree on the fixed string-field width */
	if (tbl->hdr.strlen != AURA_STRLEN) {
		slog(0, SLOG_ERROR, "transport-nmc: strlen mismatch: %d vs %d",
		     tbl->hdr.strlen, AURA_STRLEN);
		return -EIO;
	}

	slog(4, SLOG_DEBUG, "transport-nmc: parsing objects");

	/* First pass: count valid entries so the etable can be sized up front */
	cur = tbl->objs;
	while (cur->type != 0) {
		if ((cur->type != AURA_OBJECT_METHOD) &&
		    (cur->type != AURA_OBJECT_EVENT))
			break; /* stop at the first corrupt entry */
		count++;
		cur++;
	}

	etbl = aura_etable_create(node, count);
	if (!etbl)
		BUG(node, "Failed to create etable");

	/* Second pass: copy each entry's strings out and register it */
	cur = tbl->objs;
	while (cur->type != 0) {
		char *type = NULL;
		if (cur->type == AURA_OBJECT_METHOD)
			type = "method";
		if (cur->type == AURA_OBJECT_EVENT)
			type = "event";
		if (!type) {
			slog(0, SLOG_WARN, "transport-nmc: Bad data in object section, load stopped");
			break;
		}

		/* NOTE(review): nmc_fetch_str() may return NULL on OOM; the
		 * results are passed to aura_etable_add() unchecked — confirm
		 * it tolerates NULL. */
		char *name = nmc_fetch_str(cur->name);
		char *arg = nmc_fetch_str(cur->argfmt);
		char *ret = nmc_fetch_str(cur->retfmt);

		aura_etable_add(etbl, name, arg, ret);

		slog(4, SLOG_DEBUG, "transport-nmc: %s name %s (%s : %s)",
		     type, name, arg, ret);

		free(name);
		free(arg);
		free(ret);
		cur++;
	}

	/* Track the largest return payload across all objects */
	for (i = 0; i < etbl->next; i++) {
		struct aura_object *o = &etbl->objects[i];
		if (o->retlen > max_buf_sz)
			max_buf_sz = o->retlen;
	}

	pv->inbufsize = max_buf_sz;
	pv->etbl = etbl;

	return 0;
}
212 
/*
 * Section filter handed to easynmc so handle_aura_rpc_section() sees every
 * section of the loaded .abs file.
 * NOTE(review): .name here lacks the leading dot that the handler matches
 * against (".aura_rpc_exports") — presumably this is just the filter's
 * label, not a section match pattern; confirm against easynmc docs.
 */
static struct easynmc_section_filter rpc_filter = {
	.name = "aura_rpc_exports",
	.handle_section = handle_aura_rpc_section
};
217 
/*
 * Toggle canonical mode and echo on a tty fd.
 *
 * @fd:    terminal file descriptor (the NMC stdio channel)
 * @state: 1 -> raw-ish, non-blocking reads (no line buffering, no echo);
 *         0 -> restore canonical, echoing mode. Other values are ignored.
 */
static void nonblock(int fd, int state)
{
	struct termios tio;

	tcgetattr(fd, &tio);

	switch (state) {
	case 1:
		tio.c_lflag &= ~(ICANON | ECHO);
		tio.c_cc[VTIME] = 0; /* no inter-character timer */
		tio.c_cc[VMIN] = 0;  /* read() returns immediately */
		break;
	case 0:
		tio.c_lflag |= ICANON | ECHO;
		break;
	default:
		break;
	}

	tcsetattr(fd, TCSANOW, &tio);
}
238 
239 
240 static int nmc_open(struct aura_node *node, const char *filepath)
241 {
242  int ret = -ENOMEM;
243  struct easynmc_handle *h = easynmc_open(EASYNMC_CORE_ANY);
244  if (!h)
245  return -EIO;
246 
247  struct nmc_private *pv = calloc(1, sizeof(*pv));
248  if (!pv) {
249  ret = -ENOMEM;
250  goto errclose;
251  }
252 
253  pv->ion_fd = ion_open();
254 
255  if (pv->ion_fd < 0) {
256  slog(0, SLOG_ERROR, "Failed to init ion");
257  goto errfreemem;
258  }
259 
260  if ((sizeof(struct aura_buffer) % 4))
261  BUG(node, "Internal BUG: aura_buffer header must be 4-byte aligned")
262 
263  pv->node = node;
264  pv->h = h;
265 
266  easynmc_userdata_set(h, pv);
267 
268  easynmc_register_section_filter(h, &rpc_filter);
269 
270  ret = easynmc_load_abs(h, filepath, &pv->ep, ABSLOAD_FLAG_DEFAULT);
271  if ((ret != 0) || (!pv->eaddr)) {
272  slog(0, SLOG_ERROR, "transport-nmc: abs file doesn't have a valid aura_rpc_exports section");
273  ret = -EIO;
274  goto errfreemem;
275  }
276 
277  ret = parse_and_register_exports(pv);
278  if (ret != 0)
279  goto errfreemem;
280 
281  aura_set_userdata(node, pv);
282 
283  nonblock(h->iofd, 1);
284 
285  aura_add_pollfds(node, h->iofd, (EPOLLIN | EPOLLET));
286  aura_add_pollfds(node, h->memfd, (EPOLLPRI | EPOLLET));
287 
288  easynmc_start_app(h, pv->ep);
289  return 0;
290 
291 errfreemem:
292  free(pv);
293 errclose:
294  easynmc_close(h);
295  return ret;
296 }
297 
298 static void fetch_stdout(struct aura_node *node)
299 {
300  struct nmc_private *pv = aura_get_userdata(node);
301  while (1) {
302  char tmp[128];
303  int count;
304  slog(4, SLOG_DEBUG, "transport-nmc: We can read stdout!");
305  count = read(pv->h->iofd, tmp, 128);
306  if (count <= 0)
307  break; /* Fuckup? Try later! */
308  fwrite(tmp, count, 1, stdout);
309  if (count < 128) /* No more data */
310  break;
311  }
312 
313 }
314 
/*
 * Transport close hook: detach poll fds, shut the core down and release
 * any buffers belonging to a call that was still in flight.
 * NOTE(review): pv->ion_fd is never closed here (nor anywhere else in this
 * file) — looks like an fd leak, but closing it before the buffer releases
 * below would break ion_free(); needs a considered fix, not a drive-by one.
 */
static void nmc_close(struct aura_node *node)
{
	struct nmc_private *pv = aura_get_userdata(node);
	/*
	 * TODO: Uncomment when driver's fixed
	 * fetch_stdout(node);
	 */

	aura_del_pollfds(node, pv->h->iofd);
	aura_del_pollfds(node, pv->h->memfd);
	easynmc_close(pv->h);
	/* Release the halves of a call that never completed */
	if (pv->current_out)
		aura_buffer_release(pv->current_out);
	if (pv->current_in)
		aura_buffer_release(pv->current_in);
	free(pv);
}
332 
/*
 * Translate an ion-backed aura_buffer (allocated by ion_buffer_request())
 * into an address the NMC core can dereference.
 * The ion buffer is shared lazily on first use; the share fd is cached in
 * the descriptor so repeated translations don't re-share.
 * BUG()s out (does not return an error) if sharing or translation fails.
 */
static uint32_t aura_buffer_to_nmc(struct aura_buffer *buf)
{
	struct aura_node *node = buf->owner;
	struct nmc_private *pv = aura_get_userdata(node);
	struct ion_buffer_descriptor *dsc = container_of(buf, struct ion_buffer_descriptor, buf);
	uint32_t nmaddress;

	if (dsc->share_fd == -1) { /* not shared yet: share once and cache the fd */
		int ret = ion_share(pv->ion_fd, dsc->hndl, &dsc->share_fd);
		if (ret)
			BUG(node, "ion_share() failed");
	}

	nmaddress = easynmc_ion2nmc(pv->h, dsc->share_fd);
	if (!nmaddress)
		BUG(node, "Failed to obtain nm address handle");

	return nmaddress;
}
352 
353 static inline void do_issue_next_call(struct aura_node *node)
354 {
355  struct aura_buffer *in_buf, *out_buf;
356  struct nmc_private *pv = aura_get_userdata(node);
357  struct aura_object *o;
358 
359  out_buf = aura_dequeue_buffer(&node->outbound_buffers);
360  if (!out_buf)
361  return;
362 
363  o = out_buf->object;
364  in_buf = aura_buffer_request(node, o->retlen);
365  if (!in_buf)
366  BUG(node, "Buffer allocation faield");
367 
368  in_buf->object = o;
369  pv->current_out = out_buf;
370  pv->current_in = in_buf;
371  pv->sbuf->id = o->id;
372 
373  pv->sbuf->outbound_buffer_ptr = aura_buffer_to_nmc(out_buf);
374  pv->sbuf->inbound_buffer_ptr = aura_buffer_to_nmc(in_buf);
375  pv->sbuf->state = SYNCBUF_ARGOUT;
376 }
377 
/*
 * Transport event loop hook. Called by the aura core on poll events (@fd
 * set) and periodically (@fd NULL). Tracks core online/offline state,
 * drains NMC stdout, and drives the one-call-at-a-time handshake through
 * pv->sbuf.
 */
static void nmc_loop(struct aura_node *node, const struct aura_pollfds *fd)
{
	struct nmc_private *pv = aura_get_userdata(node);
	struct easynmc_handle *h = pv->h;

	/* Handle state changes */
	if (!pv->is_online && (easynmc_core_state(h) == EASYNMC_CORE_RUNNING)) {
		/* Core came up: publish the export table and go online */
		aura_etable_activate(pv->etbl);
		aura_set_status(node, AURA_STATUS_ONLINE);
		pv->is_online++;
	};

	if (pv->is_online && (easynmc_core_state(h) != EASYNMC_CORE_RUNNING)) {
		aura_set_status(node, AURA_STATUS_OFFLINE);
		pv->is_online = 0;
	};

	/* stdio fd readable: forward NMC program output */
	if (fd && (fd->fd == pv->h->iofd))
		fetch_stdout(node);

	/* Drive the call handshake (sbuf may be absent if the .abs had no
	 * syncbuf section) */
	if (pv->sbuf)
		switch (pv->sbuf->state) {
		case SYNCBUF_IDLE:
			slog(4, SLOG_DEBUG, "transport-nmc: We can issue our call");
			do_issue_next_call(node);
			break;
		case SYNCBUF_ARGOUT:
			slog(4, SLOG_DEBUG, "transport-nmc: NMC is still working");
			break;
		case SYNCBUF_RETIN:
			/* Call finished: retire the argument buffer, queue the
			 * result inbound, and mark the channel idle again */
			aura_buffer_release(pv->current_out);
			aura_queue_buffer(&node->inbound_buffers, pv->current_in);
			pv->current_out = NULL;
			pv->current_in = NULL;
			pv->sbuf->state = SYNCBUF_IDLE;
			slog(4, SLOG_DEBUG, "transport-nmc: We got something from nmc");
			break;
		default:
			BUG(node, "Unexpected syncbuf state");
			break;
		}
}
420 
/*
 * Transport buffer_request hook: allocate an aura_buffer backed by ion
 * memory so that aura_buffer_to_nmc() can later share it with the core.
 * BUG()s out on any failure rather than returning NULL.
 * NOTE(review): unlike the other hooks this one is not static — presumably
 * only referenced via the transport struct below; confirm.
 */
struct aura_buffer *ion_buffer_request(struct aura_node *node, int size)
{
	int ret;
	int map_fd;
	ion_user_handle_t hndl;
	struct nmc_private *pv = aura_get_userdata(node);

	struct ion_buffer_descriptor *dsc = malloc(sizeof(*dsc));
	if (!dsc)
		BUG(node, "malloc failed!");

	/* args per libion: align=0x10, heap_mask=0xf, flags=0 —
	 * TODO confirm the heap mask matches the NMC-reachable heaps */
	ret = ion_alloc(pv->ion_fd, size, 0x10, 0xf, 0, &hndl);
	if (ret)
		BUG(node, "ION allocation of %d bytes failed: %d", size, ret);

	/* Map into our address space; data pointer lands in the embedded buffer */
	ret = ion_map(pv->ion_fd, hndl, size, (PROT_READ | PROT_WRITE),
		      MAP_SHARED, 0, (void *)&dsc->buf.data, &map_fd);
	if (ret)
		BUG(node, "ION mmap failed");

	dsc->map_fd = map_fd;
	dsc->hndl = hndl;
	dsc->size = size;
	dsc->share_fd = -1; /* shared lazily in aura_buffer_to_nmc() */
	return &dsc->buf;
}
447 
/*
 * Transport buffer_release hook: undo everything ion_buffer_request() did —
 * unmap, free the ion handle, close the map fd and free the descriptor.
 * NOTE(review): dsc->share_fd (set lazily by aura_buffer_to_nmc()) is not
 * closed here — TODO confirm whether easynmc takes ownership of it.
 */
static void ion_buffer_release(struct aura_buffer *buf)
{
	struct aura_node *node = buf->owner;
	struct nmc_private *pv = aura_get_userdata(node);
	struct ion_buffer_descriptor *dsc = container_of(buf, struct ion_buffer_descriptor, buf);
	int ret;

	munmap(dsc->buf.data, dsc->size);

	ret = ion_free(pv->ion_fd, dsc->hndl);
	if (ret)
		BUG(node, "Shit happened when doing ion_free(): %d", ret);

	close(dsc->map_fd);
	free(dsc);
}
464 
465 /*
466  * TODO:
467  * This buffer passing mechanism is very hacky and will not work on 64 bit
468  * A total refactor is required, perhaps altering the aura core
469  */
470 
471 void nmc_buffer_put(struct aura_buffer *dst, struct aura_buffer *buf)
472 {
473  if (sizeof(void*) != 4)
474  BUG(dst->owner, "You know why we are screwed here, jerk!");
475 
476  uint64_t buf_addrs = aura_buffer_to_nmc(buf);
477  buf_addrs |= (((uint64_t) (uintptr_t) buf) << 32);
478  aura_buffer_put_u64(dst, buf_addrs);
479  slog(4, SLOG_DEBUG, "nmc: serialized buf 0x%x to 0x%llx ", buf, buf_addrs);
480 }
481 
482 struct aura_buffer *nmc_buffer_get(struct aura_buffer *buf)
483 {
484  uint64_t addrs = aura_buffer_get_u64(buf);
485  struct aura_buffer *ret = (struct aura_buffer *) ((uintptr_t) (addrs >> 32));
486  slog(4, SLOG_DEBUG, "nmc: deserialized buf 0x%x from 0x%llx ", buf, addrs);
487  return ret;
488 }
489 
/*
 * Transport descriptor registered with the aura core. Buffer allocation is
 * overridden with the ion-backed variants so payloads can be shared with
 * the NMC core without copying.
 */
static struct aura_transport nmc = {
	.name = "nmc",
	.open = nmc_open,
	.close = nmc_close,
	.loop = nmc_loop,
	.buffer_offset = 0,   /* payload starts right after the aura_buffer header */
	.buffer_overhead = 0, /* no per-buffer trailer */
	.buffer_request = ion_buffer_request,
	.buffer_release = ion_buffer_release,
	.buffer_get = nmc_buffer_get,
	.buffer_put = nmc_buffer_put,
};

AURA_TRANSPORT(nmc);
void aura_set_status(struct aura_node *node, int status)
Definition: aura.c:808
struct aura_buffer * aura_buffer_request(struct aura_node *nd, int size)
Definition: buffer.c:40
struct aura_buffer * aura_dequeue_buffer(struct list_head *head)
Definition: queue.c:45
const char * name
Required.
Definition: aura.h:168
struct aura_node * owner
Definition: aura.h:343
static void aura_set_userdata(struct aura_node *node, void *udata)
Definition: inlines.h:17
void aura_buffer_put_u64(struct aura_buffer *buf, uint64_t value)
Put an unsigned 64 bit integer to aura buffer.
uint64_t aura_buffer_get_u64(struct aura_buffer *buf)
Get an unsigned 64 bit integer from aura buffer.
void aura_queue_buffer(struct list_head *list, struct aura_buffer *buf)
Definition: queue.c:16
static void * aura_get_userdata(struct aura_node *node)
Definition: inlines.h:28
void aura_buffer_release(struct aura_buffer *buf)
Definition: buffer.c:80
struct aura_object * object
Definition: aura.h:341