diff --git a/core/net/mac/csma/csma.c b/core/net/mac/csma/csma.c
index 56b71e8b0..a7bd08642 100644
--- a/core/net/mac/csma/csma.c
+++ b/core/net/mac/csma/csma.c
@@ -113,6 +113,9 @@ input_packet(void)
     }
 #endif /* CSMA_SEND_802154_ACK */
     if(!duplicate) {
+      LOG_WARN("received packet from ");
+      LOG_WARN_LLADDR(packetbuf_addr(PACKETBUF_ADDR_SENDER));
+      LOG_WARN_(", seqno %u, len %u\n", packetbuf_attr(PACKETBUF_ATTR_MAC_SEQNO), packetbuf_datalen());
       NETSTACK_NETWORK.input();
     }
   }
diff --git a/core/net/mac/tsch/tsch-rpl.c b/core/net/mac/tsch/tsch-rpl.c
index 166763ad8..4be622d7f 100644
--- a/core/net/mac/tsch/tsch-rpl.c
+++ b/core/net/mac/tsch/tsch-rpl.c
@@ -81,19 +81,18 @@ tsch_rpl_callback_leaving_network(void)
 /* Set TSCH EB period based on current RPL DIO period.
  * To use, set #define RPL_CALLBACK_NEW_DIO_INTERVAL tsch_rpl_callback_new_dio_interval */
 void
-tsch_rpl_callback_new_dio_interval(uint8_t dio_interval)
+tsch_rpl_callback_new_dio_interval(clock_time_t dio_interval)
 {
   /* Transmit EBs only if we have a valid rank as per 6TiSCH minimal */
   rpl_dag_t *dag;
-  rpl_instance_t *instance;
   rpl_rank_t root_rank;
   rpl_rank_t dag_rank;
 #if UIP_CONF_IPV6_RPL_LITE
-  instance = &curr_instance;
   dag = &curr_instance.dag;
   root_rank = ROOT_RANK;
   dag_rank = DAG_RANK(dag->rank);
 #else
+  rpl_instance_t *instance;
   dag = rpl_get_any_dag();
   instance = dag != NULL ? dag->instance : NULL;
   root_rank = ROOT_RANK(instance);
@@ -106,11 +105,11 @@ tsch_rpl_callback_new_dio_interval(uint8_t dio_interval)
       tsch_set_coordinator(1);
     }
     /* Set EB period */
-    tsch_set_eb_period((CLOCK_SECOND * 1UL << instance->dag.dio_intcurrent) / 1000);
+    tsch_set_eb_period(dio_interval);
     /* Set join priority based on RPL rank */
     tsch_set_join_priority(dag_rank - 1);
   } else {
-    tsch_set_eb_period(0);
+    tsch_set_eb_period(TSCH_EB_PERIOD);
   }
 }
 /*---------------------------------------------------------------------------*/
diff --git a/core/net/mac/tsch/tsch-rpl.h b/core/net/mac/tsch/tsch-rpl.h
index 180ec2bdd..0ba747a45 100644
--- a/core/net/mac/tsch/tsch-rpl.h
+++ b/core/net/mac/tsch/tsch-rpl.h
@@ -50,8 +50,8 @@ void tsch_rpl_callback_joining_network(void);
  * To use, set #define TSCH_CALLBACK_LEAVING_NETWORK tsch_rpl_callback_leaving_network */
 void tsch_rpl_callback_leaving_network(void);
 /* Set TSCH EB period based on current RPL DIO period.
- * To use, set #define RPL_CALLBACK_PARENT_SWITCH tsch_rpl_callback_new_dio_interval */
-void tsch_rpl_callback_new_dio_interval(uint8_t dio_interval);
+ * To use, set #define RPL_CALLBACK_NEW_DIO_INTERVAL tsch_rpl_callback_new_dio_interval */
+void tsch_rpl_callback_new_dio_interval(clock_time_t dio_interval);
 /* Set TSCH time source based on current RPL preferred parent.
  * To use, set #define RPL_CALLBACK_PARENT_SWITCH tsch_rpl_callback_parent_switch */
 void tsch_rpl_callback_parent_switch(rpl_parent_t *old, rpl_parent_t *new);
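/*
 * Illustrative sketch (an editor's aside, not part of the patch): the callback
 * signature change above means RPL_CALLBACK_NEW_DIO_INTERVAL now receives the
 * DIO interval as a clock_time_t period in ticks instead of the raw Trickle
 * exponent. The helper below is hypothetical (the name is ours); it mirrors the
 * conversion that new_dio_interval() performs in rpl-timers.c further down,
 * where 2^dio_intcurrent is a duration in milliseconds.
 */
#include <stdint.h>
#include "contiki.h"

static clock_time_t
dio_interval_to_ticks(uint8_t dio_intcurrent)
{
  /* 2^dio_intcurrent milliseconds, scaled to clock ticks */
  return (CLOCK_SECOND * 1UL << dio_intcurrent) / 1000;
}

/* A platform wires the TSCH hook up as documented in tsch-rpl.h above:
 * #define RPL_CALLBACK_NEW_DIO_INTERVAL tsch_rpl_callback_new_dio_interval */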
diff --git a/core/net/rpl-lite/rpl-conf.h b/core/net/rpl-lite/rpl-conf.h
index 22cd79229..03534ac76 100644
--- a/core/net/rpl-lite/rpl-conf.h
+++ b/core/net/rpl-lite/rpl-conf.h
@@ -122,7 +122,7 @@
 #ifdef RPL_CONF_WITH_DAO_ACK
 #define RPL_WITH_DAO_ACK RPL_CONF_WITH_DAO_ACK
 #else
-#define RPL_WITH_DAO_ACK 0
+#define RPL_WITH_DAO_ACK 1
 #endif /* RPL_CONF_WITH_DAO_ACK */
 
 /*
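/*
 * Illustrative sketch (an editor's aside, not part of the patch): DAO-ACK is
 * now on by default. A project that wants the previous behaviour opts out via
 * the RPL_CONF_WITH_DAO_ACK hook shown above, e.g. in its project-conf.h --
 * the same override the renamed 28-hour regression test passes on the Cooja
 * command line as DEFINES=RPL_CONF_WITH_DAO_ACK=0. The header guard name is
 * ours.
 */
#ifndef PROJECT_CONF_H_
#define PROJECT_CONF_H_

/* Disable DAO-ACK; nodes then rely on the "reachable as soon as a DAO is
   sent" behaviour added in rpl-timers.c below. */
#define RPL_CONF_WITH_DAO_ACK 0

#endif /* PROJECT_CONF_H_ */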
diff --git a/core/net/rpl-lite/rpl-const.h b/core/net/rpl-lite/rpl-const.h
index e7fe0eed1..9a3ae11a2 100644
--- a/core/net/rpl-lite/rpl-const.h
+++ b/core/net/rpl-lite/rpl-const.h
@@ -49,7 +49,7 @@
 /* Special value indicating infinite lifetime. */
 #define RPL_INFINITE_LIFETIME           0xFF
 #define RPL_ROUTE_INFINITE_LIFETIME     0xFFFFFFFF
-#define RPL_INFINITE_RANK               0xffff
+#define RPL_INFINITE_RANK               0xFFFF
 
 /*---------------------------------------------------------------------------*/
 /* IANA Routing Metric/Constraint Type as defined in RFC6551 */
diff --git a/core/net/rpl-lite/rpl-dag.c b/core/net/rpl-lite/rpl-dag.c
index d03efa9a0..9663ad141 100644
--- a/core/net/rpl-lite/rpl-dag.c
+++ b/core/net/rpl-lite/rpl-dag.c
@@ -136,7 +136,7 @@ void
 rpl_global_repair(void)
 {
   if(rpl_dag_root_is_root()) {
-    LOG_WARN("initiating global repair (version=%u, rank=%u)\n",
+    LOG_WARN("initiating global repair, version %u, rank %u\n",
              curr_instance.dag.version, curr_instance.dag.rank);
 #if LOG_INFO_ENABLED
     rpl_neighbor_print_list("Global repair");
@@ -153,7 +153,7 @@ static void
 global_repair_non_root(rpl_dio_t *dio)
 {
   if(!rpl_dag_root_is_root()) {
-    LOG_WARN("participating in global repair (version=%u, rank=%u)\n",
+    LOG_WARN("participating in global repair, version %u, rank %u\n",
              curr_instance.dag.version, curr_instance.dag.rank);
 #if LOG_INFO_ENABLED
     rpl_neighbor_print_list("Global repair");
@@ -170,6 +170,7 @@ rpl_local_repair(const char *str)
   if(curr_instance.used) { /* Check needed because this is a public function */
     LOG_WARN("local repair (%s)\n", str);
     curr_instance.of->reset(); /* Reset OF */
+    curr_instance.dag.is_reachable = 0; /* Assume we are no longer reachable */
     rpl_neighbor_remove_all(); /* Remove all neighbors */
     rpl_timers_dio_reset("Local repair"); /* Reset Trickle timer */
     rpl_timers_schedule_state_update();
@@ -183,7 +184,6 @@ rpl_dag_update_state(void)
   if(curr_instance.used) {
     if(!rpl_dag_root_is_root()) {
       rpl_nbr_t *old_parent = curr_instance.dag.preferred_parent;
-      rpl_rank_t old_rank = curr_instance.dag.rank;
 
       /* Any scheduled state update is no longer needed */
       rpl_timers_unschedule_state_update();
@@ -199,18 +199,23 @@ rpl_dag_update_state(void)
         curr_instance.dag.lowest_rank = curr_instance.dag.rank;
       }
 
-      /* if new parent, schedule DAO */
+      /* Parent switch */
       if(curr_instance.dag.preferred_parent != old_parent) {
+        /* Schedule a DAO */
         rpl_timers_schedule_dao();
+        /* We just got a parent (was NULL), reset trickle timer to advertise this */
+        if(old_parent == NULL) {
+          rpl_timers_dio_reset("Got parent");
+        }
+        /* We have no more parent, schedule DIS to get a chance to hear updated state */
+        if(curr_instance.dag.preferred_parent == NULL) {
+          LOG_WARN("no parent, scheduling periodic DIS\n");
+          rpl_timers_schedule_periodic_dis();
+        }
 #if LOG_INFO_ENABLED
         rpl_neighbor_print_list("Parent switch");
 #endif /* LOG_INFO_ENABLED */
       }
-
-      if(curr_instance.dag.rank != old_rank && curr_instance.dag.rank == RPL_INFINITE_RANK) {
-        LOG_WARN("intinite rank, trigger local repair\n");
-        rpl_local_repair("Infinite rank");
-      }
     }
 
     /* Finally, update metric container */
@@ -308,8 +313,10 @@ process_dio_from_current_dag(uip_ipaddr_t *from, rpl_dio_t *dio)
     return;
   }
 
-  /* Refresh lifetime at every DIO from preferred parent. Use same lifetime as for routes */
-  if(p != NULL && p == curr_instance.dag.preferred_parent) {
+  /* Init lifetime if not set yet. Refresh it at every DIO from preferred parent.
+     Use same lifetime as for routes. */
+  if(curr_instance.dag.lifetime == 0 ||
+     (p != NULL && p == curr_instance.dag.preferred_parent)) {
     curr_instance.dag.lifetime = RPL_LIFETIME(dio->default_lifetime);
   }
 
@@ -319,6 +326,8 @@ process_dio_from_current_dag(uip_ipaddr_t *from, rpl_dio_t *dio)
   if(curr_instance.mop != RPL_MOP_NO_DOWNWARD_ROUTES) {
     if(p != NULL && p == curr_instance.dag.preferred_parent && rpl_lollipop_greater_than(dio->dtsn, last_dtsn)) {
       RPL_LOLLIPOP_INCREMENT(curr_instance.dtsn_out);
+      LOG_INFO("DTSN increment %u->%u, schedule new DAO with DTSN %u\n",
+               last_dtsn, dio->dtsn, curr_instance.dtsn_out);
       rpl_timers_schedule_dao();
     }
   }
@@ -487,7 +496,6 @@ rpl_process_dao_ack(uint8_t sequence, uint8_t status)
   }
   /* Is this an ACK for our last DAO? */
   if(sequence == curr_instance.dag.dao_last_seqno) {
-    /* stop the retransmit timer when the ACK arrived */
     curr_instance.dag.is_reachable = status < RPL_DAO_ACK_UNABLE_TO_ACCEPT;
 
     if(status >= RPL_DAO_ACK_UNABLE_TO_ACCEPT) {
@@ -567,6 +575,7 @@ rpl_dag_init_root(uint8_t instance_id, uip_ipaddr_t *dag_id,
   curr_instance.dag.rank = ROOT_RANK;
   curr_instance.dag.lifetime = RPL_LIFETIME(RPL_INFINITE_LIFETIME);
   curr_instance.dag.dio_intcurrent = RPL_DIO_INTERVAL_MIN;
+  curr_instance.dag.is_reachable = 1;
 
   rpl_timers_dio_reset("Init root");
diff --git a/core/net/rpl-lite/rpl-icmp6.c b/core/net/rpl-lite/rpl-icmp6.c
index ec5c0f5c2..1e54c2237 100644
--- a/core/net/rpl-lite/rpl-icmp6.c
+++ b/core/net/rpl-lite/rpl-icmp6.c
@@ -323,8 +323,9 @@ dio_input(void)
   LOG_INFO_6ADDR(&from);
   LOG_INFO_(", instance_id %u, DAG ID ", (unsigned)dio.instance_id);
   LOG_INFO_6ADDR(&dio.dag_id);
-  LOG_INFO_(", version %u, rank %u\n",
+  LOG_INFO_(", version %u, dtsn %u, rank %u\n",
             (unsigned)dio.version,
+            (unsigned)dio.dtsn,
             (unsigned)dio.rank);
 
   rpl_process_dio(&from, &dio);
@@ -558,8 +559,8 @@ rpl_icmp6_dao_output(uint8_t lifetime)
   if(!curr_instance.used || curr_instance.dag.preferred_parent == NULL ||
      prefix == NULL || parent_ipaddr == NULL || curr_instance.mop == RPL_MOP_NO_DOWNWARD_ROUTES) {
-    LOG_WARN("rpl_icmp6_dao_output: node not ready to send a DAO (used %u, pref parent %p, prefix %p, parent_ipaddr %p, mop %u)\n",
-             curr_instance.used, curr_instance.dag.preferred_parent, prefix, parent_ipaddr, curr_instance.mop);
+    LOG_WARN("rpl_icmp6_dao_output: node not ready to send a DAO (used %u, pref parent %u, prefix %u, mop %u)\n",
+             curr_instance.used, curr_instance.dag.preferred_parent != NULL && parent_ipaddr != NULL, prefix != NULL, curr_instance.mop);
     return;
   }
diff --git a/core/net/rpl-lite/rpl-mrhof.c b/core/net/rpl-lite/rpl-mrhof.c
index f9c70913e..99a15d79c 100644
--- a/core/net/rpl-lite/rpl-mrhof.c
+++ b/core/net/rpl-lite/rpl-mrhof.c
@@ -69,22 +69,21 @@
 #ifdef RPL_MRHOF_CONF_SQUARED_ETX
 #define RPL_MRHOF_SQUARED_ETX RPL_MRHOF_CONF_SQUARED_ETX
 #else /* RPL_MRHOF_CONF_SQUARED_ETX */
-#define RPL_MRHOF_SQUARED_ETX 0
+#define RPL_MRHOF_SQUARED_ETX 1
 #endif /* RPL_MRHOF_CONF_SQUARED_ETX */
 
 #if !RPL_MRHOF_SQUARED_ETX
 /* Configuration parameters of RFC6719. Reject parents that have a higher
- * link metric than the following. The default value is 512 but we use 1024. */
-#define MAX_LINK_METRIC 1024 /* Eq ETX of 8 */
+ * link metric than the following. The default value is 512. */
+#define MAX_LINK_METRIC 512 /* Eq ETX of 4 */
 /* Hysteresis of MRHOF: the rank must differ more than PARENT_SWITCH_THRESHOLD_DIV
  * in order to switch preferred parent. Default in RFC6719: 192, eq ETX of 1.5.
  * We use a more aggressive setting: 96, eq ETX of 0.75.
  */
-#define PARENT_SWITCH_THRESHOLD 96 /* Eq ETX of 0.75 */
+#define PARENT_SWITCH_THRESHOLD 192 /* Eq ETX of 1.5 */
 #else /* !RPL_MRHOF_SQUARED_ETX */
 #define MAX_LINK_METRIC 2048 /* Eq ETX of 4 */
-#define PARENT_SWITCH_THRESHOLD 160 /* Eq ETX of 1.25 (results in a churn comparable
-to the threshold of 96 in the non-squared case) */
+#define PARENT_SWITCH_THRESHOLD 512 /* Eq ETX of 2 */
 #endif /* !RPL_MRHOF_SQUARED_ETX */
 
 /* Reject parents that have a higher path cost than the following. */
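/*
 * Illustrative sketch (an editor's aside, not part of the patch): with
 * RPL_MRHOF_SQUARED_ETX now the default, link cost grows quadratically with
 * ETX. The "Eq ETX" comments above only add up if the link metric uses a
 * fixed-point scale of 128 per ETX unit, i.e. cost = etx_fixed^2 / 128, which
 * makes 2048 correspond to ETX 4 and 512 to ETX 2 -- that scale is our
 * assumption. The hysteresis then requires a candidate parent to improve the
 * path cost by the equivalent of ETX^2 = 4 before a switch. Names below are
 * ours, not the MRHOF API.
 */
#include <stdint.h>

#define SKETCH_ETX_DIVISOR              128  /* assumed fixed-point ETX scale */
#define SKETCH_PARENT_SWITCH_THRESHOLD  512  /* squared-ETX value set above */

/* Squared-ETX link cost, as implied by the "Eq ETX" comments. */
static uint16_t
sketch_link_cost(uint16_t etx_fixed)
{
  return (uint16_t)(((uint32_t)etx_fixed * etx_fixed) / SKETCH_ETX_DIVISOR);
}

/* Hysteresis: switch only if the candidate beats the current preferred parent
 * by more than the threshold. */
static int
sketch_should_switch(uint16_t current_path_cost, uint16_t candidate_path_cost)
{
  return candidate_path_cost + SKETCH_PARENT_SWITCH_THRESHOLD < current_path_cost;
}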
diff --git a/core/net/rpl-lite/rpl-neighbor.c b/core/net/rpl-lite/rpl-neighbor.c
index 7bb9a56f6..304f14bdd 100644
--- a/core/net/rpl-lite/rpl-neighbor.c
+++ b/core/net/rpl-lite/rpl-neighbor.c
@@ -85,14 +85,15 @@ rpl_neighbor_print_list(const char *str)
 
   LOG_INFO("nbr: own state, addr ");
   LOG_INFO_6ADDR(rpl_get_global_address());
-  LOG_INFO_(" MOP %u OCP %u rank %u dioint %u, DS6 nbr count %u (%s)\n",
+  LOG_INFO_(" MOP %u OCP %u rank %u max-rank %u, dioint %u, DS6 nbr count %u (%s)\n",
       curr_instance.mop, curr_instance.of->ocp, curr_rank,
+      curr_instance.max_rankinc != 0 ? curr_instance.dag.lowest_rank + curr_instance.max_rankinc : 0xffff,
       curr_dio_interval, uip_ds6_nbr_num(), str);
   while(nbr != NULL) {
     const struct link_stats *stats = rpl_neighbor_get_link_stats(nbr);
     LOG_INFO("nbr: ");
     LOG_INFO_6ADDR(rpl_neighbor_get_ipaddr(nbr));
-    LOG_INFO_(" %5u, %5u => %5u -- %2u %c%c%c%c (last tx %u min ago)\n",
+    LOG_INFO_(" %5u, %5u => %5u -- %2u %c%c%c%c",
       nbr->rank,
       rpl_neighbor_get_link_metric(nbr),
       rpl_neighbor_rank_via_nbr(nbr),
@@ -100,9 +101,13 @@ rpl_neighbor_print_list(const char *str)
       (nbr->rank == ROOT_RANK) ? 'r' : ' ',
       (acceptable_rank(rpl_neighbor_rank_via_nbr(nbr)) && curr_instance.of->nbr_is_acceptable_parent(nbr)) ? 'a' : ' ',
       link_stats_is_fresh(stats) ? 'f' : ' ',
-      nbr == curr_instance.dag.preferred_parent ? 'p' : ' ',
-      (unsigned)((clock_now - stats->last_tx_time) / (60 * CLOCK_SECOND))
+      nbr == curr_instance.dag.preferred_parent ? 'p' : ' '
     );
+    if(stats->last_tx_time > 0) {
+      LOG_INFO_(" (last tx %u min ago)\n", (unsigned)((clock_now - stats->last_tx_time) / (60 * CLOCK_SECOND)));
+    } else {
+      LOG_INFO_(" (no tx)\n");
+    }
     nbr = nbr_table_next(rpl_neighbors, nbr);
   }
   LOG_INFO("nbr: end of list\n");
@@ -216,10 +221,10 @@ void
 rpl_neighbor_set_preferred(rpl_nbr_t *nbr)
 {
   if(curr_instance.dag.preferred_parent != nbr) {
-    LOG_INFO("parent switch ");
-    LOG_INFO_6ADDR(rpl_neighbor_get_ipaddr(nbr));
-    LOG_INFO_(" used to be ");
+    LOG_INFO("parent switch: ");
     LOG_INFO_6ADDR(rpl_neighbor_get_ipaddr(curr_instance.dag.preferred_parent));
+    LOG_INFO_(" -> ");
+    LOG_INFO_6ADDR(rpl_neighbor_get_ipaddr(nbr));
     LOG_INFO_("\n");
 
 #ifdef RPL_CALLBACK_PARENT_SWITCH
@@ -324,20 +329,30 @@ rpl_neighbor_select_best(void)
     if(rpl_neighbor_is_fresh(best)) {
       return best;
     } else {
-      /* The best is not fresh. Look for the best fresh now. */
-      rpl_nbr_t *best_fresh = best_parent(1);
+      rpl_nbr_t *best_fresh;
+
+      /* The best is not fresh. Probe it. */
+      curr_instance.dag.urgent_probing_target = best;
+      LOG_WARN("best parent is not fresh, schedule urgent probing to ");
+      LOG_WARN_6ADDR(rpl_neighbor_get_ipaddr(best));
+      LOG_WARN_("\n");
+      rpl_schedule_probing();
+
+      /* Look for the best fresh parent. */
+      best_fresh = best_parent(1);
       if(best_fresh == NULL) {
-        /* No fresh parent around, select best (non-fresh) */
-        return best;
+        if(curr_instance.dag.preferred_parent == NULL) {
+          /* We will wait to find a fresh node before selecting our first parent */
+          return NULL;
+        } else {
+          /* We already have a parent, now stick to the best and count on
+             urgent probing to get a fresh parent soon */
+          return best;
+        }
       } else {
        /* Select best fresh */
        return best_fresh;
      }
-      /* Probe the best parent shortly in order to get a fresh estimate */
-      curr_instance.dag.urgent_probing_target = best;
-      rpl_schedule_probing();
-      /* Stick to current preferred parent until a better one is fresh */
-      return curr_instance.dag.preferred_parent;
     }
   } else {
     return NULL;
diff --git a/core/net/rpl-lite/rpl-timers.c b/core/net/rpl-lite/rpl-timers.c
index 0cfda4abe..d50ef987b 100644
--- a/core/net/rpl-lite/rpl-timers.c
+++ b/core/net/rpl-lite/rpl-timers.c
@@ -55,7 +55,7 @@
 
 /* A configurable function called after update of the RPL DIO interval */
 #ifdef RPL_CALLBACK_NEW_DIO_INTERVAL
-void RPL_CALLBACK_NEW_DIO_INTERVAL(uint8_t dio_interval);
+void RPL_CALLBACK_NEW_DIO_INTERVAL(clock_time_t dio_interval);
 #endif /* RPL_CALLBACK_NEW_DIO_INTERVAL */
 
 #ifdef RPL_PROBING_SELECT_FUNC
@@ -67,7 +67,7 @@ clock_time_t RPL_PROBING_DELAY_FUNC(void);
 #endif /* RPL_PROBING_DELAY_FUNC */
 
 #define PERIODIC_DELAY_SECONDS 60
-#define PERIODIC_DELAY (60 * CLOCK_SECOND)
+#define PERIODIC_DELAY ((PERIODIC_DELAY_SECONDS) * CLOCK_SECOND)
 
 static void handle_dis_timer(void *ptr);
 static void handle_dio_timer(void *ptr);
@@ -92,15 +92,19 @@ static struct ctimer periodic_timer; /* Not part of a DAG because used for gener
 void
 rpl_timers_schedule_periodic_dis(void)
 {
-  clock_time_t expiration_time = RPL_DIS_INTERVAL / 2 + (random_rand() % (RPL_DIS_INTERVAL));
-  ctimer_set(&dis_timer, expiration_time, handle_dis_timer, NULL);
+  if(etimer_expired(&dis_timer.etimer)) {
+    clock_time_t expiration_time = RPL_DIS_INTERVAL / 2 + (random_rand() % (RPL_DIS_INTERVAL));
+    ctimer_set(&dis_timer, expiration_time, handle_dis_timer, NULL);
+  }
 }
 /*---------------------------------------------------------------------------*/
 static void
 handle_dis_timer(void *ptr)
 {
   if(!rpl_dag_root_is_root() &&
-     (!curr_instance.used || curr_instance.dag.preferred_parent == NULL)) {
+     (!curr_instance.used ||
+      curr_instance.dag.preferred_parent == NULL ||
+      curr_instance.dag.rank == RPL_INFINITE_RANK)) {
     /* Send DIS and schedule next */
     rpl_icmp6_dis_output(NULL);
     rpl_timers_schedule_periodic_dis();
@@ -138,7 +142,7 @@ new_dio_interval(void)
   ctimer_set(&curr_instance.dag.dio_timer, ticks, &handle_dio_timer, NULL);
 
 #ifdef RPL_CALLBACK_NEW_DIO_INTERVAL
-  RPL_CALLBACK_NEW_DIO_INTERVAL(curr_instance.dag.dio_intcurrent);
+  RPL_CALLBACK_NEW_DIO_INTERVAL((CLOCK_SECOND * 1UL << curr_instance.dag.dio_intcurrent) / 1000);
 #endif /* RPL_CALLBACK_NEW_DIO_INTERVAL */
 }
 /*---------------------------------------------------------------------------*/
@@ -173,6 +177,7 @@ handle_dio_timer(void *ptr)
     if((count++ % RPL_TRICKLE_REFRESH_DAO_ROUTES) == 0) {
       /* Request new DAO to refresh route. */
       RPL_LOLLIPOP_INCREMENT(curr_instance.dtsn_out);
+      LOG_INFO("trigger DAO updates with a DTSN increment (%u)\n", curr_instance.dtsn_out);
     }
   }
 #endif /* RPL_TRICKLE_REFRESH_DAO_ROUTES */
@@ -250,7 +255,7 @@ schedule_dao_refresh(void)
 void
 rpl_timers_schedule_dao(void)
 {
-  if(curr_instance.used) {
+  if(curr_instance.used && curr_instance.mop != RPL_MOP_NO_DOWNWARD_ROUTES) {
     /* No need for aggregation delay as per RFC 6550 section 9.5, as this only
      * serves storing mode. Use simply delay instead, with the only PURPOSE
      * to reduce congestion. */
@@ -287,7 +292,10 @@ handle_dao_timer(void *ptr)
   curr_instance.dag.dao_transmissions++;
   /* Schedule next retransmission */
   schedule_dao_retransmission();
-#endif /* RPL_WITH_DAO_ACK */
+#else /* RPL_WITH_DAO_ACK */
+  /* No DAO-ACK: assume we are reachable as soon as we send a DAO */
+  curr_instance.dag.is_reachable = 1;
+#endif /* !RPL_WITH_DAO_ACK */
 
   curr_instance.dag.dao_last_seqno = curr_instance.dag.dao_curr_seqno;
   /* Send a DAO with own prefix as target and default lifetime */
@@ -313,6 +321,7 @@ rpl_timers_schedule_dao_ack(uip_ipaddr_t *target, uint16_t sequence)
     ctimer_set(&curr_instance.dag.dao_ack_timer, 0, handle_dao_ack_timer, NULL);
   }
 }
+/*---------------------------------------------------------------------------*/
 static void
 handle_dao_ack_timer(void *ptr)
 {
@@ -329,7 +338,7 @@ get_probing_delay(void)
 {
   if(curr_instance.used && curr_instance.dag.urgent_probing_target != NULL) {
     /* Urgent probing needed (to find out if a neighbor may become preferred parent) */
-    return random_rand() % (CLOCK_SECOND * 10);
+    return random_rand() % (CLOCK_SECOND * 4);
   } else {
     /* Else, use normal probing interval */
     return ((RPL_PROBING_INTERVAL) / 2) + random_rand() % (RPL_PROBING_INTERVAL);
@@ -455,10 +464,10 @@ handle_periodic_timer(void *ptr)
     rpl_ns_periodic(PERIODIC_DELAY_SECONDS);
   }
 
-  if(!curr_instance.used || curr_instance.dag.preferred_parent == NULL) {
-    if(etimer_expired(&dis_timer.etimer)) {
-      rpl_timers_schedule_periodic_dis(); /* Schedule DIS if needed */
-    }
+  if(!curr_instance.used ||
+     curr_instance.dag.preferred_parent == NULL ||
+     curr_instance.dag.rank == RPL_INFINITE_RANK) {
+    rpl_timers_schedule_periodic_dis(); /* Schedule DIS if needed */
   }
 
   ctimer_reset(&periodic_timer);
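/*
 * Illustrative sketch (an editor's aside, not part of the patch): condensed
 * view of the reachability rule the hunks above introduce. With DAO-ACK
 * compiled in, the node only becomes reachable once rpl_process_dao_ack()
 * sees an acceptable status; without DAO-ACK, handle_dao_timer() now marks
 * the node reachable as soon as a DAO goes out. Helper and parameter names
 * are ours.
 */
#include <stdint.h>

static uint8_t
sketch_reachable_after_dao(int with_dao_ack, int ack_received, uint8_t ack_status,
                           uint8_t status_unable_to_accept)
{
  if(!with_dao_ack) {
    return 1; /* no ACKs: optimistically reachable once the DAO is sent */
  }
  /* with ACKs: reachable only after a DAO-ACK below the "unable to accept" range */
  return ack_received && ack_status < status_unable_to_accept;
}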
diff --git a/core/net/rpl-lite/rpl.c b/core/net/rpl-lite/rpl.c
index 77a8e3941..0cb526e97 100644
--- a/core/net/rpl-lite/rpl.c
+++ b/core/net/rpl-lite/rpl.c
@@ -95,7 +95,7 @@ rpl_link_neighbor_callback(const linkaddr_t *addr, int status, int numtx)
        Updating from here is unsafe; postpone */
     LOG_INFO("packet sent to ");
     LOG_INFO_LLADDR(addr);
-    LOG_INFO_(", status %u, tx %u\n", status, numtx);
+    LOG_INFO_(", status %u, tx %u, new link metric %u\n", status, numtx, rpl_neighbor_get_link_metric(nbr));
     rpl_timers_schedule_state_update();
   }
 }
diff --git a/core/net/rpl/rpl-timers.c b/core/net/rpl/rpl-timers.c
index 09ff1c813..7f071b155 100644
--- a/core/net/rpl/rpl-timers.c
+++ b/core/net/rpl/rpl-timers.c
@@ -54,7 +54,7 @@
 
 /* A configurable function called after update of the RPL DIO interval */
 #ifdef RPL_CALLBACK_NEW_DIO_INTERVAL
-void RPL_CALLBACK_NEW_DIO_INTERVAL(uint8_t dio_interval);
+void RPL_CALLBACK_NEW_DIO_INTERVAL(clock_time_t dio_interval);
 #endif /* RPL_CALLBACK_NEW_DIO_INTERVAL */
 
 #ifdef RPL_PROBING_SELECT_FUNC
@@ -150,7 +150,7 @@ new_dio_interval(rpl_instance_t *instance)
   ctimer_set(&instance->dio_timer, ticks, &handle_dio_timer, instance);
 
 #ifdef RPL_CALLBACK_NEW_DIO_INTERVAL
-  RPL_CALLBACK_NEW_DIO_INTERVAL(instance->dio_intcurrent);
+  RPL_CALLBACK_NEW_DIO_INTERVAL((CLOCK_SECOND * 1UL << instance->dio_intcurrent) / 1000);
 #endif /* RPL_CALLBACK_NEW_DIO_INTERVAL */
 }
 /*---------------------------------------------------------------------------*/
diff --git a/regression-tests/14-rpl-lite/04-rpl-28-hours-dao-ack.csc b/regression-tests/14-rpl-lite/04-rpl-28-hours-no-dao-ack.csc
similarity index 98%
rename from regression-tests/14-rpl-lite/04-rpl-28-hours-dao-ack.csc
rename to regression-tests/14-rpl-lite/04-rpl-28-hours-no-dao-ack.csc
index fa8307300..ba628ae81 100644
--- a/regression-tests/14-rpl-lite/04-rpl-28-hours-dao-ack.csc
+++ b/regression-tests/14-rpl-lite/04-rpl-28-hours-no-dao-ack.csc
@@ -26,7 +26,7 @@
       Sender
       [CONFIG_DIR]/code/sender-node.c
       make TARGET=cooja clean
-make sender-node.cooja DEFINES=RPL_CONF_WITH_DAO_ACK=1 TARGET=cooja
+make sender-node.cooja DEFINES=RPL_CONF_WITH_DAO_ACK=0 TARGET=cooja
       org.contikios.cooja.interfaces.Position
       org.contikios.cooja.interfaces.Battery
       org.contikios.cooja.contikimote.interfaces.ContikiVib
@@ -51,7 +51,7 @@ make sender-node.cooja DEFINES=RPL_CONF_WITH_DAO_ACK=1 TARGET=cooja
       RPL root
       [CONFIG_DIR]/code/root-node.c
       make TARGET=cooja clean
-make root-node.cooja DEFINES=RPL_CONF_WITH_DAO_ACK=1 TARGET=cooja
+make root-node.cooja DEFINES=RPL_CONF_WITH_DAO_ACK=0 TARGET=cooja
       org.contikios.cooja.interfaces.Position
       org.contikios.cooja.interfaces.Battery
       org.contikios.cooja.contikimote.interfaces.ContikiVib
@@ -76,7 +76,7 @@
       Receiver
       [CONFIG_DIR]/code/receiver-node.c
       make TARGET=cooja clean
-make receiver-node.cooja DEFINES=RPL_CONF_WITH_DAO_ACK=1 TARGET=cooja
+make receiver-node.cooja DEFINES=RPL_CONF_WITH_DAO_ACK=0 TARGET=cooja
       org.contikios.cooja.interfaces.Position
       org.contikios.cooja.interfaces.Battery
       org.contikios.cooja.contikimote.interfaces.ContikiVib