#define RTE_MEM 1
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>
#include <limits.h>
#include <inttypes.h>
#include <unistd.h>
#include <pthread.h>
#include <fcntl.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <sched.h>
#include <rte_prefetch.h>
#include <rte_per_lcore.h>
#include <rte_atomic.h>
#include <rte_atomic_64.h>
#include <rte_log.h>
#include <rte_common.h>
#include <rte_branch_prediction.h>
#include <rte_malloc.h>

#include "lthread_api.h"
#include "lthread_int.h"
#include "lthread_sched.h"
#include "lthread_objcache.h"
#include "lthread_timer.h"
#include "lthread_mutex.h"
#include "lthread_cond.h"
#include "lthread_tls.h"
#include "lthread_diag.h"
static rte_atomic16_t num_schedulers;
static rte_atomic16_t active_schedulers;

/* one scheduler per lcore */
RTE_DEFINE_PER_LCORE(struct lthread_sched *, this_sched) = NULL;

struct lthread_sched *schedcore[LTHREAD_MAX_LCORES];
diag_callback diag_cb;
uint64_t diag_mask;

/* constructor: reset scheduler bookkeeping before main() runs */
void lthread_sched_ctor(void) __attribute__ ((constructor));
void lthread_sched_ctor(void)
{
    memset(schedcore, 0, sizeof(schedcore));
    rte_atomic16_init(&num_schedulers);
    rte_atomic16_set(&num_schedulers, 1);
    rte_atomic16_init(&active_schedulers);
    rte_atomic16_set(&active_schedulers, 0);
    diag_cb = NULL;
}
enum sched_alloc_phase {
    SCHED_ALLOC_OK,
    SCHED_ALLOC_QNODE_POOL,
    SCHED_ALLOC_READY_QUEUE,
    SCHED_ALLOC_PREADY_QUEUE,
    SCHED_ALLOC_LTHREAD_CACHE,
    SCHED_ALLOC_STACK_CACHE,
    SCHED_ALLOC_PERLT_CACHE,
    SCHED_ALLOC_TLS_CACHE,
    SCHED_ALLOC_COND_CACHE,
    SCHED_ALLOC_MUTEX_CACHE,
};
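
/*
 * Allocate the per-scheduler resources.
 * Returns SCHED_ALLOC_OK on success, otherwise the phase that failed.
 */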
static int
_lthread_sched_alloc_resources(struct lthread_sched *new_sched)
{
    int alloc_status;
    /* allocate each resource in turn; on failure break out with
     * alloc_status recording the phase that failed so it can be
     * rolled back below
     */
    do {
        /* per scheduler queue node pool */
        alloc_status = SCHED_ALLOC_QNODE_POOL;
        new_sched->qnode_pool =
            _qnode_pool_create("qnode pool", LTHREAD_PREALLOC);
        if (new_sched->qnode_pool == NULL)
            break;
        
        alloc_status = SCHED_ALLOC_READY_QUEUE;
        new_sched->ready = _lthread_queue_create("ready queue");
        if (new_sched->ready == NULL)
            break;
        
        alloc_status = SCHED_ALLOC_PREADY_QUEUE;
        new_sched->pready = _lthread_queue_create("pready queue");
        if (new_sched->pready == NULL)
            break;
        
        alloc_status = SCHED_ALLOC_LTHREAD_CACHE;
        new_sched->lthread_cache =
            _lthread_objcache_create("lthread cache",
                        sizeof(struct lthread),
                        LTHREAD_PREALLOC);
        if (new_sched->lthread_cache == NULL)
            break;
        
        alloc_status = SCHED_ALLOC_STACK_CACHE;
        new_sched->stack_cache =
            _lthread_objcache_create("stack_cache",
                        sizeof(struct lthread_stack),
                        LTHREAD_PREALLOC);
        if (new_sched->stack_cache == NULL)
            break;
        
        alloc_status = SCHED_ALLOC_PERLT_CACHE;
        new_sched->per_lthread_cache =
            _lthread_objcache_create("per_lt cache",
                        RTE_PER_LTHREAD_SECTION_SIZE,
                        LTHREAD_PREALLOC);
        if (new_sched->per_lthread_cache == NULL)
            break;
        
        alloc_status = SCHED_ALLOC_TLS_CACHE;
        new_sched->tls_cache =
            _lthread_objcache_create("TLS cache",
                        sizeof(struct lthread_tls),
                        LTHREAD_PREALLOC);
        if (new_sched->tls_cache == NULL)
            break;
        
        alloc_status = SCHED_ALLOC_COND_CACHE;
        new_sched->cond_cache =
            _lthread_objcache_create("cond cache",
                        sizeof(struct lthread_cond),
                        LTHREAD_PREALLOC);
        if (new_sched->cond_cache == NULL)
            break;
        
        alloc_status = SCHED_ALLOC_MUTEX_CACHE;
        new_sched->mutex_cache =
            _lthread_objcache_create("mutex cache",
                        sizeof(struct lthread_mutex),
                        LTHREAD_PREALLOC);
        if (new_sched->mutex_cache == NULL)
            break;
        alloc_status = SCHED_ALLOC_OK;
    } while (0);
    
    /* roll back whatever phases had succeeded before the failure */
    switch (alloc_status) {
    case SCHED_ALLOC_MUTEX_CACHE:
        _lthread_objcache_destroy(new_sched->cond_cache);
        /* fall through */
    case SCHED_ALLOC_COND_CACHE:
        _lthread_objcache_destroy(new_sched->tls_cache);
        /* fall through */
    case SCHED_ALLOC_TLS_CACHE:
        _lthread_objcache_destroy(new_sched->per_lthread_cache);
        /* fall through */
    case SCHED_ALLOC_PERLT_CACHE:
        _lthread_objcache_destroy(new_sched->stack_cache);
        /* fall through */
    case SCHED_ALLOC_STACK_CACHE:
        _lthread_objcache_destroy(new_sched->lthread_cache);
        /* fall through */
    case SCHED_ALLOC_LTHREAD_CACHE:
        _lthread_queue_destroy(new_sched->pready);
        /* fall through */
    case SCHED_ALLOC_PREADY_QUEUE:
        _lthread_queue_destroy(new_sched->ready);
        /* fall through */
    case SCHED_ALLOC_READY_QUEUE:
        _qnode_pool_destroy(new_sched->qnode_pool);
        /* fall through */
    case SCHED_ALLOC_QNODE_POOL:
        /* fall through */
    case SCHED_ALLOC_OK:
        break;
    }
    return alloc_status;
}
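
/*
 * Create a scheduler on the current lcore and make it current (THIS_SCHED).
 */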
struct lthread_sched *_lthread_sched_create(size_t stack_size)
{
    int status;
    struct lthread_sched *new_sched;
    unsigned lcoreid = rte_lcore_id();

    RTE_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);

    if (stack_size == 0)
        stack_size = LTHREAD_MAX_STACK_SIZE;

    /* allocate the scheduler, cache aligned, on this lcore's socket */
    new_sched =
        rte_calloc_socket(NULL, 1, sizeof(struct lthread_sched),
                RTE_CACHE_LINE_SIZE,
                rte_socket_id());
    if (new_sched == NULL) {
        RTE_LOG(CRIT, LTHREAD,
            "Failed to allocate memory for scheduler\n");
        return NULL;
    }
    _lthread_key_pool_init();
    new_sched->stack_size = stack_size;
    new_sched->birth = rte_rdtsc();
    THIS_SCHED = new_sched;
    status = _lthread_sched_alloc_resources(new_sched);
    if (status != SCHED_ALLOC_OK) {
        RTE_LOG(CRIT, LTHREAD,
            "Failed to allocate resources for scheduler code = %d\n",
            status);
        rte_free(new_sched);
        return NULL;
    }
    bzero(&new_sched->ctx, sizeof(struct ctx));
    new_sched->lcore_id = lcoreid;
    schedcore[lcoreid] = new_sched;
    new_sched->run_flag = 1;
    DIAG_EVENT(new_sched, LT_DIAG_SCHED_CREATE, rte_lcore_id(), 0);
    return new_sched;
}
/* set the number of schedulers that will run */
int lthread_num_schedulers_set(int num)
{
    rte_atomic16_set(&num_schedulers, num);
    return (int)rte_atomic16_read(&num_schedulers);
}
/* return the number of schedulers currently active */
int lthread_active_schedulers(void)
{
    return (int)rte_atomic16_read(&active_schedulers);
}
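
/*
 * Shut down the scheduler running on the specified lcore.
 */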
void lthread_scheduler_shutdown(unsigned lcoreid)
{
    uint64_t coreid = (uint64_t) lcoreid;
    if (coreid < LTHREAD_MAX_LCORES) {
        if (schedcore[coreid] != NULL)
            schedcore[coreid]->run_flag = 0;
    }
}
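
/*
 * Shut down all schedulers.
 */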
void lthread_scheduler_shutdown_all(void)
{
    uint64_t i;

    /* wait for all schedulers to have started before stopping them */
    while (rte_atomic16_read(&active_schedulers) <
           rte_atomic16_read(&num_schedulers))
        sched_yield();
    for (i = 0; i < LTHREAD_MAX_LCORES; i++) {
        if (schedcore[i] != NULL)
            schedcore[i]->run_flag = 0;
    }
}
/* resume a suspended lthread */
static inline void _lthread_resume(struct lthread *lt);
static inline void _lthread_resume(struct lthread *lt)
{
    struct lthread_sched *sched = THIS_SCHED;
    struct lthread_stack *s;
    uint64_t state = lt->state;
#if LTHREAD_DIAG
    int init = 0;
#endif
    sched->current_lthread = lt;
    if (state & (BIT(ST_LT_CANCELLED) | BIT(ST_LT_EXITED))) {
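        /* the lthread has been cancelled or has exited; if it is
         * detached it can be freed immediately
         */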
        
        if (state & BIT(ST_LT_DETACH)) {
            _lthread_free(lt);
            sched->current_lthread = NULL;
            return;
        }
    }
    if (state & BIT(ST_LT_INIT)) {
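        /* first time this lthread has been run: bind it to this
         * scheduler and give it a stack and TLS before switching to it
         */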
        
        
        lt->sched = THIS_SCHED;
        
        s = _stack_alloc();
        lt->stack_container = s;
        _lthread_set_stack(lt, s->stack, s->stack_size);
        
        _lthread_tls_alloc(lt);
        lt->state = BIT(ST_LT_READY);
#if LTHREAD_DIAG
        init = 1;
#endif
    }
    DIAG_EVENT(lt, LT_DIAG_LTHREAD_RESUMED, init, lt);
    
    /* switch to the lthread; we return here when it yields or blocks */
    ctx_switch(&lt->ctx, &sched->ctx);

    /* cross-core queue writes are deferred until the context is saved */
    if (lt->pending_wr_queue != NULL) {
        struct lthread_queue *dest = lt->pending_wr_queue;
        lt->pending_wr_queue = NULL;
        
        _lthread_queue_insert_mp(dest, lt);
    }
    sched->current_lthread = NULL;
}
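
/*
 * Timer expiry callback: wake the sleeping lthread.
 */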
void
_sched_timer_cb(struct rte_timer *tim, void *arg)
{
    struct lthread *lt = (struct lthread *) arg;
    uint64_t state = lt->state;

    DIAG_EVENT(lt, LT_DIAG_LTHREAD_TMR_EXPIRED, &lt->tim, 0);

    rte_timer_stop(tim);

    if (lt->state & BIT(ST_LT_CANCELLED))
        (THIS_SCHED)->nb_blocked_threads--;
    lt->state = state | BIT(ST_LT_EXPIRED);
    _lthread_resume(lt);
    lt->state = state & CLEARBIT(ST_LT_EXPIRED);
}
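
/*
 * A scheduler is done when it has been told to stop and has no local,
 * peer or blocked lthreads left to run.
 */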
static inline int _lthread_sched_isdone(struct lthread_sched *sched)
{
    return (sched->run_flag == 0) &&
            (_lthread_queue_empty(sched->ready)) &&
            (_lthread_queue_empty(sched->pready)) &&
            (sched->nb_blocked_threads == 0);
}
/* increment the active count and wait for all schedulers to start */
static inline void _lthread_schedulers_sync_start(void)
{
    rte_atomic16_inc(&active_schedulers);

    while (rte_atomic16_read(&active_schedulers) <
           rte_atomic16_read(&num_schedulers))
        sched_yield();
}

/* decrement the counts and wait for all schedulers to stop */
static inline void _lthread_schedulers_sync_stop(void)
{
    rte_atomic16_dec(&active_schedulers);
    rte_atomic16_dec(&num_schedulers);

    while (rte_atomic16_read(&active_schedulers) > 0)
        sched_yield();
}
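
/*
 * Run the lthread scheduler on this lcore.
 * This is the main loop of the scheduler; it must be called from the
 * thread hosting the scheduler and returns only when all work is done.
 */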
void lthread_run(void)
{
    struct lthread_sched *sched = THIS_SCHED;
    struct lthread *lt = NULL;
        "starting scheduler %p on lcore %u phys core %u\n",
    
    _lthread_schedulers_sync_start();
    
    while (!_lthread_sched_isdone(sched)) {
        /* run any expired timers so sleeping lthreads get resumed */
        rte_timer_manage();

        lt = _lthread_queue_poll(sched->ready);
        if (lt != NULL)
            _lthread_resume(lt);
        lt = _lthread_queue_poll(sched->pready);
        if (lt != NULL)
            _lthread_resume(lt);
    }
    
    _lthread_schedulers_sync_stop();
    (THIS_SCHED) = NULL;
        "stopping scheduler %p on lcore %u phys core %u\n",
    fflush(stdout);
}
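
/*
 * Return the scheduler created on the given lcore, or NULL if none exists.
 */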
struct lthread_sched *_lthread_sched_get(unsigned int lcore_id)
{
    struct lthread_sched *res = NULL;
    if (lcore_id < LTHREAD_MAX_LCORES)
        res = schedcore[lcore_id];
    return res;
}
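
/*
 * Migrate the current lthread to the scheduler running on the specified
 * lcore.
 */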
int lthread_set_affinity(unsigned lcoreid)
{
    struct lthread *lt = THIS_LTHREAD;
    struct lthread_sched *dest_sched;
    if (unlikely(lcoreid >= LTHREAD_MAX_LCORES))
        return POSIX_ERRNO(EINVAL);

    DIAG_EVENT(lt, LT_DIAG_LTHREAD_AFFINITY, lcoreid, 0);

    dest_sched = schedcore[lcoreid];

    /* a scheduler must already have been created on the target lcore */
    if (unlikely(dest_sched == NULL))
        return POSIX_ERRNO(EINVAL);

    if (likely(dest_sched != THIS_SCHED)) {
        /* the write to the destination pready queue is deferred until
         * after this lthread's context has been saved (see _lthread_resume)
         */
        lt->sched = dest_sched;
        lt->pending_wr_queue = dest_sched->pready;
        _affinitize();
        return 0;
    }
    return 0;
}