Commit 4a7428c5 authored by Christopher Covington's avatar Christopher Covington Committed by Michael Tokarev
Browse files

s/cpu_get_real_ticks/cpu_get_host_ticks/



This should help clarify the purpose of the function that returns
the host system's CPU cycle count.

Signed-off-by: Christopher Covington <cov@codeaurora.org>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
ppc portion
Acked-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
parent ec5fd402
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -108,7 +108,7 @@ void cpu_list_unlock(void)

uint64_t cpu_get_tsc(CPUX86State *env)
{
    return cpu_get_real_ticks();
    return cpu_get_host_ticks();
}

static void write_dt(void *ptr, unsigned long addr, unsigned long limit,
+3 −3
Original line number Diff line number Diff line
@@ -199,7 +199,7 @@ int64_t cpu_get_ticks(void)

    ticks = timers_state.cpu_ticks_offset;
    if (timers_state.cpu_ticks_enabled) {
        ticks += cpu_get_real_ticks();
        ticks += cpu_get_host_ticks();
    }

    if (timers_state.cpu_ticks_prev > ticks) {
@@ -247,7 +247,7 @@ void cpu_enable_ticks(void)
    /* Here, the really thing protected by seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (!timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
        timers_state.cpu_ticks_offset -= cpu_get_host_ticks();
        timers_state.cpu_clock_offset -= get_clock();
        timers_state.cpu_ticks_enabled = 1;
    }
@@ -263,7 +263,7 @@ void cpu_disable_ticks(void)
    /* Here, the really thing protected by seqlock is cpu_clock_offset. */
    seqlock_write_lock(&timers_state.vm_clock_seqlock);
    if (timers_state.cpu_ticks_enabled) {
        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
        timers_state.cpu_ticks_offset += cpu_get_host_ticks();
        timers_state.cpu_clock_offset = cpu_get_clock_locked();
        timers_state.cpu_ticks_enabled = 0;
    }
+1 −1
Original line number Diff line number Diff line
@@ -848,7 +848,7 @@ static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr,
    uint32_t xirr = icp_accept(ss);

    args[0] = xirr;
    args[1] = cpu_get_real_ticks();
    args[1] = cpu_get_host_ticks();
    return H_SUCCESS;
}

+2 −2
Original line number Diff line number Diff line
@@ -834,7 +834,7 @@ static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
static void timebase_pre_save(void *opaque)
{
    PPCTimebase *tb = opaque;
    uint64_t ticks = cpu_get_real_ticks();
    uint64_t ticks = cpu_get_host_ticks();
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);

    if (!first_ppc_cpu->env.tb_env) {
@@ -878,7 +878,7 @@ static int timebase_post_load(void *opaque, int version_id)
                                     NANOSECONDS_PER_SECOND);
    guest_tb = tb_remote->guest_timebase + MIN(0, migration_duration_tb);

    tb_off_adj = guest_tb - cpu_get_real_ticks();
    tb_off_adj = guest_tb - cpu_get_host_ticks();

    tb_off = first_ppc_cpu->env.tb_env->tb_offset;
    trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
+10 −10
Original line number Diff line number Diff line
@@ -857,7 +857,7 @@ int64_t cpu_icount_to_ns(int64_t icount);

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
static inline int64_t cpu_get_host_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
@@ -883,7 +883,7 @@ static inline int64_t cpu_get_real_ticks(void)

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
static inline int64_t cpu_get_host_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
@@ -892,7 +892,7 @@ static inline int64_t cpu_get_real_ticks(void)

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
static inline int64_t cpu_get_host_ticks(void)
{
    uint32_t low,high;
    int64_t val;
@@ -905,7 +905,7 @@ static inline int64_t cpu_get_real_ticks(void)

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
static inline int64_t cpu_get_host_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
@@ -914,7 +914,7 @@ static inline int64_t cpu_get_real_ticks(void)

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
static inline int64_t cpu_get_host_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
@@ -923,7 +923,7 @@ static inline int64_t cpu_get_real_ticks(void)

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
static inline int64_t cpu_get_host_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
@@ -932,7 +932,7 @@ static inline int64_t cpu_get_real_ticks(void)

#elif defined(__sparc__)

static inline int64_t cpu_get_real_ticks (void)
static inline int64_t cpu_get_host_ticks (void)
{
#if defined(_LP64)
    uint64_t        rval;
@@ -970,7 +970,7 @@ static inline int64_t cpu_get_real_ticks (void)
                              : "=r" (value));          \
    }

static inline int64_t cpu_get_real_ticks(void)
static inline int64_t cpu_get_host_ticks(void)
{
    /* On kernels >= 2.6.25 rdhwr <reg>, $2 and $3 are emulated */
    uint32_t count;
@@ -986,7 +986,7 @@ static inline int64_t cpu_get_real_ticks(void)

#elif defined(__alpha__)

static inline int64_t cpu_get_real_ticks(void)
static inline int64_t cpu_get_host_ticks(void)
{
    uint64_t cc;
    uint32_t cur, ofs;
@@ -1001,7 +1001,7 @@ static inline int64_t cpu_get_real_ticks(void)
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing.  */
static inline int64_t cpu_get_real_ticks (void)
static inline int64_t cpu_get_host_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
Loading