base address must be 64 byte aligned and exist within a valid guest memory
 region. See Documentation/virt/kvm/arm/pvtime.rst for more information
 including the layout of the stolen time structure.
+
+4. GROUP: KVM_VCPU_TSC_CTRL
+===========================
+
+:Architectures: x86
+
+4.1 ATTRIBUTE: KVM_VCPU_TSC_OFFSET
+----------------------------------
+
+:Parameters: 64-bit unsigned TSC offset
+
+Returns:
+
+        ======= ======================================
+        -EFAULT Error reading/writing the provided
+                parameter address.
+        -ENXIO  Attribute not supported
+        ======= ======================================
+
+Specifies the guest's TSC offset relative to the host's TSC. The guest's
+TSC is then derived by the following equation:
+
+  guest_tsc = host_tsc + KVM_VCPU_TSC_OFFSET
+
+This attribute is useful for the precise migration of a guest's TSC. The
+following describes a possible algorithm to use for the migration of a
+guest's TSC:
+
+From the source VMM process:
+
+1. Invoke the KVM_GET_CLOCK ioctl to record the host TSC (t_0),
+   kvmclock nanoseconds (k_0), and realtime nanoseconds (r_0).
+
+2. Read the KVM_VCPU_TSC_OFFSET attribute for every vCPU to record the
+   guest TSC offset (off_n).
+
+3. Invoke the KVM_GET_TSC_KHZ ioctl to record the frequency of the
+   guest's TSC (freq).
+
+From the destination VMM process:
+
+4. Invoke the KVM_SET_CLOCK ioctl, providing the kvmclock nanoseconds
+   (k_0) and realtime nanoseconds (r_0) in their respective fields.
+   Ensure that the KVM_CLOCK_REALTIME flag is set in the provided
+   structure. KVM will advance the VM's kvmclock to account for elapsed
+   time since recording the clock values.
+
+5. Invoke the KVM_GET_CLOCK ioctl to record the host TSC (t_1) and
+   kvmclock nanoseconds (k_1).
+
+6. Adjust the guest TSC offsets for every vCPU to account for (1) time
+   elapsed since recording state and (2) difference in TSCs between the
+   source and destination machine:
+
+   new_off_n = t_0 + off_n + (k_1 - k_0) * freq - t_1
+
+7. Write the KVM_VCPU_TSC_OFFSET attribute for every vCPU with the
+   respective value derived in the previous step.
 
        kvm->arch.last_tsc_nsec = ns;
        kvm->arch.last_tsc_write = tsc;
        kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
+       kvm->arch.last_tsc_offset = offset;
 
        vcpu->arch.last_guest_tsc = tsc;
 
        case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
        case KVM_CAP_SREGS2:
        case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
+       case KVM_CAP_VCPU_ATTRIBUTES:
                r = 1;
                break;
        case KVM_CAP_EXIT_HYPERCALL:
        return 0;
 }
 
+/*
+ * KVM_HAS_DEVICE_ATTR handler for the KVM_VCPU_TSC_CTRL attribute group.
+ *
+ * Returns 0 if @attr->attr names a supported attribute (currently only
+ * KVM_VCPU_TSC_OFFSET), -ENXIO otherwise.  @vcpu is unused; support does
+ * not depend on per-vCPU state.
+ */
+static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu,
+                                struct kvm_device_attr *attr)
+{
+       int r;
+
+       switch (attr->attr) {
+       case KVM_VCPU_TSC_OFFSET:
+               r = 0;
+               break;
+       default:
+               r = -ENXIO;
+       }
+
+       return r;
+}
+
+/*
+ * KVM_GET_DEVICE_ATTR handler for the KVM_VCPU_TSC_CTRL attribute group.
+ *
+ * Copies the vCPU's current L1 TSC offset to the userspace address given
+ * in @attr->addr.
+ *
+ * Returns 0 on success, -EFAULT if the address does not fit in an
+ * unsigned long or the copy-out faults, -ENXIO for an unknown attribute.
+ */
+static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu,
+                                struct kvm_device_attr *attr)
+{
+       u64 __user *uaddr = (u64 __user *)(unsigned long)attr->addr;
+       int r;
+
+       /*
+        * attr->addr is a u64; reject values truncated by the cast above
+        * (e.g. a 64-bit address handed to a 32-bit kernel).
+        */
+       if ((u64)(unsigned long)uaddr != attr->addr)
+               return -EFAULT;
+
+       switch (attr->attr) {
+       case KVM_VCPU_TSC_OFFSET:
+               r = -EFAULT;
+               if (put_user(vcpu->arch.l1_tsc_offset, uaddr))
+                       break;
+               r = 0;
+               break;
+       default:
+               r = -ENXIO;
+       }
+
+       return r;
+}
+
+/*
+ * KVM_SET_DEVICE_ATTR handler for the KVM_VCPU_TSC_CTRL attribute group.
+ *
+ * Reads a new L1 TSC offset from the userspace address in @attr->addr and
+ * applies it via __kvm_synchronize_tsc() under the VM-wide tsc_write_lock.
+ *
+ * Returns 0 on success, -EFAULT if the address does not fit in an
+ * unsigned long or the copy-in faults, -ENXIO for an unknown attribute.
+ */
+static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu,
+                                struct kvm_device_attr *attr)
+{
+       u64 __user *uaddr = (u64 __user *)(unsigned long)attr->addr;
+       struct kvm *kvm = vcpu->kvm;
+       int r;
+
+       /* Same address-width check as kvm_arch_tsc_get_attr(). */
+       if ((u64)(unsigned long)uaddr != attr->addr)
+               return -EFAULT;
+
+       switch (attr->attr) {
+       case KVM_VCPU_TSC_OFFSET: {
+               u64 offset, tsc, ns;
+               unsigned long flags;
+               bool matched;
+
+               r = -EFAULT;
+               if (get_user(offset, uaddr))
+                       break;
+
+               raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
+
+               /*
+                * Treat this write as "matched" when it repeats the last
+                * recorded TSC write for this VM (same frequency and same
+                * offset) — presumably so __kvm_synchronize_tsc() keeps
+                * such vCPUs in the same synchronized generation rather
+                * than restarting tracking; confirm against its definition.
+                */
+               matched = (vcpu->arch.virtual_tsc_khz &&
+                          kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz &&
+                          kvm->arch.last_tsc_offset == offset);
+
+               /*
+                * Derive the guest TSC value implied by the new offset at
+                * this instant: scaled host TSC plus the L1 offset.
+                */
+               tsc = kvm_scale_tsc(vcpu, rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset;
+               ns = get_kvmclock_base_ns();
+
+               __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched);
+               raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
+
+               r = 0;
+               break;
+       }
+       default:
+               r = -ENXIO;
+       }
+
+       return r;
+}
+
+/*
+ * Common handler for the KVM_{HAS,GET,SET}_DEVICE_ATTR vCPU ioctls.
+ *
+ * Copies the struct kvm_device_attr from @argp and dispatches to the
+ * per-ioctl TSC attribute handler.  Only the KVM_VCPU_TSC_CTRL group is
+ * currently supported.
+ *
+ * Returns 0 on success, -EFAULT if @argp cannot be read, -ENXIO for an
+ * unsupported group or ioctl, or the handler's error code.
+ */
+static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu,
+                                     unsigned int ioctl,
+                                     void __user *argp)
+{
+       struct kvm_device_attr attr;
+       int r;
+
+       if (copy_from_user(&attr, argp, sizeof(attr)))
+               return -EFAULT;
+
+       if (attr.group != KVM_VCPU_TSC_CTRL)
+               return -ENXIO;
+
+       switch (ioctl) {
+       case KVM_HAS_DEVICE_ATTR:
+               r = kvm_arch_tsc_has_attr(vcpu, &attr);
+               break;
+       case KVM_GET_DEVICE_ATTR:
+               r = kvm_arch_tsc_get_attr(vcpu, &attr);
+               break;
+       case KVM_SET_DEVICE_ATTR:
+               r = kvm_arch_tsc_set_attr(vcpu, &attr);
+               break;
+       default:
+               /*
+                * All current callers pass one of the three ioctls above,
+                * but without this arm a future caller would return
+                * uninitialized stack garbage in r.
+                */
+               r = -ENXIO;
+               break;
+       }
+
+       return r;
+}
+
 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
 {
                r = __set_sregs2(vcpu, u.sregs2);
                break;
        }
+       case KVM_HAS_DEVICE_ATTR:
+       case KVM_GET_DEVICE_ATTR:
+       case KVM_SET_DEVICE_ATTR:
+               r = kvm_vcpu_ioctl_device_attr(vcpu, ioctl, argp);
+               break;
        default:
                r = -EINVAL;
        }