Write an unsigned value with the specified number of bytes to memory via an effective address.
The effective address is translated to a canonical linear address. If this translation is successful and no other errors occur (like alignment errors), then wml-size is called.
Prior to the effective address translation, we check whether write access is allowed. In 32-bit mode, write access is allowed in data segments (DS, ES, FS, GS, and SS) if the W bit in the segment descriptor is 1; write access is disallowed in code segments (this is not explicitly mentioned in Intel manual, May'18, Volume 3, Section 3.4.5.1, but it seems reasonable). In 64-bit mode, the W bit is ignored (see AMD manual, Dec'17, Volume 2, Section 4.8.1); by analogy, we allow write access to the code segment as well. These checks may be slightly optimized using the invariant that SS.W must be 1 when SS is loaded.
Function:
;; Write an NBYTES-wide unsigned value VAL to memory at effective address
;; EFF-ADDR, relative to segment register SEG-REG.
;; Returns (mv flg x86): FLG is NIL on success, or an error token
;; (:non-writable-segment, a translation error from EA-TO-LA, or
;; :unaligned-linear-address) on failure.
;; Steps:
;;  1. Outside 64-bit mode (PROC-MODE /= 0), reject the write when the
;;     target is seg-reg 1 (presumably CS; see the writability discussion
;;     above) or when the W bit of the segment-descriptor attributes is 0
;;     (non-writable data segment).
;;  2. Translate the effective address to a linear address via EA-TO-LA,
;;     propagating any translation error.
;;  3. When CHECK-ALIGNMENT? is non-NIL, reject a linear address that is
;;     not aligned for NBYTES per ADDRESS-ALIGNED-P (MEM-PTR? refines the
;;     alignment requirement for memory-pointer operands).
;;  4. Perform the actual write with WML-SIZE.
;; The guard ties VAL's width to NBYTES (e.g. 2 bytes requires N16P).
(defun wme-size$inline (proc-mode nbytes eff-addr seg-reg val check-alignment? x86 mem-ptr?) (declare (xargs :stobjs (x86))) (declare (type (integer 0 4) proc-mode) (type (member 1 2 4 6 8 10 16 32) nbytes) (type (signed-byte 64) eff-addr) (type (integer 0 5) seg-reg) (type (integer 0 *) val)) (declare (xargs :guard (and (booleanp check-alignment?) (booleanp mem-ptr?)))) (declare (xargs :guard (case nbytes (1 (n08p val)) (2 (n16p val)) (4 (n32p val)) (6 (n48p val)) (8 (n64p val)) (10 (n80p val)) (16 (n128p val)) (32 (n256p val))))) (b* (((when (and (/= proc-mode 0) (or (= seg-reg 1) (b* ((attr (loghead 16 (seg-hidden-attri seg-reg x86))) (w (data-segment-descriptor-attributesbits->w attr))) (= w 0))))) (mv (list :non-writable-segment eff-addr seg-reg) x86)) ((mv flg lin-addr) (ea-to-la proc-mode eff-addr seg-reg nbytes x86)) ((when flg) (mv flg x86)) ((unless (or (not check-alignment?) (address-aligned-p lin-addr nbytes mem-ptr?))) (mv (list :unaligned-linear-address lin-addr) x86))) (wml-size nbytes lin-addr val x86)))
Theorem:
;; Type-preservation theorem: the x86 state returned by WME-SIZE is a
;; well-formed x86 state (X86P) whenever the input state is, regardless
;; of whether the write succeeded or an error flag was returned.
(defthm x86p-of-wme-size.x86-new (implies (x86p x86) (b* (((mv ?flg ?x86-new) (wme-size$inline proc-mode nbytes eff-addr seg-reg val check-alignment? x86 mem-ptr?))) (x86p x86-new))) :rule-classes :rewrite)
Theorem:
;; In 64-bit mode (proc-mode 0), for segments other than FS (4) and GS (5),
;; segmentation is a no-op: given a canonical, suitably aligned effective
;; address, WME-SIZE reduces directly to WML-SIZE on that address.
(defthm wme-size-when-64-bit-modep-and-not-fs/gs (implies (and (not (equal seg-reg 4)) (not (equal seg-reg 5)) (canonical-address-p eff-addr) (or (not check-alignment?) (address-aligned-p eff-addr nbytes mem-ptr?))) (equal (wme-size 0 nbytes eff-addr seg-reg val check-alignment? x86 :mem-ptr? mem-ptr?) (wml-size nbytes eff-addr val x86))))
Theorem:
;; In 64-bit mode with FS (4) or GS (5), the segment base is still added:
;; WME-SIZE unfolds to computing base + eff-addr (wrapped to 64 bits via
;; N64/I64), checking canonicity and alignment of the resulting linear
;; address, and then calling WML-SIZE on it.
(defthm wme-size-when-64-bit-modep-fs/gs (implies (or (equal seg-reg 4) (equal seg-reg 5)) (equal (wme-size 0 nbytes eff-addr seg-reg val check-alignment? x86 :mem-ptr? mem-ptr?) (b* (((mv flg lin-addr) (b* (((mv base & &) (segment-base-and-bounds 0 seg-reg x86)) (lin-addr (i64 (+ base (n64 eff-addr))))) (if (canonical-address-p lin-addr) (mv nil lin-addr) (mv (list :non-canonical-address lin-addr) 0)))) ((when flg) (mv flg x86)) ((unless (or (not check-alignment?) (address-aligned-p lin-addr nbytes mem-ptr?))) (mv (list :unaligned-linear-address lin-addr) x86))) (wml-size nbytes lin-addr val x86)))))
Theorem:
;; Error case, 64-bit mode, non-FS/GS segment: a canonical but misaligned
;; effective address (with alignment checking enabled) makes WME-SIZE
;; return the :unaligned-linear-address error and the unchanged state.
(defthm wme-size-64-bit-unaligned-when-64-bit-modep-and-not-fs/gs (implies (and (not (equal seg-reg 4)) (not (equal seg-reg 5)) (not (or (not check-alignment?) (address-aligned-p eff-addr nbytes mem-ptr?))) (canonical-address-p eff-addr)) (equal (wme-size 0 nbytes eff-addr seg-reg val check-alignment? x86 :mem-ptr? mem-ptr?) (list (list :unaligned-linear-address eff-addr) x86))))
Theorem:
;; Error case, 64-bit mode, non-FS/GS segment: a non-canonical effective
;; address makes WME-SIZE return the :non-canonical-address error and the
;; unchanged state.
(defthm wme-size-non-canonical-when-64-bit-modep-and-not-fs/gs (implies (and (not (equal seg-reg 4)) (not (equal seg-reg 5)) (not (canonical-address-p eff-addr))) (equal (wme-size 0 nbytes eff-addr seg-reg val check-alignment? x86 :mem-ptr? mem-ptr?) (list (list :non-canonical-address eff-addr) x86))))