Timestamp: Apr 21, 2017, 4:28:29 AM
Author: brainslayer
Message: update
File: 1 edited
  • src/linux/universal/linux-4.9/arch/x86/include/asm/pmem.h

    --- src/linux/universal/linux-4.9/arch/x86/include/asm/pmem.h	(r31574)
    +++ src/linux/universal/linux-4.9/arch/x86/include/asm/pmem.h	(r31885)
    @@ -56,5 +56,6 @@
      *
      * Write back a cache range using the CLWB (cache line write back)
    - * instruction.
    + * instruction. Note that @size is internally rounded up to be cache
    + * line size aligned.
      */
     static inline void arch_wb_cache_pmem(void *addr, size_t size)
    @@ -68,13 +69,4 @@
     	     p < vend; p += x86_clflush_size)
     		clwb(p);
    -}
    -
    -/*
    - * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
    - * iterators, so for other types (bvec & kvec) we must do a cache write-back.
    - */
    -static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
    -{
    -	return iter_is_iovec(i) == false;
     }
     
     
    @@ -95,5 +87,33 @@
     	len = copy_from_iter_nocache(addr, bytes, i);
     
    -	if (__iter_needs_pmem_wb(i))
    +	/*
    +	 * In the iovec case on x86_64 copy_from_iter_nocache() uses
    +	 * non-temporal stores for the bulk of the transfer, but we need
    +	 * to manually flush if the transfer is unaligned. A cached
    +	 * memory copy is used when destination or size is not naturally
    +	 * aligned. That is:
    +	 *   - Require 8-byte alignment when size is 8 bytes or larger.
    +	 *   - Require 4-byte alignment when size is 4 bytes.
    +	 *
    +	 * In the non-iovec case the entire destination needs to be
    +	 * flushed.
    +	 */
    +	if (iter_is_iovec(i)) {
    +		unsigned long flushed, dest = (unsigned long) addr;
    +
    +		if (bytes < 8) {
    +			if (!IS_ALIGNED(dest, 4) || (bytes != 4))
    +				arch_wb_cache_pmem(addr, 1);
    +		} else {
    +			if (!IS_ALIGNED(dest, 8)) {
    +				dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
    +				arch_wb_cache_pmem(addr, 1);
    +			}
    +
    +			flushed = dest - (unsigned long) addr;
    +			if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
    +				arch_wb_cache_pmem(addr + bytes - 1, 1);
    +		}
    +	} else
     		arch_wb_cache_pmem(addr, bytes);
     
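
The comment change in the first hunk documents behaviour the helper already has: arch_wb_cache_pmem() rounds the start address down to a cache-line boundary and then issues CLWB for every line up to the end of the range, so @size is effectively rounded up to cache-line granularity. For reference, a sketch of how the full body reads in the 4.9 tree (only the last two lines appear in the hunk above; the rest is reconstructed from memory of the 4.9 sources and should be checked against the actual file):

    static inline void arch_wb_cache_pmem(void *addr, size_t size)
    {
    	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
    	unsigned long clflush_mask = x86_clflush_size - 1;
    	void *vend = addr + size;	/* one past the last byte to write back */
    	void *p;

    	/* round the start down to a cache-line boundary ... */
    	for (p = (void *)((unsigned long)addr & ~clflush_mask);
    	     p < vend; p += x86_clflush_size)
    		/* ... and CLWB every line overlapping [addr, addr + size) */
    		clwb(p);
    }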
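
The last hunk is the substantive change: instead of flushing the whole destination whenever the iterator is not an iovec, the iovec path now writes back only the cache lines that copy_from_iter_nocache() may have touched with cached stores, i.e. an unaligned head and/or a tail whose length is not a multiple of 8. The stand-alone sketch below mirrors that decision logic in user space so the alignment rules can be exercised directly; flush_range(), CLFLUSH_SIZE and the sample addresses are illustrative stand-ins (for arch_wb_cache_pmem() and boot_cpu_data.x86_clflush_size), not kernel API:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define CLFLUSH_SIZE		64UL	/* assumed cache-line size */
    #define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
    #define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

    /* stand-in for arch_wb_cache_pmem(): just report what would be flushed */
    static void flush_range(const char *what, uintptr_t addr)
    {
    	printf("  flush %s cache line at 0x%lx\n", what, (unsigned long)addr);
    }

    /* mirrors the iter_is_iovec() branch added by this changeset */
    static void maybe_flush(uintptr_t dest, size_t bytes)
    {
    	printf("dest=0x%lx bytes=%zu:\n", (unsigned long)dest, bytes);

    	if (bytes < 8) {
    		/* short copies use cached stores unless exactly 4 bytes, 4-aligned */
    		if (!IS_ALIGNED(dest, 4) || bytes != 4)
    			flush_range("head", dest);
    	} else {
    		uintptr_t aligned = dest;
    		size_t flushed;

    		if (!IS_ALIGNED(dest, 8)) {
    			/* unaligned head was copied with cached stores */
    			aligned = ALIGN(dest, CLFLUSH_SIZE);
    			flush_range("head", dest);
    		}

    		flushed = aligned - dest;
    		if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
    			/* trailing partial word was copied with cached stores */
    			flush_range("tail", dest + bytes - 1);
    	}
    }

    int main(void)
    {
    	maybe_flush(0x1000, 4096);	/* aligned start and size: no flush */
    	maybe_flush(0x1003, 4096);	/* unaligned head and tail */
    	maybe_flush(0x1000, 3);		/* short, non-4-byte copy */
    	return 0;
    }

Flushing a single byte at the head or tail is sufficient in the kernel code because, per the first hunk's comment, arch_wb_cache_pmem() rounds the request up to whole cache lines.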