/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#ifndef __PMEM_H__
#define __PMEM_H__

#include <linux/io.h>

#ifdef CONFIG_ARCH_HAS_PMEM_API
#include <asm/pmem.h>
#else
static inline void arch_wmb_pmem(void)
{
        BUG();
}

static inline bool __arch_has_wmb_pmem(void)
{
        return false;
}

static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
                size_t n)
{
        BUG();
}
#endif

/*
 * Architectures that define ARCH_HAS_PMEM_API must provide
 * implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(), and
 * __arch_has_wmb_pmem().
 */
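
/*
 * Illustrative sketch only, not the <asm/pmem.h> of any particular
 * architecture: an implementation typically pairs a copy that bypasses
 * or flushes the cpu cache with a store fence that drains write
 * buffers, along the lines of:
 *
 *        static inline void arch_memcpy_to_pmem(void __pmem *dst,
 *                        const void *src, size_t n)
 *        {
 *                memcpy((void __force *) dst, src, n);
 *                flush_dst_cachelines(dst, n);
 *        }
 *
 *        static inline void arch_wmb_pmem(void)
 *        {
 *                wmb();
 *        }
 *
 *        static inline bool __arch_has_wmb_pmem(void)
 *        {
 *                return true;
 *        }
 *
 * where flush_dst_cachelines() is a stand-in for whatever arch-specific
 * primitive pushes dirty lines out to the memory controller, and
 * __arch_has_wmb_pmem() only returns true if arch_wmb_pmem() really
 * does make prior writes durable on this cpu.
 */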

static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
{
        memcpy(dst, (void __force const *) src, size);
}

static inline void memunmap_pmem(struct device *dev, void __pmem *addr)
{
        devm_memunmap(dev, (void __force *) addr);
}

/**
 * arch_has_wmb_pmem - true if wmb_pmem() ensures durability
 *
 * For a given cpu implementation within an architecture it is possible
 * that wmb_pmem() resolves to a nop.  If this returns false, pmem api
 * users are unable to ensure durability and may want to fall back to a
 * different data consistency model, or otherwise notify the user.
 */
static inline bool arch_has_wmb_pmem(void)
{
        if (IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
                return __arch_has_wmb_pmem();
        return false;
}

static inline bool arch_has_pmem_api(void)
{
        return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && arch_has_wmb_pmem();
}
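
/*
 * Usage sketch (hypothetical caller, not part of this header): a driver
 * that wants to advertise persistence would typically check these
 * helpers up front and degrade to a weaker consistency model, or warn,
 * when durability cannot be guaranteed, e.g.:
 *
 *        if (!arch_has_wmb_pmem())
 *                dev_warn(dev, "unable to guarantee persistence of writes\n");
 */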

/*
 * These defaults seek to offer decent performance and minimize the
 * window between i/o completion and writes being durable on media.
 * However, it is undefined / architecture specific whether the default
 * memremap_pmem() mapping + default_memcpy_to_pmem() is sufficient for
 * making data durable relative to i/o completion.
 */
static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
                size_t size)
{
        memcpy((void __force *) dst, src, size);
}

/**
 * memremap_pmem - map physical persistent memory for pmem api
 * @dev: device responsible for the mapping; the mapping is devm managed
 * @offset: physical address of persistent memory
 * @size: size of the mapping
 *
 * Establish a mapping of the architecture specific memory type expected
 * by memcpy_to_pmem() and wmb_pmem().  For example, it may be the case
 * that an uncacheable or writethrough mapping is sufficient, or a
 * writeback mapping may suffice provided memcpy_to_pmem() and
 * wmb_pmem() arrange for the data to be written through the cache to
 * persistent media.
 */
static inline void __pmem *memremap_pmem(struct device *dev,
                resource_size_t offset, unsigned long size)
{
#ifdef ARCH_MEMREMAP_PMEM
        return (void __pmem *) devm_memremap(dev, offset, size,
                        ARCH_MEMREMAP_PMEM);
#else
        return (void __pmem *) devm_memremap(dev, offset, size,
                        MEMREMAP_WT);
#endif
}
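
/*
 * Usage sketch (hypothetical probe path, field names are illustrative):
 * a driver would normally establish the mapping once against its device
 * and rely on devm, or memunmap_pmem(), to tear it down, e.g.:
 *
 *        pmem->virt_addr = memremap_pmem(dev, pmem->phys_addr, pmem->size);
 *        if (!pmem->virt_addr)
 *                return -ENXIO;
 */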

/**
 * memcpy_to_pmem - copy data to persistent memory
 * @dst: destination buffer for the copy
 * @src: source buffer for the copy
 * @n: length of the copy in bytes
 *
 * Perform a memory copy that results in the destination of the copy
 * being effectively evicted from, or never written to, the processor
 * cache hierarchy after the copy completes.  After memcpy_to_pmem()
 * data may still reside in cpu or platform buffers, so this operation
 * must be followed by a wmb_pmem().
 */
static inline void memcpy_to_pmem(void __pmem *dst, const void *src, size_t n)
{
        if (arch_has_pmem_api())
                arch_memcpy_to_pmem(dst, src, n);
        else
                default_memcpy_to_pmem(dst, src, n);
}

/**
 * wmb_pmem - synchronize writes to persistent memory
 *
 * After a series of memcpy_to_pmem() operations this drains data from
 * cpu write buffers and any platform (memory controller) buffers to
 * ensure that written data is durable on persistent memory media.
 */
static inline void wmb_pmem(void)
{
        if (arch_has_pmem_api())
                arch_wmb_pmem();
}
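
/*
 * Minimal end-to-end sketch; example_pmem_write() is a hypothetical
 * helper, not part of the pmem api.  It shows the required pairing: one
 * or more memcpy_to_pmem() calls followed by a single wmb_pmem() to
 * make the data durable (when arch_has_wmb_pmem() says that is
 * possible).
 */
static inline void example_pmem_write(void __pmem *dst, const void *buf,
                size_t len)
{
        /* stage the data, bypassing or flushing the cpu cache */
        memcpy_to_pmem(dst, buf, len);

        /* drain cpu/platform write buffers so the data is durable */
        wmb_pmem();
}
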
#endif /* __PMEM_H__ */