source: src/linux/universal/linux-4.9/arch/s390/include/asm/processor.h @ 31662

Last change on this file since 31662 was 31662, checked in by brainslayer, 4 months ago

use new squashfs in all kernels

File size: 9.1 KB
Line 
1/*
2 *  S390 version
3 *    Copyright IBM Corp. 1999
4 *    Author(s): Hartmut Penner (hp@de.ibm.com),
5 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
6 *
7 *  Derived from "include/asm-i386/processor.h"
8 *    Copyright (C) 1994, Linus Torvalds
9 */
10
11#ifndef __ASM_S390_PROCESSOR_H
12#define __ASM_S390_PROCESSOR_H
13
14#include <linux/const.h>
15
/*
 * Per-CPU interrupt/work flags, kept in the lowcore cpu_flags word and
 * manipulated by the {set,clear,test}_cpu_flag() helpers below.
 * CIF_* are bit numbers; the matching _CIF_* are the bit masks.
 */
#define CIF_MCCK_PENDING	0	/* machine check handling is pending */
#define CIF_ASCE		1	/* user asce needs fixup / uaccess */
#define CIF_NOHZ_DELAY		2	/* delay HZ disable for a tick */
#define CIF_FPU			3	/* restore FPU registers */
#define CIF_IGNORE_IRQ		4	/* ignore interrupt (for udelay) */
#define CIF_ENABLED_WAIT	5	/* in enabled wait state */

#define _CIF_MCCK_PENDING	_BITUL(CIF_MCCK_PENDING)
#define _CIF_ASCE		_BITUL(CIF_ASCE)
#define _CIF_NOHZ_DELAY		_BITUL(CIF_NOHZ_DELAY)
#define _CIF_FPU		_BITUL(CIF_FPU)
#define _CIF_IGNORE_IRQ		_BITUL(CIF_IGNORE_IRQ)
#define _CIF_ENABLED_WAIT	_BITUL(CIF_ENABLED_WAIT)
29
30#ifndef __ASSEMBLY__
31
32#include <linux/linkage.h>
33#include <linux/irqflags.h>
34#include <asm/cpu.h>
35#include <asm/page.h>
36#include <asm/ptrace.h>
37#include <asm/setup.h>
38#include <asm/runtime_instr.h>
39#include <asm/fpu/types.h>
40#include <asm/fpu/internal.h>
41
/* Set a CIF_* flag bit in the current CPU's lowcore cpu_flags. */
static inline void set_cpu_flag(int flag)
{
	S390_lowcore.cpu_flags |= (1UL << flag);
}
46
/* Clear a CIF_* flag bit in the current CPU's lowcore cpu_flags. */
static inline void clear_cpu_flag(int flag)
{
	S390_lowcore.cpu_flags &= ~(1UL << flag);
}
51
/* Test a CIF_* flag bit of the current CPU; returns 0 or 1. */
static inline int test_cpu_flag(int flag)
{
	return !!(S390_lowcore.cpu_flags & (1UL << flag));
}
56
/*
 * Test CIF flag of another CPU. The caller needs to ensure that
 * CPU hotplug can not happen, e.g. by disabling preemption.
 * Reads the target CPU's lowcore through lowcore_ptr[]; returns 0 or 1.
 */
static inline int test_cpu_flag_of(int flag, int cpu)
{
	struct lowcore *lc = lowcore_ptr[cpu];
	return !!(lc->cpu_flags & (1UL << flag));
}
66
/* Keep the timer tick on this CPU while a HZ-disable delay is requested. */
#define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 * "basr %0,0" with a zero branch target only stores the address of the
 * next instruction into the output register, it does not branch.
 */
#define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; })
74
/* Store the CPU identification into *ptr via the STIDP instruction. */
static inline void get_cpu_id(struct cpuid *ptr)
{
	asm volatile("stidp %0" : "=Q" (*ptr));
}
79
/* CPU speed/capability bookkeeping, implemented in arch/s390 kernel code. */
void s390_adjust_jiffies(void);
void s390_update_cpu_mhz(void);
void cpu_detect_mhz_feature(void);

/* seq_file operations backing /proc/cpuinfo - defined elsewhere. */
extern const struct seq_operations cpuinfo_op;
extern int sysctl_ieee_emulation_warnings;
extern void execve_tail(void);
87
/*
 * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
 */

/* Current address-space limit comes from the mm's asce_limit; kernel
 * threads (no mm) get the architectural maximum. */
#define TASK_SIZE_OF(tsk)	((tsk)->mm ? \
				 (tsk)->mm->context.asce_limit : TASK_MAX_SIZE)
#define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_31BIT) ? \
					(1UL << 30) : (1UL << 41))
#define TASK_SIZE		TASK_SIZE_OF(current)
#define TASK_MAX_SIZE		(1UL << 53)

/* Stack top: 2GB for 31-bit tasks, 4TB otherwise. */
#define STACK_TOP		(1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
#define STACK_TOP_MAX		(1UL << 42)

#define HAVE_ARCH_PICK_MMAP_LAYOUT
103
/* Address-space segment for user access; NOTE(review): presumably holds
 * the access-register value used by uaccess - confirm against uaccess.h. */
typedef struct {
	__u32 ar4;
} mm_segment_t;
107
/*
 * Thread structure - per-task architecture specific state.
 */
struct thread_struct {
	unsigned int  acrs[NUM_ACRS];	/* access registers */
	unsigned long ksp;              /* kernel stack pointer             */
	mm_segment_t mm_segment;	/* uaccess address space segment */
	unsigned long gmap_addr;        /* address of last gmap fault. */
	unsigned int gmap_write_flag;   /* gmap fault write indication */
	unsigned int gmap_int_code;     /* int code of last gmap fault */
	unsigned int gmap_pfault;       /* signal of a pending guest pfault */
	struct per_regs per_user;       /* User specified PER registers */
	struct per_event per_event;     /* Cause of the last PER trap */
	unsigned long per_flags;        /* Flags to control debug behavior */
	/* pfault_wait is used to block the process on a pfault event */
	unsigned long pfault_wait;
	/* NOTE(review): presumably links pfault waiters - confirm in pfault code */
	struct list_head list;
	/* cpu runtime instrumentation */
	struct runtime_instr_cb *ri_cb;
	unsigned char trap_tdb[256];    /* Transaction abort diagnose block */
	/*
	 * Warning: 'fpu' is dynamically-sized. It *MUST* be at
	 * the end.
	 */
	struct fpu fpu;                 /* FP and VX register save area */
};
134
/* Values for thread_struct.per_flags (debug behavior control): */
/* Flag to disable transactions. */
#define PER_FLAG_NO_TE			1UL
/* Flag to enable random transaction aborts. */
#define PER_FLAG_TE_ABORT_RAND		2UL
/* Flag to specify random transaction abort mode:
 * - abort each transaction at a random instruction before TEND if set.
 * - abort random transactions at a random instruction if cleared.
 */
#define PER_FLAG_TE_ABORT_RAND_TEND	4UL

typedef struct thread_struct thread_struct;
146
/*
 * Stack layout of a C stack frame.
 * Two variants: the standard ABI layout, and a packed layout selected
 * by the __PACK_STACK build option which reorders the slots so the
 * back chain sits at the end of the frame.
 */
#ifndef __PACK_STACK
struct stack_frame {
	unsigned long back_chain;	/* link to the caller's frame */
	unsigned long empty1[5];
	unsigned long gprs[10];		/* saved general purpose registers */
	unsigned int  empty2[8];
};
#else
struct stack_frame {
	unsigned long empty1[5];
	unsigned int  empty2[8];
	unsigned long gprs[10];		/* saved general purpose registers */
	unsigned long back_chain;	/* link to the caller's frame */
};
#endif
165
#define ARCH_MIN_TASKALIGN	8

/*
 * Initial thread_struct for the init task: kernel stack pointer set to
 * the top of init_stack, and the FPU save-area pointer aimed at the
 * init task's (non-vector) floating point register array.
 */
#define INIT_THREAD {							\
	.ksp = sizeof(init_stack) + (unsigned long) &init_stack,	\
	.fpu.regs = (void *) init_task.thread.fpu.fprs,			\
}
172
/*
 * Do necessary setup to start up a new thread.
 * Builds a 64-bit user PSW (EA+BA addressing mode), points it at the
 * new entry address and installs the new user stack pointer in r15.
 */
#define start_thread(regs, new_psw, new_stackp) do {			\
	regs->psw.mask	= PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA;	\
	regs->psw.addr	= new_psw;					\
	regs->gprs[15]	= new_stackp;					\
	execve_tail();							\
} while (0)
182
/*
 * 31-bit variant of start_thread(): PSW without the EA bit (31-bit
 * addressing mode) and a page-table downgrade for the smaller
 * address space via crst_table_downgrade().
 */
#define start_thread31(regs, new_psw, new_stackp) do {			\
	regs->psw.mask	= PSW_USER_BITS | PSW_MASK_BA;			\
	regs->psw.addr	= new_psw;					\
	regs->gprs[15]	= new_stackp;					\
	crst_table_downgrade(current->mm);				\
	execve_tail();							\
} while (0)
190
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
struct seq_file;

/*
 * Callback invoked by dump_trace() for each stack frame found.
 * NOTE(review): semantics of the return value and of 'reliable' are
 * defined by the implementation in arch/s390/kernel - confirm there.
 */
typedef int (*dump_trace_func_t)(void *data, unsigned long address, int reliable);
void dump_trace(dump_trace_func_t func, void *data,
		struct task_struct *task, unsigned long sp);

void show_cacheinfo(struct seq_file *m);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/*
 * Return saved PC of a blocked thread.
 */
extern unsigned long thread_saved_pc(struct task_struct *t);

unsigned long get_wchan(struct task_struct *p);
/* The user register save area sits at the very top of the kernel stack. */
#define task_pt_regs(tsk) ((struct pt_regs *) \
	(task_stack_page(tsk) + THREAD_SIZE) - 1)
#define KSTK_EIP(tsk)	(task_pt_regs(tsk)->psw.addr)	/* user PSW address */
#define KSTK_ESP(tsk)	(task_pt_regs(tsk)->gprs[15])	/* user stack pointer */

/* Has task runtime instrumentation enabled ? */
#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
218
/* Return the current value of the stack pointer (r15) via "la 0(15)". */
static inline unsigned long current_stack_pointer(void)
{
	unsigned long sp;

	asm volatile("la %0,0(15)" : "=a" (sp));
	return sp;
}
226
/* Store the CPU address of the executing CPU via the STAP instruction. */
static inline unsigned short stap(void)
{
	unsigned short cpu_address;

	asm volatile("stap %0" : "=m" (cpu_address));
	return cpu_address;
}
234
/*
 * Give up the time slice of the virtual PU.
 * Out-of-line; implemented in arch/s390 kernel code.
 */
void cpu_relax(void);

/* Low-latency relax: just a compiler barrier, no yield. */
#define cpu_relax_lowlatency()  barrier()

/* Attribute selectors for __ecag() (extract CPU attribute). */
#define ECAG_CACHE_ATTRIBUTE	0
#define ECAG_CPU_ATTRIBUTE	1
244
/*
 * Execute the ECAG (extract CPU attribute) instruction: 'asi' selects
 * the attribute class (ECAG_*), 'parm' the attribute within it.
 * Encoded via .insn; NOTE(review): presumably because older assemblers
 * lack the ecag mnemonic - confirm against toolchain requirements.
 */
static inline unsigned long __ecag(unsigned int asi, unsigned char parm)
{
	unsigned long val;

	asm volatile(".insn	rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
		     : "=d" (val) : "a" (asi << 8 | parm));
	return val;
}
253
/* Set the PSW storage access key via the SPKA instruction. */
static inline void psw_set_key(unsigned int key)
{
	asm volatile("spka 0(%0)" : : "d" (key));
}
258
/*
 * Set PSW to specified value.
 * Loads the full quadword PSW via LPSWE; execution continues at
 * psw.addr with the mask in psw.mask.
 */
static inline void __load_psw(psw_t psw)
{
	asm volatile("lpswe %0" : : "Q" (psw) : "cc");
}
266
/*
 * Set PSW mask to specified value, while leaving the
 * PSW addr pointing to the next instruction.
 * The asm stores the address of local label 1 into the addr half of
 * the psw (offset 8 in the quadword) and then loads the psw, so
 * execution resumes right after the lpswe with the new mask.
 */
static inline void __load_psw_mask(unsigned long mask)
{
	unsigned long addr;
	psw_t psw;

	psw.mask = mask;

	asm volatile(
		"	larl	%0,1f\n"
		"	stg	%0,%O1+8(%R1)\n"
		"	lpswe	%1\n"
		"1:"
		: "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
}
285
/*
 * Extract current PSW mask.
 * EPSW stores the two 32-bit halves of the PSW mask into reg1 (upper)
 * and reg2 (lower); recombine them into one 64-bit value.
 */
static inline unsigned long __extract_psw(void)
{
	unsigned int reg1, reg2;

	asm volatile("epsw %0,%1" : "=d" (reg1), "=a" (reg2));
	return (((unsigned long) reg1) << 32) | ((unsigned long) reg2);
}
296
/* Enable machine check interrupts by setting PSW_MASK_MCHECK in the PSW. */
static inline void local_mcck_enable(void)
{
	__load_psw_mask(__extract_psw() | PSW_MASK_MCHECK);
}
301
/* Disable machine check interrupts by clearing PSW_MASK_MCHECK in the PSW. */
static inline void local_mcck_disable(void)
{
	__load_psw_mask(__extract_psw() & ~PSW_MASK_MCHECK);
}
306
307/*
308 * Rewind PSW instruction address by specified number of bytes.
309 */
310static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
311{
312        unsigned long mask;
313
314        mask = (psw.mask & PSW_MASK_EA) ? -1UL :
315               (psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
316                                          (1UL << 24) - 1;
317        return (psw.addr - ilc) & mask;
318}
319
/*
 * Function to stop a processor until the next interrupt occurs
 */
void enabled_wait(void);

/*
 * Function to drop a processor into disabled wait state.
 * Loads a wait PSW whose address field carries the caller-supplied
 * code; NOTE(review): presumably so the code can be inspected from
 * the outside (operator/dump) - confirm in the machine check paths.
 */
static inline void __noreturn disabled_wait(unsigned long code)
{
	psw_t psw;

	psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA;
	psw.addr = code;
	__load_psw(psw);
	while (1);	/* never reached; satisfies __noreturn */
}
337
/*
 * Basic Machine Check/Program Check Handler.
 */

extern void s390_base_mcck_handler(void);
extern void s390_base_pgm_handler(void);
extern void s390_base_ext_handler(void);

/* Overridable callbacks invoked by the base handlers above. */
extern void (*s390_base_mcck_handler_fn)(void);
extern void (*s390_base_pgm_handler_fn)(void);
extern void (*s390_base_ext_handler_fn)(void);

#define ARCH_LOW_ADDRESS_LIMIT	0x7fffffffUL

/* Copy to/from real or absolute storage; implemented in arch/s390 mm code. */
extern int memcpy_real(void *, void *, size_t);
extern void memcpy_absolute(void *, void *, size_t);
354
/*
 * Assign 'val' to 'dest' in absolute storage via memcpy_absolute().
 * The BUILD_BUG_ON rejects a size mismatch between destination and value.
 * Wrapped in do { } while (0) so the macro behaves as a single statement;
 * the previous bare { } block broke "if (c) mem_assign_absolute(...); else"
 * because of the trailing semicolon.
 */
#define mem_assign_absolute(dest, val) do {			\
	__typeof__(dest) __tmp = (val);				\
								\
	BUILD_BUG_ON(sizeof(__tmp) != sizeof(val));		\
	memcpy_absolute(&(dest), &__tmp, sizeof(__tmp));	\
} while (0)
361
362#endif /* __ASSEMBLY__ */
363
364#endif /* __ASM_S390_PROCESSOR_H */
Note: See TracBrowser for help on using the repository browser.