/*
 * arch/x86/kernel/reboot.c
 */
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <acpi/reboot.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/hpet.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/reboot_fixups.h>
#include <asm/reboot.h>
#include <asm/pci_x86.h>
#include <asm/virtext.h>
#include <asm/cpu.h>

#ifdef CONFIG_X86_32
# include <linux/dmi.h>
# include <linux/ctype.h>
# include <linux/mc146818rtc.h>
#else
# include <asm/iommu.h>
#endif

/*
 * Power off function, if any
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

static const struct desc_ptr no_idt = {};
static int reboot_mode;
enum reboot_type reboot_type = BOOT_KBD;
int reboot_force;

#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
static int reboot_cpu = -1;
#endif

/* This is set if we need to go through the 'emergency' path.
 * When machine_emergency_restart() is called, we may be in
 * an inconsistent state and won't be able to do a clean cleanup.
 */
static int reboot_emergency;

/* This is set by the PCI code if either type 1 or type 2 PCI is detected */
bool port_cf9_safe = false;

/* reboot=b[ios] | s[mp] | t[riple] | k[bd] | a[cpi] | e[fi] [, [w]arm | [c]old] | p[ci]
   warm   Don't set the cold reboot flag
   cold   Set the cold reboot flag
   bios   Reboot by jumping through the BIOS (only for X86_32)
   smp    Reboot by executing reset on BSP or other CPU (only for X86_32)
   triple Force a triple fault (causes a CPU reset)
   kbd    Use the keyboard controller cold reset (default)
   acpi   Use the RESET_REG in the FADT
   efi    Use the efi reset_system runtime service
   pci    Use the so-called "PCI reset register", CF9
   force  Avoid anything that could hang.
 */
static int __init reboot_setup(char *str)
{
	for (;;) {
		switch (*str) {
		case 'w':
			reboot_mode = 0x1234;
			break;

		case 'c':
			reboot_mode = 0;
			break;

#ifdef CONFIG_X86_32
#ifdef CONFIG_SMP
		case 's':
			if (isdigit(*(str+1))) {
				reboot_cpu = (int) (*(str+1) - '0');
				if (isdigit(*(str+2)))
					reboot_cpu = reboot_cpu*10 + (int)(*(str+2) - '0');
			}
				/* we will leave sorting out the final value
				   when we are ready to reboot, since we might not
				   have set up boot_cpu_id or smp_num_cpu */
			break;
#endif /* CONFIG_SMP */

		case 'b':
#endif
		case 'a':
		case 'k':
		case 't':
		case 'e':
		case 'p':
			reboot_type = *str;
			break;

		case 'f':
			reboot_force = 1;
			break;
		}

		str = strchr(str, ',');
		if (str)
			str++;
		else
			break;
	}
	return 1;
}

__setup("reboot=", reboot_setup);
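
/*
 * Illustrative sketch (not part of the build): how a couple of example
 * command lines drive the parser above.  reboot_setup() walks the
 * comma-separated string once and only mutates the globals declared
 * earlier in this file.
 */
#if 0
static void __init reboot_setup_examples(void)
{
	char warm_kbd[] = "k,w";	/* as in reboot=k,w */
	char forced_triple[] = "t,f";	/* as in reboot=t,f */

	reboot_setup(warm_kbd);
	/* now reboot_type == BOOT_KBD and reboot_mode == 0x1234 (warm) */

	reboot_setup(forced_triple);
	/* now reboot_type == BOOT_TRIPLE and reboot_force == 1 */
}
#endif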


#ifdef CONFIG_X86_32
/*
 * Reboot options and system auto-detection code provided by
 * Dell Inc. so their systems "just work". :-)
 */

/*
 * Some machines require the "reboot=b" command line option;
 * this quirk makes that automatic.
 */
static int __init set_bios_reboot(const struct dmi_system_id *d)
{
	if (reboot_type != BOOT_BIOS) {
		reboot_type = BOOT_BIOS;
		printk(KERN_INFO "%s series board detected. Selecting BIOS-method for reboots.\n", d->ident);
	}
	return 0;
}

static struct dmi_system_id __initdata reboot_dmi_table[] = {
	{	/* Handle problems with rebooting on Dell E520's */
		.callback = set_bios_reboot,
		.ident = "Dell E520",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"),
		},
	},
	{	/* Handle problems with rebooting on Dell 1300's */
		.callback = set_bios_reboot,
		.ident = "Dell PowerEdge 1300",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
		},
	},
	{	/* Handle problems with rebooting on Dell 300's */
		.callback = set_bios_reboot,
		.ident = "Dell PowerEdge 300",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
		},
	},
	{       /* Handle problems with rebooting on Dell Optiplex 745's SFF*/
		.callback = set_bios_reboot,
		.ident = "Dell OptiPlex 745",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
		},
	},
	{       /* Handle problems with rebooting on Dell Optiplex 745's DFF*/
		.callback = set_bios_reboot,
		.ident = "Dell OptiPlex 745",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
			DMI_MATCH(DMI_BOARD_NAME, "0MM599"),
		},
	},
	{       /* Handle problems with rebooting on Dell Optiplex 745 with 0KW626 */
		.callback = set_bios_reboot,
		.ident = "Dell OptiPlex 745",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
			DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
		},
	},
	{   /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
		.callback = set_bios_reboot,
		.ident = "Dell OptiPlex 330",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
			DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
		},
	},
	{	/* Handle problems with rebooting on Dell 2400's */
		.callback = set_bios_reboot,
		.ident = "Dell PowerEdge 2400",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
		},
	},
	{	/* Handle problems with rebooting on Dell T5400's */
		.callback = set_bios_reboot,
		.ident = "Dell Precision T5400",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T5400"),
		},
	},
	{	/* Handle problems with rebooting on HP laptops */
		.callback = set_bios_reboot,
		.ident = "HP Compaq Laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
			DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
		},
	},
	{ }
};

static int __init reboot_init(void)
{
	dmi_check_system(reboot_dmi_table);
	return 0;
}
core_initcall(reboot_init);

/* The following code and data reboots the machine by switching to real
   mode and jumping to the BIOS reset entry point, as if the CPU has
   really been reset.  The previous version asked the keyboard
   controller to pulse the CPU reset line, which is more thorough, but
   doesn't work with at least one type of 486 motherboard.  It is easy
   to stop this code working; hence the copious comments. */
static const unsigned long long
real_mode_gdt_entries [3] =
{
	0x0000000000000000ULL,	/* Null descriptor */
	0x00009b000000ffffULL,	/* 16-bit real-mode 64k code at 0x00000000 */
	0x000093000100ffffULL	/* 16-bit real-mode 64k data at 0x00000100 */
};

static const struct desc_ptr
real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, (long)real_mode_gdt_entries },
real_mode_idt = { 0x3ff, 0 };
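
/*
 * Decoding the entries above (standard x86 descriptor layout): in
 * 0x00009b000000ffffULL the limit is 0xffff (64k), the base is
 * 0x00000000, the access byte 0x9b means present, code, readable, and
 * the flags nibble 0 selects byte granularity and a 16-bit segment.
 * The data descriptor 0x000093000100ffffULL differs only in the access
 * byte 0x93 (data, writable) and the base 0x00000100.
 */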

/* This is 16-bit protected mode code to disable paging and the cache,
   switch to real mode and jump to the BIOS reset code.

   The instruction that switches to real mode by writing to CR0 must be
   followed immediately by a far jump instruction, which sets CS to a
   valid value for real mode, and flushes the prefetch queue to avoid
   running instructions that have already been decoded in protected
   mode.

   Clears all the flags except ET, especially PG (paging), PE
   (protected-mode enable) and TS (task switch for coprocessor state
   save).  Flushes the TLB after paging has been disabled.  Sets CD and
   NW, to disable the cache on a 486, and invalidates the cache.  This
   is more like the state of a 486 after reset.  I don't know if
   something else should be done for other chips.

   More could be done here to set up the registers as if a CPU reset had
   occurred; hopefully real BIOSs don't assume much. */
static const unsigned char real_mode_switch [] =
{
	0x66, 0x0f, 0x20, 0xc0,			/*    movl  %cr0,%eax        */
	0x66, 0x83, 0xe0, 0x11,			/*    andl  $0x00000011,%eax */
	0x66, 0x0d, 0x00, 0x00, 0x00, 0x60,	/*    orl   $0x60000000,%eax */
	0x66, 0x0f, 0x22, 0xc0,			/*    movl  %eax,%cr0        */
	0x66, 0x0f, 0x22, 0xd8,			/*    movl  %eax,%cr3        */
	0x66, 0x0f, 0x20, 0xc3,			/*    movl  %cr0,%ebx        */
	0x66, 0x81, 0xe3, 0x00, 0x00, 0x00, 0x60,	/*    andl  $0x60000000,%ebx */
	0x74, 0x02,				/*    jz    f                */
	0x0f, 0x09,				/*    wbinvd                 */
	0x24, 0x10,				/* f: andb  $0x10,al         */
	0x66, 0x0f, 0x22, 0xc0			/*    movl  %eax,%cr0        */
};
static const unsigned char jump_to_bios [] =
{
	0xea, 0x00, 0x00, 0xff, 0xff		/*    ljmp  $0xffff,$0x0000  */
};
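
/*
 * Decoding the bytes above: 0xea is a far jump with a 16-bit offset and
 * segment, so this is ljmp to 0xffff:0x0000, i.e. real-mode physical
 * address 0xffff * 16 + 0x0000 = 0xffff0, the traditional BIOS reset
 * entry point.
 */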

/*
 * Switch to real mode and then execute the code
 * specified by the code and length parameters.
 * We assume that length will always be less than 100!
 */
void machine_real_restart(const unsigned char *code, int length)
{
	local_irq_disable();

	/* Write zero to CMOS register number 0x0f, which the BIOS POST
	   routine will recognize as telling it to do a proper reboot.  (Well
	   that's what this book in front of me says -- it may only apply to
	   the Phoenix BIOS though, it's not clear).  At the same time,
	   disable NMIs by setting the top bit in the CMOS address register,
	   as we're about to do peculiar things to the CPU.  I'm not sure if
	   `outb_p' is needed instead of just `outb'.  Use it to be on the
	   safe side.  (Yes, CMOS_WRITE does outb_p's. -  Paul G.)
	 */
	spin_lock(&rtc_lock);
	CMOS_WRITE(0x00, 0x8f);
	spin_unlock(&rtc_lock);

	/* Remap the kernel at virtual address zero, as well as offset zero
	   from the kernel segment.  This assumes the kernel segment starts at
	   virtual address PAGE_OFFSET. */
	memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
		sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);

	/*
	 * Use `swapper_pg_dir' as our page directory.
	 */
	load_cr3(swapper_pg_dir);

	/* Write 0x1234 to absolute memory location 0x472.  The BIOS reads
	   this on booting to tell it to "Bypass memory test (also warm
	   boot)".  This seems like a fairly standard thing that gets set by
	   REBOOT.COM programs, and the previous reset routine did this
	   too. */
	*((unsigned short *)0x472) = reboot_mode;

	/* For the switch to real mode, copy some code to low memory.  It has
	   to be in the first 64k because it is running in 16-bit mode, and it
	   has to have the same physical and virtual address, because it turns
	   off paging.  Copy it near the end of the first page, out of the way
	   of BIOS variables. */
	memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
		real_mode_switch, sizeof (real_mode_switch));
	memcpy((void *)(0x1000 - 100), code, length);

	/* Set up the IDT for real mode. */
	load_idt(&real_mode_idt);

	/* Set up a GDT from which we can load segment descriptors for real
	   mode.  The GDT is not used in real mode; it is just needed here to
	   prepare the descriptors. */
	load_gdt(&real_mode_gdt);

	/* Load the data segment registers, and thus the descriptors ready for
	   real mode.  The base address of each segment is 0x100, 16 times the
	   selector value being loaded here.  This is so that the segment
	   registers don't have to be reloaded after switching to real mode:
	   the values are consistent for real mode operation already. */
	__asm__ __volatile__ ("movl $0x0010,%%eax\n"
				"\tmovl %%eax,%%ds\n"
				"\tmovl %%eax,%%es\n"
				"\tmovl %%eax,%%fs\n"
				"\tmovl %%eax,%%gs\n"
				"\tmovl %%eax,%%ss" : : : "eax");

	/* Jump to the 16-bit code that we copied earlier.  It disables paging
	   and the cache, switches to real mode, and jumps to the BIOS reset
	   entry point. */
	__asm__ __volatile__ ("ljmp $0x0008,%0"
				:
				: "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(machine_real_restart);
#endif

#endif /* CONFIG_X86_32 */

/* Wait for the i8042 keyboard controller to become ready to accept a
 * command: bit 1 of status port 0x64 is the input-buffer-full flag.
 * The loop is bounded so a dead controller cannot hang the reboot path.
 */
static inline void kb_wait(void)
{
	int i;

	for (i = 0; i < 0x10000; i++) {
		if ((inb(0x64) & 0x02) == 0)
			break;
		udelay(2);
	}
}

static void vmxoff_nmi(int cpu, struct die_args *args)
{
	cpu_emergency_vmxoff();
}

/* Use NMIs as IPIs to tell all CPUs to disable virtualization */
static void emergency_vmx_disable_all(void)
{
	/* Just make sure we won't change CPUs while doing this */
	local_irq_disable();

	/* We need to disable VMX on all CPUs before rebooting, otherwise
	 * we risk hanging up the machine, because the CPU ignores INIT
	 * signals when VMX is enabled.
	 *
	 * We can't take any locks and we may be in an inconsistent
	 * state, so we use NMIs as IPIs to tell the other CPUs to disable
	 * VMX and halt.
	 *
	 * For safety, we will avoid running the nmi_shootdown_cpus()
	 * stuff unnecessarily, but we don't have a way to check
	 * if other CPUs have VMX enabled. So we will call it only if the
	 * CPU we are running on has VMX enabled.
	 *
	 * We will miss cases where VMX is not enabled on all CPUs. This
	 * shouldn't do much harm because KVM always enables VMX on all
	 * CPUs anyway. But we can miss it in the small window where KVM
	 * is still enabling VMX.
	 */
	if (cpu_has_vmx() && cpu_vmx_enabled()) {
		/* Disable VMX on this CPU. */
		cpu_vmxoff();

		/* Halt and disable VMX on the other CPUs */
		nmi_shootdown_cpus(vmxoff_nmi);

	}
}


void __attribute__((weak)) mach_reboot_fixups(void)
{
}

static void native_machine_emergency_restart(void)
{
	int i;

	if (reboot_emergency)
		emergency_vmx_disable_all();

	/* Tell the BIOS if we want cold or warm reboot */
	*((unsigned short *)__va(0x472)) = reboot_mode;

	for (;;) {
		/* Could also try the reset bit in the Hammer NB */
		switch (reboot_type) {
		case BOOT_KBD:
			mach_reboot_fixups(); /* for board specific fixups */

			for (i = 0; i < 10; i++) {
				kb_wait();
				udelay(50);
				outb(0xfe, 0x64); /* pulse reset low */
				udelay(50);
			}
			/* fall through: if the keyboard reset failed, try a triple fault */

		case BOOT_TRIPLE:
			/* With an empty IDT loaded, the int3 below
			 * faults; that fault cannot be delivered either,
			 * and the resulting triple fault resets the CPU.
			 */
			load_idt(&no_idt);
			__asm__ __volatile__("int3");

			reboot_type = BOOT_KBD;
			break;

#ifdef CONFIG_X86_32
		case BOOT_BIOS:
			machine_real_restart(jump_to_bios, sizeof(jump_to_bios));

			reboot_type = BOOT_KBD;
			break;
#endif

		case BOOT_ACPI:
			acpi_reboot();
			reboot_type = BOOT_KBD;
			break;

		case BOOT_EFI:
			if (efi_enabled)
				efi.reset_system(reboot_mode ?
						 EFI_RESET_WARM :
						 EFI_RESET_COLD,
						 EFI_SUCCESS, 0, NULL);
			reboot_type = BOOT_KBD;
			break;

		case BOOT_CF9:
			port_cf9_safe = true;
			/* fall through */

		case BOOT_CF9_COND:
			if (port_cf9_safe) {
				u8 cf9 = inb(0xcf9) & ~6;
				outb(cf9|2, 0xcf9); /* Request hard reset */
				udelay(50);
				outb(cf9|6, 0xcf9); /* Actually do the reset */
				udelay(50);
			}
			reboot_type = BOOT_KBD;
			break;
		}
	}
}

void native_machine_shutdown(void)
{
	/* Stop the cpus and apics */
#ifdef CONFIG_SMP

	/* The boot cpu is always logical cpu 0 */
	int reboot_cpu_id = 0;

#ifdef CONFIG_X86_32
	/* See if a command line override has been given */
	if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) &&
		cpu_online(reboot_cpu))
		reboot_cpu_id = reboot_cpu;
#endif

	/* Make certain the cpu I'm about to reboot on is online */
	if (!cpu_online(reboot_cpu_id))
		reboot_cpu_id = smp_processor_id();

	/* Make certain I only run on the appropriate processor */
	set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));

	/* O.K. Now that I'm on the appropriate processor,
	 * stop all of the others.
	 */
	smp_send_stop();
#endif

	lapic_shutdown();

#ifdef CONFIG_X86_IO_APIC
	disable_IO_APIC();
#endif

#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif

#ifdef CONFIG_X86_64
	pci_iommu_shutdown();
#endif
}

static void __machine_emergency_restart(int emergency)
{
	reboot_emergency = emergency;
	machine_ops.emergency_restart();
}

static void native_machine_restart(char *__unused)
{
	printk("machine restart\n");

	if (!reboot_force)
		machine_shutdown();
	__machine_emergency_restart(0);
}

static void native_machine_halt(void)
{
	/* stop other cpus and apics */
	machine_shutdown();

	/* stop this cpu */
	stop_this_cpu(NULL);
}

static void native_machine_power_off(void)
{
	if (pm_power_off) {
		if (!reboot_force)
			machine_shutdown();
		pm_power_off();
	}
}

struct machine_ops machine_ops = {
	.power_off = native_machine_power_off,
	.shutdown = native_machine_shutdown,
	.emergency_restart = native_machine_emergency_restart,
	.restart = native_machine_restart,
	.halt = native_machine_halt,
#ifdef CONFIG_KEXEC
	.crash_shutdown = native_machine_crash_shutdown,
#endif
};
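
/*
 * The indirection through machine_ops lets other code (e.g. Xen or
 * kexec-aware code) override individual operations instead of patching
 * the functions above.  A hypothetical sketch; my_quiet_restart() and
 * my_platform_setup() are illustrative names, not kernel APIs:
 */
#if 0
static void my_quiet_restart(char *cmd)
{
	/* platform-specific reset sequence would go here */
}

static int __init my_platform_setup(void)
{
	machine_ops.restart = my_quiet_restart;
	return 0;
}
#endif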

void machine_power_off(void)
{
	machine_ops.power_off();
}

void machine_shutdown(void)
{
	machine_ops.shutdown();
}

void machine_emergency_restart(void)
{
	__machine_emergency_restart(1);
}

void machine_restart(char *cmd)
{
	machine_ops.restart(cmd);
}

void machine_halt(void)
{
	machine_ops.halt();
}

#ifdef CONFIG_KEXEC
void machine_crash_shutdown(struct pt_regs *regs)
{
	machine_ops.crash_shutdown(regs);
}
#endif


#if defined(CONFIG_SMP)

/* This keeps track of which CPU is crashing. */
static int crashing_cpu;
static nmi_shootdown_cb shootdown_callback;

static atomic_t waiting_for_crash_ipi;

static int crash_nmi_callback(struct notifier_block *self,
			unsigned long val, void *data)
{
	int cpu;

	if (val != DIE_NMI_IPI)
		return NOTIFY_OK;

	cpu = raw_smp_processor_id();

	/* Don't do anything if this handler is invoked on the crashing
	 * CPU; otherwise the system will completely hang. The crashing
	 * CPU can get an NMI if the system was booted with the
	 * nmi_watchdog parameter.
	 */
	if (cpu == crashing_cpu)
		return NOTIFY_STOP;
	local_irq_disable();

	shootdown_callback(cpu, (struct die_args *)data);

	atomic_dec(&waiting_for_crash_ipi);
	/* Assume hlt works */
	halt();
	for (;;)
		cpu_relax();

	return 1;
}

static void smp_send_nmi_allbutself(void)
{
	apic->send_IPI_allbutself(NMI_VECTOR);
}

static struct notifier_block crash_nmi_nb = {
	.notifier_call = crash_nmi_callback,
};

/* Halt all other CPUs, calling the specified function on each of them
 *
 * This function can be used to halt all other CPUs at crash or
 * emergency-reboot time. The function passed as a parameter will be
 * called from an NMI handler on all of those CPUs.
 */
void nmi_shootdown_cpus(nmi_shootdown_cb callback)
{
	unsigned long msecs;
	local_irq_disable();

	/* Make a note of the crashing cpu. Will be used in the NMI callback. */
	crashing_cpu = safe_smp_processor_id();

	shootdown_callback = callback;

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	/* Would it be better to replace the trap vector here? */
	if (register_die_notifier(&crash_nmi_nb))
		return;		/* return what? */
	/* Ensure the new callback function is set before sending
	 * out the NMI.
	 */
	wmb();

	smp_send_nmi_allbutself();

	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}

	/* Leave the nmi callback set */
}
#else /* !CONFIG_SMP */
void nmi_shootdown_cpus(nmi_shootdown_cb callback)
{
	/* No other CPUs to shoot down */
}
#endif
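
/*
 * A hypothetical caller, sketching the intended use: every other online
 * CPU runs the callback from NMI context and then halts, exactly as the
 * vmxoff_nmi()/nmi_shootdown_cpus() pairing above.  The names below are
 * illustrative only.
 */
#if 0
static void my_nmi_callback(int cpu, struct die_args *args)
{
	/* runs on each remote CPU in NMI context; keep it minimal */
}

static void my_emergency_stop(void)
{
	nmi_shootdown_cpus(my_nmi_callback);
	/* after at most ~1s, the other CPUs have halted or timed out */
}
#endif
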
/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif

typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#else
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
};

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif

/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2

/* global register indexes */
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(tcg_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(tcg_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(tcg_env, v->ofs, v->name);
    }
}

static TCGv load_zero(DisasContext *ctx)
{
    if (!ctx->zero) {
        ctx->zero = tcg_constant_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (!ctx->sink) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (ctx->sink) {
        tcg_gen_discard_i64(ctx->sink);
        ctx->sink = NULL;
    }
}

static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        return tcg_constant_i64(lit);
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
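
/*
 * A minimal sketch (not a function from this file) of how the
 * translator uses these accessors: reads of $31/$f31 yield a constant
 * zero and writes to them land in a discarded sink, so an ADDQ emitter
 * needs no special cases for the zero register.
 */
#if 0
static void gen_addq(DisasContext *ctx, int ra, int rb, int rc,
                     bool islit, uint8_t lit)
{
    tcg_gen_add_i64(dest_gpr(ctx, rc), load_gpr(ctx, ra),
                    load_gpr_lit(ctx, rb, lit, islit));
}
#endif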

static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#if HOST_BIG_ENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}
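
/*
 * Worked example: for shift == 8 the flag lives in byte 1 of the 32-bit
 * flags word, so the byte offset is flags + 1 on a little-endian host
 * and flags + (3 - 1) = flags + 2 on a big-endian host; both address
 * the same byte of the same uint32_t.
 */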

static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, tcg_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, tcg_env, get_flag_ofs(shift));
}

static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_constant_i32(exception);
    tmp2 = tcg_constant_i32(error_code);
    gen_helper_excp(tcg_env, tmp1, tmp2);
}

static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}

static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}

static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
    gen_helper_memory_to_f(dest, tmp32);
}

static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
    gen_helper_memory_to_g(dest, tmp);
}

static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
    gen_helper_memory_to_s(dest, tmp32);
}

static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr)
{
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}

static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
                        void (*func)(DisasContext *, TCGv, TCGv))
{
    /* Loads to $f31 are prefetches, which we can treat as nops. */
    if (likely(ra != 31)) {
        TCGv addr = tcg_temp_new();
        tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
        func(ctx, cpu_fir[ra], addr);
    }
}

static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                         MemOp op, bool clear, bool locked)
{
    TCGv addr, dest;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        tcg_gen_andi_i64(addr, addr, ~0x7);
    } else if (!locked) {
        op |= UNALIGN(ctx);
    }

    dest = ctx->ir[ra];
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op);

    if (locked) {
        tcg_gen_mov_i64(cpu_lock_addr, addr);
        tcg_gen_mov_i64(cpu_lock_value, dest);
    }
}

static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, src);
    tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
}

static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, src);
    tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}

static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, src);
    tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
}

static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr)
{
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}

static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
                         void (*func)(DisasContext *, TCGv, TCGv))
{
    TCGv addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    func(ctx, load_fpr(ctx, ra), addr);
}

static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                          MemOp op, bool clear)
{
    TCGv addr, src;

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        op |= UNALIGN(ctx);
    }

    src = load_gpr(ctx, ra);
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op);
}

static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           MemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}

static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return DISAS_NEXT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, uint64_t imm, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, imm, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb(ctx->base.tb, 0);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 i = tcg_constant_i64(imm);
        TCGv_i64 d = tcg_constant_i64(dest);
        TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, i, d, p);
        return DISAS_PC_UPDATED;
    }
}

static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp)
{
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra),
                              is_tst_cond(cond), disp);
}

/* Fold -0.0 for comparison with COND.  */

static TCGv_i64 gen_fold_mzero(TCGCond *pcond, uint64_t *pimm, TCGv_i64 src)
{
    TCGv_i64 tmp;

    *pimm = 0;
    switch (*pcond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        return src;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can compare without the sign bit. */
        *pcond = *pcond == TCG_COND_EQ ? TCG_COND_TSTEQ : TCG_COND_TSTNE;
        *pimm = INT64_MAX;
        return src;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0. */
        tmp = tcg_temp_new_i64();
        tcg_gen_movcond_i64(TCG_COND_EQ, tmp,
                            src, tcg_constant_i64(INT64_MIN),
                            tcg_constant_i64(0), src);
        return tmp;

    default:
        g_assert_not_reached();
    }
}
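
/*
 * Concretely: -0.0 is the bit pattern 0x8000000000000000, i.e.
 * INT64_MIN.  For == and != the sign bit is ignored by testing against
 * INT64_MAX, so +0.0 and -0.0 compare equal; for >= and < the movcond
 * rewrites an INT64_MIN input to 0 so that -0.0 is not taken as
 * negative.
 */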

static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    uint64_t imm;
    TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, tmp, imm, disp);
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    uint64_t imm;
    TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra));
    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc),
                        tmp, tcg_constant_i64(imm),
                        load_fpr(ctx, rb), load_fpr(ctx, rc));
}

#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */

static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, tcg_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, tcg_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif
}

static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, tcg_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, tcg_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif
}

static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(tcg_env, val);
            } else {
                gen_helper_ieee_input(tcg_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(tcg_env, val);
#endif
        }
    }
    return val;
}

static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_constant_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_constant_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(tcg_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(tcg_env, ign, reg);
    }
}

static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);
}

static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), tcg_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)

static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, tcg_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, tcg_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}

static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, tcg_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, tcg_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)

static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_constant_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);
}

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, tcg_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)

static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, tcg_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)

static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
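
/*
 * For example, zapnot_mask(0x0f) selects the low four bytes, giving
 * 0x00000000ffffffff, and zapnot_mask(0x81) keeps only the lowest and
 * highest bytes, giving 0xff000000000000ff.  gen_zapnoti() below
 * special-cases the masks that reduce to plain zero extensions.
 */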

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}

/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}

/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
    }
}
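
/*
 * Checking the arithmetic above: for (B & 7) == 2, INSQH wants a right
 * shift of 64 - 16 = 48; ~(2 * 8) & 63 = 47 and the extra constant
 * shift by 1 makes 48.  For (B & 7) == 0 the two shifts sum to
 * 63 + 1 = 64, which portably produces the required zero.
 */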

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
    }
}

/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);
    }
}

static void gen_rx(DisasContext *ctx, int ra, int set)
{
    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT);
}

static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], tcg_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], tcg_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], tcg_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                           -offsetof(AlphaCPU, env) +
                           offsetof(CPUState, halted));
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUAlphaState, exc_addr));

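        /* PALcode entry points are 64-byte slots off the PAL base:
           privileged calls (0x00-0x3f) vector into palbr + 0x1000,
           unprivileged calls (0x80-0xbf) into palbr + 0x2000.  */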
        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        tcg_gen_movi_i64(cpu_pc, entry);
        return DISAS_PC_UPDATED;
    }
#endif
}

#ifndef CONFIG_USER_ONLY

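/* cpu_pr_data returns the env offset of a processor register, with
   PR_LONG or'd in when the underlying field is only 32 bits wide.  The
   flag lies well above any real offsetof() value, so they never collide.  */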
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}

static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
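        /* With icount enabled, translator_io_start() requires this insn
           to end the TB so the clock read sees an up-to-date count;
           DISAS_PC_STALE arranges for that.  */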
        if (translator_io_start(&ctx->base)) {
            helper(va);
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, tcg_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, tcg_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}

static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;
    DisasJumpType ret = DISAS_NEXT;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(tcg_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(tcg_env, vb);
        break;

    case 253:
        /* WAIT */
        tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                       -offsetof(AlphaCPU, env) + offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        if (translator_io_start(&ctx->base)) {
            ret = DISAS_PC_STALE;
        }
        gen_helper_set_alarm(tcg_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, tcg_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(tcg_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, tcg_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, tcg_env, data);
            }
        }
        break;
    }

    return ret;
}
#endif /* !CONFIG_USER_ONLY */

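/* Decode-time guards: each macro bails out of translate_one() via
   invalid_opc (or raise_fen) when the insn's requirements are not met.  */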
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_FEN                             \
    do {                                        \
        if (!(ctx->tbflags & ENV_FLAG_FEN)) {   \
            goto raise_fen;                     \
        }                                       \
    } while (0)

static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    DisasJumpType ret;

    /* Decode all instruction fields */
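    /* opc is insn<31:26>; ra, rb and rc are insn<25:21>, insn<20:16>
       and insn<4:0>.  Operate-format insns take an 8-bit literal from
       insn<20:13> when the literal bit insn<12> is set.  */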
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);

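    /* $31 always reads as zero, so fold a $31 operand into the literal
       path; later code can then treat it uniformly as lit == 0.  */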
    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = DISAS_NEXT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
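        /* Scale the displacement into the high 16 bits; the cast to
           uint32_t avoids shifting a negative value.  */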
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_AMASK(BWX);
        gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 1, 0);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_AMASK(BWX);
        gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_AMASK(BWX);
        gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_AMASK(BWX);
        gen_store_int(ctx, ra, rb, disp16, MO_UB, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 1);
        break;

    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(tcg_env, vc, tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(tcg_env, vc, tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
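            /* Signed overflow occurred iff the operands have the same
               sign (eqv bit 63 set) and the result's sign differs from
               va's (xor bit 63 set); extract that bit for the check.  */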
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(tcg_env, tmp, tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
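            /* As for ADDQ/V, except that subtraction overflows iff the
               operand signs differ (xor) and the result's sign differs
               from va's.  */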
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(tcg_env, tmp, tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tcg_gen_movcond_i64(TCG_COND_TSTNE, vc, va, tcg_constant_i64(1),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x16:
            /* CMOVLBC */
            tcg_gen_movcond_i64(TCG_COND_TSTEQ, vc, va, tcg_constant_i64(1),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            tcg_gen_andi_i64(vc, vb, ~ctx->amask);
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
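            /* mulu2 forms the full 128-bit product: the low half lands
               in a discarded temp, the high half in vc.  */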
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(tcg_env, vc, tmp);
            break;
        case 0x60:
            /* MULQ/V */
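            /* A signed 64x64 multiply overflows iff the high half of
               the 128-bit product differs from the sign extension of
               the low half; check_overflow compares the two.  */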
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(tcg_env, tmp, tmp2);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x14:
        REQUIRE_AMASK(FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, tcg_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_sqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, tcg_env, vb);
            break;
        case 0x2B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            REQUIRE_FEN;
            gen_helper_addf(vc, tcg_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            REQUIRE_FEN;
            gen_helper_subf(vc, tcg_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            REQUIRE_FEN;
            gen_helper_mulf(vc, tcg_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            REQUIRE_FEN;
            gen_helper_divf(vc, tcg_env, va, vb);
            break;
        case 0x1E:
            /* CVTDG -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x20:
            /* ADDG */
            REQUIRE_FEN;
            gen_helper_addg(vc, tcg_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            REQUIRE_FEN;
            gen_helper_subg(vc, tcg_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            REQUIRE_FEN;
            gen_helper_mulg(vc, tcg_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            REQUIRE_FEN;
            gen_helper_divg(vc, tcg_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            REQUIRE_FEN;
            gen_helper_cmpgeq(vc, tcg_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            REQUIRE_FEN;
            gen_helper_cmpglt(vc, tcg_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            REQUIRE_FEN;
            gen_helper_cmpgle(vc, tcg_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtgf(vc, tcg_env, vb);
            break;
        case 0x2D:
            /* CVTGD -- TODO */
            REQUIRE_REG_31(ra);
            goto invalid_opc;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtgq(vc, tcg_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtqf(vc, tcg_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtqg(vc, tcg_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            REQUIRE_FEN;
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            REQUIRE_FEN;
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            REQUIRE_FEN;
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            REQUIRE_FEN;
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            REQUIRE_FEN;
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            REQUIRE_FEN;
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            REQUIRE_FEN;
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            REQUIRE_FEN;
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            REQUIRE_FEN;
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            REQUIRE_FEN;
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            REQUIRE_FEN;
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            REQUIRE_FEN;
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
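            /* Function code 0x2C is overloaded: fn11 values 0x2AC and
               0x6AC encode CVTST; every other qualifier combination
               is CVTTS.  */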
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_cvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            REQUIRE_FEN;
            if (rc == 31) {
                /* Special case CPYS as FNOP.  */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV.  */
                    tcg_gen_mov_i64(vc, va);
                } else {
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            REQUIRE_FEN;
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(tcg_env, va);
            if (ctx->tb_rm == QUAL_RM_D) {
                /* Re-do the copy of the rounding mode to fp_status
                   the next time we use dynamic rounding.  */
                ctx->tb_rm = -1;
            }
            break;
        case 0x025:
            /* MF_FPCR */
            REQUIRE_FEN;
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, tcg_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, tcg_env, vb);
            gen_fp_exc_raise(rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;
        case 0x4400:
            /* WMB */
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            va = dest_gpr(ctx, ra);
            if (translator_io_start(&ctx->base)) {
                ret = DISAS_PC_STALE;
            }
            gen_helper_load_pcc(va, tcg_env);
            break;
        case 0xE000:
            /* RC */
            gen_rx(ctx, ra, 0);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            gen_rx(ctx, ra, 1);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        case 0xFC00:
            /* WH64EN */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        if (ra != 31) {
            tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
        }
        ret = DISAS_PC_UPDATED;
        break;

    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                tcg_gen_mov_i64(cpu_lock_addr, addr);
                tcg_gen_mov_i64(cpu_lock_value, va);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                tcg_gen_mov_i64(cpu_lock_addr, addr);
                tcg_gen_mov_i64(cpu_lock_value, va);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
                                    MO_LESL | MO_ALIGN);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
                                    MO_LEUQ | MO_ALIGN);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
                                    MO_LESL | MO_ALIGN);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
                                    MO_LEUQ | MO_ALIGN);
                break;
            }
            break;
        }
#else
        goto invalid_opc;
#endif

    case 0x1C:
        vc = dest_gpr(ctx, rc);
        if (fn7 == 0x70) {
            /* FTOIT */
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            va = load_fpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        } else if (fn7 == 0x78) {
            /* FTOIS */
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            break;
        }

        vb = load_gpr_lit(ctx, rb, lit, islit);
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext8s_i64(vc, vb);
            break;
        case 0x01:
            /* SEXTW */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext16s_i64(vc, vb);
            break;
        case 0x30:
            /* CTPOP */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctpop_i64(vc, vb);
            break;
        case 0x31:
            /* PERR */
            REQUIRE_AMASK(MVI);
            REQUIRE_NO_LIT;
            va = load_gpr(ctx, ra);
            gen_helper_perr(vc, va, vb);
            break;
        case 0x32:
            /* CTLZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_clzi_i64(vc, vb, 64);
            break;
        case 0x33:
            /* CTTZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctzi_i64(vc, vb, 64);
            break;
        case 0x34:
            /* UNPKBW */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbw(vc, vb);
            break;
        case 0x35:
            /* UNPKBL */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbl(vc, vb);
            break;
        case 0x36:
            /* PKWB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pkwb(vc, vb);
            break;
        case 0x37:
            /* PKLB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pklb(vc, vb);
            break;
        case 0x38:
            /* MINSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsb8(vc, va, vb);
            break;
        case 0x39:
            /* MINSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsw4(vc, va, vb);
            break;
        case 0x3A:
            /* MINUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minub8(vc, va, vb);
            break;
        case 0x3B:
            /* MINUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minuw4(vc, va, vb);
            break;
        case 0x3C:
            /* MAXUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxub8(vc, va, vb);
            break;
        case 0x3D:
            /* MAXUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxuw4(vc, va, vb);
            break;
        case 0x3E:
            /* MAXSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsb8(vc, va, vb);
            break;
        case 0x3F:
            /* MAXSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsw4(vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;

    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it.  */
            vb = dest_sink(ctx);
            tcg_gen_ld_i64(vb, tcg_env, offsetof(CPUAlphaState, exc_addr));
        } else {
            vb = load_gpr(ctx, rb);
        }
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT);
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, vb, 1);
        st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        /* Allow interrupts to be recognized right away.  */
        ret = DISAS_PC_UPDATED_NOCHAIN;
        break;
#else
        goto invalid_opc;
#endif

    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        {
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                va = load_gpr(ctx, ra);
                vb = load_gpr(ctx, rb);
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, vb, disp12);
                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                break;
            case 0x1:
                /* Quadword physical access */
                va = load_gpr(ctx, ra);
                vb = load_gpr(ctx, rb);
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, vb, disp12);
                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                break;
            case 0x2:
                /* Longword physical access with lock */
                ret = gen_store_conditional(ctx, ra, rb, disp12,
                                            MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                ret = gen_store_conditional(ctx, ra, rb, disp12,
                                            MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                break;
            case 0x4:
                /* Longword virtual access */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual access */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Invalid */
                goto invalid_opc;
            case 0x9:
                /* Invalid */
                goto invalid_opc;
            case 0xA:
                /* Invalid */
                goto invalid_opc;
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xE:
                /* Invalid */
                goto invalid_opc;
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            break;
        }
#else
        goto invalid_opc;
#endif
    case 0x20:
        /* LDF */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldf);
        break;
    case 0x21:
        /* LDG */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldg);
        break;
    case 0x22:
        /* LDS */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_lds);
        break;
    case 0x23:
        /* LDT */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldt);
        break;
    case 0x24:
        /* STF */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stf);
        break;
    case 0x25:
        /* STG */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stg);
        break;
    case 0x26:
        /* STS */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_sts);
        break;
    case 0x27:
        /* STT */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stt);
        break;
    case 0x28:
        /* LDL */
        gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1);
        break;
    case 0x2C:
        /* STL */
        gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LESL | MO_ALIGN);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LEUQ | MO_ALIGN);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_TSTEQ, ra, disp21);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_TSTNE, ra, disp21);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    raise_fen:
        ret = gen_excp(ctx, EXCP_FEN, 0);
        break;
    }

    return ret;
}

static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu_env(cpu);
    int64_t bound;

    ctx->tbflags = ctx->base.tb->flags;
    ctx->mem_idx = alpha_env_mmu_index(env);
    ctx->implver = env->implver;
    ctx->amask = env->amask;

#ifdef CONFIG_USER_ONLY
    ctx->ir = cpu_std_ir;
    ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->palbr = env->palbr;
    ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx->tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx->tb_ftz = -1;

    ctx->zero = NULL;
    ctx->sink = NULL;

    /* Bound the number of insns to execute to those left on the page.  */
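    /* TARGET_PAGE_MASK is negative, so pc_first | TARGET_PAGE_MASK is
       (page offset - page size); negating it yields the bytes remaining
       on the page, and each Alpha insn is 4 bytes.  */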
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}

static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    tcg_gen_insn_start(dcbase->pc_next);
}

static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu_env(cpu);
    uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

    ctx->base.pc_next += 4;
    ctx->base.is_jmp = translate_one(ctx, insn);

    free_context_temps(ctx);
}

static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

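    /* Finish the TB according to how translation stopped: chain directly
       to the next TB when possible, otherwise update cpu_pc and use the
       indirect lookup; NOCHAIN exits return to the main loop so pending
       interrupts are recognized immediately.  */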
    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        if (use_goto_tb(ctx, ctx->base.pc_next)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            tcg_gen_exit_tb(ctx->base.tb, 0);
        }
        /* FALLTHRU */
    case DISAS_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_PC_UPDATED_NOCHAIN:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void alpha_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
    .disas_log          = alpha_tr_disas_log,
};

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
}