path: root/mali_kbase/hwcnt/backend/mali_kbase_hwcnt_backend_jm_watchdog.c
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2021-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <mali_kbase.h>

#include <hwcnt/mali_kbase_hwcnt_gpu.h>
#include <hwcnt/mali_kbase_hwcnt_types.h>

#include <hwcnt/backend/mali_kbase_hwcnt_backend.h>
#include <hwcnt/backend/mali_kbase_hwcnt_backend_jm_watchdog.h>
#include <hwcnt/mali_kbase_hwcnt_watchdog_if.h>

#if IS_ENABLED(CONFIG_MALI_IS_FPGA) && !IS_ENABLED(CONFIG_MALI_NO_MALI)
/* Backend watchdog timer interval in milliseconds: 18 seconds. */
static const u32 hwcnt_backend_watchdog_timer_interval_ms = 18000;
#else
/* Backend watchdog timer interval in milliseconds: 1 second. */
static const u32 hwcnt_backend_watchdog_timer_interval_ms = 1000;
#endif /* IS_FPGA && !NO_MALI */

/*
 * IDLE_BUFFER_EMPTY -> USER_DUMPING_BUFFER_EMPTY     on dump_request.
 * IDLE_BUFFER_EMPTY -> TIMER_DUMPING                 after
 *                                                    hwcnt_backend_watchdog_timer_interval_ms
 *                                                    milliseconds, if no dump_request has been
 *                                                    called in the meantime.
 * IDLE_BUFFER_FULL  -> USER_DUMPING_BUFFER_FULL      on dump_request.
 * IDLE_BUFFER_FULL  -> TIMER_DUMPING                 after
 *                                                    hwcnt_backend_watchdog_timer_interval_ms
 *                                                    milliseconds, if no dump_request has been
 *                                                    called in the meantime.
 * IDLE_BUFFER_FULL  -> IDLE_BUFFER_EMPTY             on dump_disable, upon discarding undumped
 *                                                    counter values since the last dump_get.
 * IDLE_BUFFER_EMPTY -> BUFFER_CLEARING               on dump_clear, before calling job manager
 *                                                    backend dump_clear.
 * IDLE_BUFFER_FULL  -> BUFFER_CLEARING               on dump_clear, before calling job manager
 *                                                    backend dump_clear.
 * USER_DUMPING_BUFFER_EMPTY -> BUFFER_CLEARING       on dump_clear, before calling job manager
 *                                                    backend dump_clear.
 * USER_DUMPING_BUFFER_FULL  -> BUFFER_CLEARING       on dump_clear, before calling job manager
 *                                                    backend dump_clear.
 * BUFFER_CLEARING -> IDLE_BUFFER_EMPTY               on dump_clear, upon job manager backend
 *                                                    dump_clear completion.
 * TIMER_DUMPING -> IDLE_BUFFER_FULL                  on timer's callback completion.
 * TIMER_DUMPING -> TIMER_DUMPING_USER_CLEAR          on dump_clear, notifies the callback thread
 *                                                    that there is no need for dumping the buffer
 *                                                    anymore, and that the client will proceed
 *                                                    clearing the buffer.
 * TIMER_DUMPING_USER_CLEAR -> IDLE_BUFFER_EMPTY      on timer's callback completion, when a user
 *                                                    requested a dump_clear.
 * TIMER_DUMPING -> TIMER_DUMPING_USER_REQUESTED      on dump_request, when a client performs a
 *                                                    dump request while the timer is dumping (the
 *                                                    timer will perform the dump and (once
 *                                                    completed) the client will retrieve the value
 *                                                    from the buffer).
 * TIMER_DUMPING_USER_REQUESTED -> IDLE_BUFFER_EMPTY  on dump_get, when a timer completed and the
 *                                                    user reads the periodic dump buffer.
 * Any -> ERROR                                       if the job manager backend returns an error
 *                                                    (of any kind).
 * USER_DUMPING_BUFFER_EMPTY -> IDLE_BUFFER_EMPTY     on dump_get (performs get, ignores the
 *                                                    periodic dump buffer and returns).
 * USER_DUMPING_BUFFER_FULL  -> IDLE_BUFFER_EMPTY     on dump_get (performs get, accumulates with
 *                                                    periodic dump buffer and returns).
 */
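
/*
 * Example (illustrative only, derived from the transition table above): with
 * no client activity the backend cycles
 *
 *   IDLE_BUFFER_EMPTY -> TIMER_DUMPING -> IDLE_BUFFER_FULL
 *                     -> TIMER_DUMPING -> IDLE_BUFFER_FULL -> ...
 *
 * with each periodic dump accumulated into the watchdog dump buffer. If a
 * client then calls dump_request/dump_wait/dump_get, the machine moves
 * IDLE_BUFFER_FULL -> USER_DUMPING_BUFFER_FULL -> IDLE_BUFFER_EMPTY, and the
 * client's buffer receives the accumulated watchdog data plus a fresh dump.
 */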

/**
 * enum backend_watchdog_state - State used to synchronize timer callbacks with the main thread.
 * @HWCNT_JM_WD_ERROR: Received an error from the job manager backend calls.
 * @HWCNT_JM_WD_IDLE_BUFFER_EMPTY: Initial state. Watchdog timer enabled, periodic dump buffer is
 *                                 empty.
 * @HWCNT_JM_WD_IDLE_BUFFER_FULL: Watchdog timer enabled, periodic dump buffer is full.
 * @HWCNT_JM_WD_BUFFER_CLEARING: The client is performing a dump clear. A concurrent timer callback
 *                               thread should just ignore it and reschedule another callback after
 *                               hwcnt_backend_watchdog_timer_interval_ms milliseconds.
 * @HWCNT_JM_WD_TIMER_DUMPING: The timer ran out. The callback is performing a periodic dump.
 * @HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED: While the timer is performing a periodic dump, user
 *                                            requested a dump.
 * @HWCNT_JM_WD_TIMER_DUMPING_USER_CLEAR: While the timer is performing a dump, user requested a
 *                                        dump_clear. The timer has to complete the periodic dump
 *                                        and clear buffer (internal and job manager backend).
 * @HWCNT_JM_WD_USER_DUMPING_BUFFER_EMPTY: From IDLE state, user requested a dump. The periodic
 *                                         dump buffer is empty.
 * @HWCNT_JM_WD_USER_DUMPING_BUFFER_FULL: From IDLE state, user requested a dump. The periodic dump
 *                                        buffer is full.
 *
 * While the state machine is in HWCNT_JM_WD_TIMER_DUMPING*, only the timer callback thread is
 * allowed to call the job manager backend layer.
 */
enum backend_watchdog_state {
	HWCNT_JM_WD_ERROR,
	HWCNT_JM_WD_IDLE_BUFFER_EMPTY,
	HWCNT_JM_WD_IDLE_BUFFER_FULL,
	HWCNT_JM_WD_BUFFER_CLEARING,
	HWCNT_JM_WD_TIMER_DUMPING,
	HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED,
	HWCNT_JM_WD_TIMER_DUMPING_USER_CLEAR,
	HWCNT_JM_WD_USER_DUMPING_BUFFER_EMPTY,
	HWCNT_JM_WD_USER_DUMPING_BUFFER_FULL,
};

/**
 * enum wd_init_state - State machine for initialization / termination of the backend resources.
 */
enum wd_init_state {
	HWCNT_JM_WD_INIT_START,
	HWCNT_JM_WD_INIT_BACKEND = HWCNT_JM_WD_INIT_START,
	HWCNT_JM_WD_INIT_ENABLE_MAP,
	HWCNT_JM_WD_INIT_DUMP_BUFFER,
	HWCNT_JM_WD_INIT_END
};
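
/*
 * Illustrative only: kbasep_hwcnt_backend_jm_watchdog_init() walks these
 * states forward and, on failure, kbasep_hwcnt_backend_jm_watchdog_term_partial()
 * unwinds only the stages already completed. For example, if
 * kbase_hwcnt_dump_buffer_alloc() fails, state is HWCNT_JM_WD_INIT_DUMP_BUFFER,
 * so the unwind loop frees the enable map and terminates the job manager
 * backend, but never touches the (never allocated) dump buffer.
 */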

/**
 * struct kbase_hwcnt_backend_jm_watchdog_info - Immutable information used to initialize an
 *                                               instance of the job manager watchdog backend.
 * @jm_backend_iface: Hardware counter backend interface. This module extends
 *                    this interface with a watchdog that performs regular
 *                    dumps. The new interface this module provides complies
 *                    with the old backend interface.
 * @dump_watchdog_iface: Dump watchdog interface, used to periodically dump the
 *                       hardware counter in case no reads are requested within
 *                       a certain time, used to avoid hardware counter's buffer
 *                       saturation.
 */
struct kbase_hwcnt_backend_jm_watchdog_info {
	struct kbase_hwcnt_backend_interface *jm_backend_iface;
	struct kbase_hwcnt_watchdog_interface *dump_watchdog_iface;
};

/**
 * struct kbase_hwcnt_backend_jm_watchdog - An instance of the job manager watchdog backend.
 * @info: Immutable information used to create the job manager watchdog backend.
 * @jm_backend: Job manager's backend internal state. To be passed as argument during parent calls.
 * @timeout_ms: Time period in milliseconds for hardware counters dumping.
 * @wd_dump_buffer: Used to store periodic dumps done by a timer callback function. Contents are
 *                  valid in state %HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED,
 *                  %HWCNT_JM_WD_IDLE_BUFFER_FULL or %HWCNT_JM_WD_USER_DUMPING_BUFFER_FULL.
 * @wd_enable_map: Watchdog backend internal buffer mask, initialized during dump_enable copying
 *                 the enable_map passed as argument.
 * @wd_dump_timestamp: Holds the dumping timestamp for potential future client dump_request, filled
 *                     during watchdog timer dumps.
 * @watchdog_complete: Used for synchronization between watchdog dumper thread and client calls.
 * @locked: Members protected from concurrent access by different threads.
 * @locked.watchdog_lock: Lock used to access fields within this struct (that require mutual
 *                        exclusion).
 * @locked.is_enabled: If true then the wrapped job manager hardware counter backend and the
 *                     watchdog timer are both enabled. If false then both are disabled (or soon
 *                     will be). Races between enable and disable have undefined behavior.
 * @locked.state: State used to synchronize timer callbacks with the main thread.
 */
struct kbase_hwcnt_backend_jm_watchdog {
	const struct kbase_hwcnt_backend_jm_watchdog_info *info;
	struct kbase_hwcnt_backend *jm_backend;
	u32 timeout_ms;
	struct kbase_hwcnt_dump_buffer wd_dump_buffer;
	struct kbase_hwcnt_enable_map wd_enable_map;
	u64 wd_dump_timestamp;
	struct completion watchdog_complete;
	struct {
		spinlock_t watchdog_lock;
		bool is_enabled;
		enum backend_watchdog_state state;
	} locked;
};

/* timer's callback function */
static void kbasep_hwcnt_backend_jm_watchdog_timer_callback(void *backend)
{
	struct kbase_hwcnt_backend_jm_watchdog *wd_backend = backend;
	unsigned long flags;
	bool wd_accumulate;

	spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);

	if (!wd_backend->locked.is_enabled || wd_backend->locked.state == HWCNT_JM_WD_ERROR) {
		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		return;
	}

	if (!(wd_backend->locked.state == HWCNT_JM_WD_IDLE_BUFFER_EMPTY ||
	      wd_backend->locked.state == HWCNT_JM_WD_IDLE_BUFFER_FULL)) {
		/* Resetting the timer. Calling modify on a disabled timer enables it. */
		wd_backend->info->dump_watchdog_iface->modify(
			wd_backend->info->dump_watchdog_iface->timer, wd_backend->timeout_ms);
		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		return;
	}
	/* Start performing the dump */

	/* if there has been a previous timeout use accumulating dump_get()
	 * otherwise use non-accumulating to overwrite buffer
	 */
	wd_accumulate = (wd_backend->locked.state == HWCNT_JM_WD_IDLE_BUFFER_FULL);

	wd_backend->locked.state = HWCNT_JM_WD_TIMER_DUMPING;

	spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);

	if (wd_backend->info->jm_backend_iface->dump_request(wd_backend->jm_backend,
							     &wd_backend->wd_dump_timestamp) ||
	    wd_backend->info->jm_backend_iface->dump_wait(wd_backend->jm_backend) ||
	    wd_backend->info->jm_backend_iface->dump_get(
		    wd_backend->jm_backend, &wd_backend->wd_dump_buffer, &wd_backend->wd_enable_map,
		    wd_accumulate)) {
		spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);
		WARN_ON(wd_backend->locked.state != HWCNT_JM_WD_TIMER_DUMPING &&
			wd_backend->locked.state != HWCNT_JM_WD_TIMER_DUMPING_USER_CLEAR &&
			wd_backend->locked.state != HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED);
		wd_backend->locked.state = HWCNT_JM_WD_ERROR;
		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		/* Unblock user if it's waiting. */
		complete_all(&wd_backend->watchdog_complete);
		return;
	}

	spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);
	WARN_ON(wd_backend->locked.state != HWCNT_JM_WD_TIMER_DUMPING &&
		wd_backend->locked.state != HWCNT_JM_WD_TIMER_DUMPING_USER_CLEAR &&
		wd_backend->locked.state != HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED);

	if (wd_backend->locked.state == HWCNT_JM_WD_TIMER_DUMPING) {
		/* If there is no user request/clear, transition to
		 * HWCNT_JM_WD_IDLE_BUFFER_FULL to indicate the timer dump is done and
		 * the buffer is full. If the state changed to
		 * HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED or
		 * HWCNT_JM_WD_TIMER_DUMPING_USER_CLEAR then the user will transition
		 * the state machine to the next state.
		 */
		wd_backend->locked.state = HWCNT_JM_WD_IDLE_BUFFER_FULL;
	}
	if (wd_backend->locked.state != HWCNT_JM_WD_ERROR && wd_backend->locked.is_enabled) {
		/* Reset the timer to schedule another callback. Calling modify on a
		 * disabled timer enables it. The spinlock needs to be held in case
		 * the client calls dump_enable concurrently.
		 */
		wd_backend->info->dump_watchdog_iface->modify(
			wd_backend->info->dump_watchdog_iface->timer, wd_backend->timeout_ms);
	}
	spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);

	/* Unblock user if it's waiting. */
	complete_all(&wd_backend->watchdog_complete);
}
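
/*
 * A minimal sketch (not extra driver code, just the pattern used above) of the
 * completion handshake between a client thread and the timer callback:
 *
 *	// client, holding watchdog_lock:
 *	reinit_completion(&wd_backend->watchdog_complete);
 *	wd_backend->locked.state = HWCNT_JM_WD_TIMER_DUMPING_USER_CLEAR;
 *	// ...release the lock, then block until the callback is done:
 *	wait_for_completion(&wd_backend->watchdog_complete);
 *
 *	// timer callback, once the dump attempt finishes (success or error):
 *	complete_all(&wd_backend->watchdog_complete);
 *
 * complete_all() rather than complete() ensures every waiter is released by a
 * single signal from the callback.
 */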

/* Helper methods, info structure creation and destruction */

static struct kbase_hwcnt_backend_jm_watchdog_info *
kbasep_hwcnt_backend_jm_watchdog_info_create(struct kbase_hwcnt_backend_interface *backend_iface,
					     struct kbase_hwcnt_watchdog_interface *watchdog_iface)
{
	struct kbase_hwcnt_backend_jm_watchdog_info *const info =
		kmalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return NULL;

	*info = (struct kbase_hwcnt_backend_jm_watchdog_info){ .jm_backend_iface = backend_iface,
							       .dump_watchdog_iface =
								       watchdog_iface };

	return info;
}

/****** kbase_hwcnt_backend_interface implementation *******/

/* Job manager watchdog backend, implementation of kbase_hwcnt_backend_metadata_fn */
static const struct kbase_hwcnt_metadata *
kbasep_hwcnt_backend_jm_watchdog_metadata(const struct kbase_hwcnt_backend_info *info)
{
	const struct kbase_hwcnt_backend_jm_watchdog_info *wd_info = (void *)info;

	if (WARN_ON(!info))
		return NULL;

	return wd_info->jm_backend_iface->metadata(wd_info->jm_backend_iface->info);
}

static void
kbasep_hwcnt_backend_jm_watchdog_term_partial(struct kbase_hwcnt_backend_jm_watchdog *wd_backend,
					      enum wd_init_state state)
{
	if (!wd_backend)
		return;

	WARN_ON(state > HWCNT_JM_WD_INIT_END);

	while (state-- > HWCNT_JM_WD_INIT_START) {
		switch (state) {
		case HWCNT_JM_WD_INIT_BACKEND:
			wd_backend->info->jm_backend_iface->term(wd_backend->jm_backend);
			break;
		case HWCNT_JM_WD_INIT_ENABLE_MAP:
			kbase_hwcnt_enable_map_free(&wd_backend->wd_enable_map);
			break;
		case HWCNT_JM_WD_INIT_DUMP_BUFFER:
			kbase_hwcnt_dump_buffer_free(&wd_backend->wd_dump_buffer);
			break;
		case HWCNT_JM_WD_INIT_END:
			break;
		}
	}

	kfree(wd_backend);
}

/* Job manager watchdog backend, implementation of kbase_hwcnt_backend_term_fn
 * Calling term does *not* destroy the interface
 */
static void kbasep_hwcnt_backend_jm_watchdog_term(struct kbase_hwcnt_backend *backend)
{
	struct kbase_hwcnt_backend_jm_watchdog *wd_backend =
		(struct kbase_hwcnt_backend_jm_watchdog *)backend;

	if (!backend)
		return;

	/* disable timer thread to avoid concurrent access to shared resources */
	wd_backend->info->dump_watchdog_iface->disable(
		wd_backend->info->dump_watchdog_iface->timer);

	kbasep_hwcnt_backend_jm_watchdog_term_partial(wd_backend, HWCNT_JM_WD_INIT_END);
}

/* Job manager watchdog backend, implementation of kbase_hwcnt_backend_init_fn */
static int kbasep_hwcnt_backend_jm_watchdog_init(const struct kbase_hwcnt_backend_info *info,
						 struct kbase_hwcnt_backend **out_backend)
{
	int errcode = 0;
	struct kbase_hwcnt_backend_jm_watchdog *wd_backend = NULL;
	struct kbase_hwcnt_backend_jm_watchdog_info *const wd_info = (void *)info;
	const struct kbase_hwcnt_backend_info *jm_info;
	const struct kbase_hwcnt_metadata *metadata;
	enum wd_init_state state = HWCNT_JM_WD_INIT_START;

	if (WARN_ON(!info) || WARN_ON(!out_backend))
		return -EINVAL;

	jm_info = wd_info->jm_backend_iface->info;
	metadata = wd_info->jm_backend_iface->metadata(wd_info->jm_backend_iface->info);

	wd_backend = kmalloc(sizeof(*wd_backend), GFP_KERNEL);
	if (!wd_backend) {
		*out_backend = NULL;
		return -ENOMEM;
	}

	*wd_backend = (struct kbase_hwcnt_backend_jm_watchdog){
		.info = wd_info,
		.timeout_ms = hwcnt_backend_watchdog_timer_interval_ms,
		.locked = { .state = HWCNT_JM_WD_IDLE_BUFFER_EMPTY, .is_enabled = false }
	};

	while (state < HWCNT_JM_WD_INIT_END && !errcode) {
		switch (state) {
		case HWCNT_JM_WD_INIT_BACKEND:
			errcode = wd_info->jm_backend_iface->init(jm_info, &wd_backend->jm_backend);
			break;
		case HWCNT_JM_WD_INIT_ENABLE_MAP:
			errcode =
				kbase_hwcnt_enable_map_alloc(metadata, &wd_backend->wd_enable_map);
			break;
		case HWCNT_JM_WD_INIT_DUMP_BUFFER:
			errcode = kbase_hwcnt_dump_buffer_alloc(metadata,
								&wd_backend->wd_dump_buffer);
			break;
		case HWCNT_JM_WD_INIT_END:
			break;
		}
		if (!errcode)
			state++;
	}

	if (errcode) {
		kbasep_hwcnt_backend_jm_watchdog_term_partial(wd_backend, state);
		*out_backend = NULL;
		return errcode;
	}

	WARN_ON(state != HWCNT_JM_WD_INIT_END);

	spin_lock_init(&wd_backend->locked.watchdog_lock);
	init_completion(&wd_backend->watchdog_complete);

	*out_backend = (struct kbase_hwcnt_backend *)wd_backend;
	return 0;
}

/* Job manager watchdog backend, implementation of timestamp_ns */
static u64 kbasep_hwcnt_backend_jm_watchdog_timestamp_ns(struct kbase_hwcnt_backend *backend)
{
	struct kbase_hwcnt_backend_jm_watchdog *const wd_backend = (void *)backend;

	return wd_backend->info->jm_backend_iface->timestamp_ns(wd_backend->jm_backend);
}

static int kbasep_hwcnt_backend_jm_watchdog_dump_enable_common(
	struct kbase_hwcnt_backend_jm_watchdog *wd_backend,
	const struct kbase_hwcnt_enable_map *enable_map, kbase_hwcnt_backend_dump_enable_fn enabler)
{
	int errcode = -EPERM;
	unsigned long flags;

	if (WARN_ON(!wd_backend) || WARN_ON(!enable_map))
		return -EINVAL;

	spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);

	/* If the backend is already enabled return an error */
	if (wd_backend->locked.is_enabled) {
		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		return -EPERM;
	}

	spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);

	/* Copy the enable map into the watchdog backend's copy for future use */
	kbase_hwcnt_enable_map_copy(&wd_backend->wd_enable_map, enable_map);

	errcode = enabler(wd_backend->jm_backend, enable_map);
	if (!errcode) {
		/* Enable the dump watchdog */
		errcode = wd_backend->info->dump_watchdog_iface->enable(
			wd_backend->info->dump_watchdog_iface->timer, wd_backend->timeout_ms,
			kbasep_hwcnt_backend_jm_watchdog_timer_callback, wd_backend);
		if (!errcode) {
			spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);
			WARN_ON(wd_backend->locked.is_enabled);
			wd_backend->locked.is_enabled = true;
			spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		} else {
			/* Revert the job manager backend back to disabled */
			wd_backend->info->jm_backend_iface->dump_disable(wd_backend->jm_backend);
		}
	}

	return errcode;
}

/* Job manager watchdog backend, implementation of dump_enable */
static int
kbasep_hwcnt_backend_jm_watchdog_dump_enable(struct kbase_hwcnt_backend *backend,
					     const struct kbase_hwcnt_enable_map *enable_map)
{
	struct kbase_hwcnt_backend_jm_watchdog *const wd_backend = (void *)backend;

	return kbasep_hwcnt_backend_jm_watchdog_dump_enable_common(
		wd_backend, enable_map, wd_backend->info->jm_backend_iface->dump_enable);
}

/* Job manager watchdog backend, implementation of dump_enable_nolock */
static int
kbasep_hwcnt_backend_jm_watchdog_dump_enable_nolock(struct kbase_hwcnt_backend *backend,
						    const struct kbase_hwcnt_enable_map *enable_map)
{
	struct kbase_hwcnt_backend_jm_watchdog *const wd_backend = (void *)backend;

	return kbasep_hwcnt_backend_jm_watchdog_dump_enable_common(
		wd_backend, enable_map, wd_backend->info->jm_backend_iface->dump_enable_nolock);
}

/* Job manager watchdog backend, implementation of dump_disable */
static void kbasep_hwcnt_backend_jm_watchdog_dump_disable(struct kbase_hwcnt_backend *backend)
{
	struct kbase_hwcnt_backend_jm_watchdog *const wd_backend = (void *)backend;
	unsigned long flags;

	if (WARN_ON(!backend))
		return;

	spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);
	if (!wd_backend->locked.is_enabled) {
		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		return;
	}

	wd_backend->locked.is_enabled = false;

	/* Discard undumped counter values since the last dump_get. */
	if (wd_backend->locked.state == HWCNT_JM_WD_IDLE_BUFFER_FULL)
		wd_backend->locked.state = HWCNT_JM_WD_IDLE_BUFFER_EMPTY;

	spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);

	wd_backend->info->dump_watchdog_iface->disable(
		wd_backend->info->dump_watchdog_iface->timer);

	wd_backend->info->jm_backend_iface->dump_disable(wd_backend->jm_backend);
}

/* Job manager watchdog backend, implementation of dump_clear */
static int kbasep_hwcnt_backend_jm_watchdog_dump_clear(struct kbase_hwcnt_backend *backend)
{
	int errcode = -EPERM;
	bool clear_wd_wait_completion = false;
	unsigned long flags;
	struct kbase_hwcnt_backend_jm_watchdog *const wd_backend = (void *)backend;

	if (WARN_ON(!backend))
		return -EINVAL;

	spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);
	if (!wd_backend->locked.is_enabled) {
		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		return -EPERM;
	}

	switch (wd_backend->locked.state) {
	case HWCNT_JM_WD_IDLE_BUFFER_FULL:
	case HWCNT_JM_WD_USER_DUMPING_BUFFER_FULL:
	case HWCNT_JM_WD_IDLE_BUFFER_EMPTY:
	case HWCNT_JM_WD_USER_DUMPING_BUFFER_EMPTY:
		wd_backend->locked.state = HWCNT_JM_WD_BUFFER_CLEARING;
		errcode = 0;
		break;
	case HWCNT_JM_WD_TIMER_DUMPING:
		/* The timer issued a dump request; when it completes, the job manager
		 * backend buffer will be zero.
		 */
		clear_wd_wait_completion = true;
		/* This thread will have to wait for the callback to terminate and then call a
		 * dump_clear on the job manager backend. We change the state to
		 * HWCNT_JM_WD_TIMER_DUMPING_USER_CLEAR to notify the callback thread there is
		 * no more need to dump the buffer (since we will clear it right after anyway).
		 * We set up a wait queue to synchronize with the callback.
		 */
		reinit_completion(&wd_backend->watchdog_complete);
		wd_backend->locked.state = HWCNT_JM_WD_TIMER_DUMPING_USER_CLEAR;
		errcode = 0;
		break;
	default:
		errcode = -EPERM;
		break;
	}
	spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);

	if (!errcode) {
		if (clear_wd_wait_completion) {
			/* Waiting for the callback to finish */
			wait_for_completion(&wd_backend->watchdog_complete);
		}

		/* Clearing job manager backend buffer */
		errcode = wd_backend->info->jm_backend_iface->dump_clear(wd_backend->jm_backend);

		spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);

		WARN_ON(wd_backend->locked.state != HWCNT_JM_WD_TIMER_DUMPING_USER_CLEAR &&
			wd_backend->locked.state != HWCNT_JM_WD_BUFFER_CLEARING &&
			wd_backend->locked.state != HWCNT_JM_WD_ERROR);

		WARN_ON(!wd_backend->locked.is_enabled);

		if (!errcode && wd_backend->locked.state != HWCNT_JM_WD_ERROR) {
			/* Setting the internal buffer state to EMPTY */
			wd_backend->locked.state = HWCNT_JM_WD_IDLE_BUFFER_EMPTY;
			/* Resetting the timer. Calling modify on a disabled timer
			 * enables it.
			 */
			wd_backend->info->dump_watchdog_iface->modify(
				wd_backend->info->dump_watchdog_iface->timer,
				wd_backend->timeout_ms);
		} else {
			wd_backend->locked.state = HWCNT_JM_WD_ERROR;
			errcode = -EPERM;
		}

		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
	}

	return errcode;
}

/* Job manager watchdog backend, implementation of dump_request */
static int kbasep_hwcnt_backend_jm_watchdog_dump_request(struct kbase_hwcnt_backend *backend,
							 u64 *dump_time_ns)
{
	bool call_dump_request = false;
	int errcode = 0;
	unsigned long flags;
	struct kbase_hwcnt_backend_jm_watchdog *const wd_backend = (void *)backend;

	if (WARN_ON(!backend) || WARN_ON(!dump_time_ns))
		return -EINVAL;

	spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);

	if (!wd_backend->locked.is_enabled) {
		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		return -EPERM;
	}

	switch (wd_backend->locked.state) {
	case HWCNT_JM_WD_IDLE_BUFFER_EMPTY:
		/* progressing the state to avoid callbacks running while calling the job manager
		 * backend
		 */
		wd_backend->locked.state = HWCNT_JM_WD_USER_DUMPING_BUFFER_EMPTY;
		call_dump_request = true;
		break;
	case HWCNT_JM_WD_IDLE_BUFFER_FULL:
		wd_backend->locked.state = HWCNT_JM_WD_USER_DUMPING_BUFFER_FULL;
		call_dump_request = true;
		break;
	case HWCNT_JM_WD_TIMER_DUMPING:
		/* Retrieve timing information from previous dump_request */
		*dump_time_ns = wd_backend->wd_dump_timestamp;
		/* On the next client call (dump_wait) the thread will have to wait for the
		 * callback to finish the dumping.
		 * We set up a wait queue to synchronize with the callback.
		 */
		reinit_completion(&wd_backend->watchdog_complete);
		wd_backend->locked.state = HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED;
		break;
	default:
		errcode = -EPERM;
		break;
	}
	spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);

	if (call_dump_request) {
		errcode = wd_backend->info->jm_backend_iface->dump_request(wd_backend->jm_backend,
									   dump_time_ns);
		if (!errcode) {
			/* Resetting the timer. Calling modify on a disabled timer enables it */
			wd_backend->info->dump_watchdog_iface->modify(
				wd_backend->info->dump_watchdog_iface->timer,
				wd_backend->timeout_ms);
		} else {
			spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);
			WARN_ON(!wd_backend->locked.is_enabled);
			wd_backend->locked.state = HWCNT_JM_WD_ERROR;
			spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		}
	}

	return errcode;
}

/* Job manager watchdog backend, implementation of dump_wait */
static int kbasep_hwcnt_backend_jm_watchdog_dump_wait(struct kbase_hwcnt_backend *backend)
{
	int errcode = -EPERM;
	bool wait_for_auto_dump = false, wait_for_user_dump = false;
	struct kbase_hwcnt_backend_jm_watchdog *const wd_backend = (void *)backend;
	unsigned long flags;

	if (WARN_ON(!backend))
		return -EINVAL;

	spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);
	if (!wd_backend->locked.is_enabled) {
		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		return -EPERM;
	}

	switch (wd_backend->locked.state) {
	case HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED:
		wait_for_auto_dump = true;
		errcode = 0;
		break;
	case HWCNT_JM_WD_USER_DUMPING_BUFFER_EMPTY:
	case HWCNT_JM_WD_USER_DUMPING_BUFFER_FULL:
		wait_for_user_dump = true;
		errcode = 0;
		break;
	default:
		errcode = -EPERM;
		break;
	}
	spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);

	if (wait_for_auto_dump)
		wait_for_completion(&wd_backend->watchdog_complete);
	else if (wait_for_user_dump) {
		errcode = wd_backend->info->jm_backend_iface->dump_wait(wd_backend->jm_backend);
		if (errcode) {
			spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);
			WARN_ON(!wd_backend->locked.is_enabled);
			wd_backend->locked.state = HWCNT_JM_WD_ERROR;
			spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
		}
	}

	return errcode;
}

/* Job manager watchdog backend, implementation of dump_get */
static int kbasep_hwcnt_backend_jm_watchdog_dump_get(
	struct kbase_hwcnt_backend *backend, struct kbase_hwcnt_dump_buffer *dump_buffer,
	const struct kbase_hwcnt_enable_map *enable_map, bool accumulate)
{
	bool call_dump_get = false;
	struct kbase_hwcnt_backend_jm_watchdog *const wd_backend = (void *)backend;
	unsigned long flags;
	int errcode = 0;

	if (WARN_ON(!backend) || WARN_ON(!dump_buffer) || WARN_ON(!enable_map))
		return -EINVAL;

	/* The resultant contents of the dump buffer are only well defined if a prior
	 * call to dump_wait returned successfully, and a new dump has not yet been
	 * requested by a call to dump_request.
	 */

	spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);

	switch (wd_backend->locked.state) {
	case HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED:
		/* We assume dump_wait has been called and completed successfully */
		if (accumulate)
			kbase_hwcnt_dump_buffer_accumulate(dump_buffer, &wd_backend->wd_dump_buffer,
							   enable_map);
		else
			kbase_hwcnt_dump_buffer_copy(dump_buffer, &wd_backend->wd_dump_buffer,
						     enable_map);

		/* Use state to indicate that the buffer is now empty */
		wd_backend->locked.state = HWCNT_JM_WD_IDLE_BUFFER_EMPTY;
		break;
	case HWCNT_JM_WD_USER_DUMPING_BUFFER_FULL:
		/* Accumulate or copy watchdog data to the user buffer first so that
		 * dump_get can set the header correctly
		 */
		if (accumulate)
			kbase_hwcnt_dump_buffer_accumulate(dump_buffer, &wd_backend->wd_dump_buffer,
							   enable_map);
		else
			kbase_hwcnt_dump_buffer_copy(dump_buffer, &wd_backend->wd_dump_buffer,
						     enable_map);

		/* Accumulate backend data into the user buffer on top of the watchdog data */
		accumulate = true;
		call_dump_get = true;
		break;
	case HWCNT_JM_WD_USER_DUMPING_BUFFER_EMPTY:
		call_dump_get = true;
		break;
	default:
		errcode = -EPERM;
		break;
	}

	spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);

	if (call_dump_get && !errcode) {
		/* Dump the job manager backend into the user buffer, honouring the
		 * accumulate flag.
		 */
		errcode = wd_backend->info->jm_backend_iface->dump_get(
			wd_backend->jm_backend, dump_buffer, enable_map, accumulate);

		spin_lock_irqsave(&wd_backend->locked.watchdog_lock, flags);

		WARN_ON(wd_backend->locked.state != HWCNT_JM_WD_USER_DUMPING_BUFFER_EMPTY &&
			wd_backend->locked.state != HWCNT_JM_WD_USER_DUMPING_BUFFER_FULL &&
			wd_backend->locked.state != HWCNT_JM_WD_TIMER_DUMPING_USER_REQUESTED);

		if (!errcode)
			wd_backend->locked.state = HWCNT_JM_WD_IDLE_BUFFER_EMPTY;
		else
			wd_backend->locked.state = HWCNT_JM_WD_ERROR;

		spin_unlock_irqrestore(&wd_backend->locked.watchdog_lock, flags);
	}

	return errcode;
}
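
/*
 * Illustrative only: if the periodic watchdog dump captured N events for a
 * counter and the job manager backend has counted M further events since, a
 * dump_get in state HWCNT_JM_WD_USER_DUMPING_BUFFER_FULL first copies (or
 * accumulates) N into the user buffer, then forces accumulate=true for the
 * backend dump_get, leaving N + M in the user buffer.
 */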

/* exposed methods */

int kbase_hwcnt_backend_jm_watchdog_create(struct kbase_hwcnt_backend_interface *backend_iface,
					   struct kbase_hwcnt_watchdog_interface *watchdog_iface,
					   struct kbase_hwcnt_backend_interface *out_iface)
{
	struct kbase_hwcnt_backend_jm_watchdog_info *info = NULL;

	if (WARN_ON(!backend_iface) || WARN_ON(!watchdog_iface) || WARN_ON(!out_iface))
		return -EINVAL;

	info = kbasep_hwcnt_backend_jm_watchdog_info_create(backend_iface, watchdog_iface);
	if (!info)
		return -ENOMEM;

	/* Link the info table with the output iface and register the watchdog backend
	 * methods, allowing the callbacks below to access the info object later on.
	 */
	*out_iface = (struct kbase_hwcnt_backend_interface){
		.info = (void *)info,
		.metadata = kbasep_hwcnt_backend_jm_watchdog_metadata,
		.init = kbasep_hwcnt_backend_jm_watchdog_init,
		.term = kbasep_hwcnt_backend_jm_watchdog_term,
		.timestamp_ns = kbasep_hwcnt_backend_jm_watchdog_timestamp_ns,
		.dump_enable = kbasep_hwcnt_backend_jm_watchdog_dump_enable,
		.dump_enable_nolock = kbasep_hwcnt_backend_jm_watchdog_dump_enable_nolock,
		.dump_disable = kbasep_hwcnt_backend_jm_watchdog_dump_disable,
		.dump_clear = kbasep_hwcnt_backend_jm_watchdog_dump_clear,
		.dump_request = kbasep_hwcnt_backend_jm_watchdog_dump_request,
		.dump_wait = kbasep_hwcnt_backend_jm_watchdog_dump_wait,
		.dump_get = kbasep_hwcnt_backend_jm_watchdog_dump_get
	};

	return 0;
}
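
/*
 * A minimal usage sketch (hypothetical caller; error handling elided, and
 * jm_iface, wd_timer_iface, enable_map and dump_buf are assumed to exist):
 *
 *	struct kbase_hwcnt_backend_interface wd_iface;
 *	struct kbase_hwcnt_backend *backend;
 *	u64 ts;
 *
 *	kbase_hwcnt_backend_jm_watchdog_create(&jm_iface, &wd_timer_iface, &wd_iface);
 *	wd_iface.init(wd_iface.info, &backend);
 *	wd_iface.dump_enable(backend, &enable_map);
 *	wd_iface.dump_request(backend, &ts);
 *	wd_iface.dump_wait(backend);
 *	wd_iface.dump_get(backend, &dump_buf, &enable_map, false);
 *	wd_iface.dump_disable(backend);
 *	wd_iface.term(backend);
 *	kbase_hwcnt_backend_jm_watchdog_destroy(&wd_iface);
 */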

void kbase_hwcnt_backend_jm_watchdog_destroy(struct kbase_hwcnt_backend_interface *iface)
{
	if (!iface || !iface->info)
		return;

	kfree((struct kbase_hwcnt_backend_jm_watchdog_info *)iface->info);

	/* Blank the watchdog backend interface */
	memset(iface, 0, sizeof(*iface));
}