// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test for clk rate management
 */
#include <linux/clk.h>
#include <linux/clk-provider.h>

/* Needed for clk_hw_get_clk() */
#include "clk.h"

#include <kunit/test.h>
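
/*
 * Arbitrary rates used throughout the tests: the dummy clock starts at
 * DUMMY_CLOCK_INIT_RATE, while DUMMY_CLOCK_RATE_1 < DUMMY_CLOCK_RATE_2
 * serve as the boundaries of the requested rate ranges.
 */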
#define DUMMY_CLOCK_INIT_RATE	(42 * 1000 * 1000)
#define DUMMY_CLOCK_RATE_1	(142 * 1000 * 1000)
#define DUMMY_CLOCK_RATE_2	(242 * 1000 * 1000)

struct clk_dummy_context {
	struct clk_hw hw;
	unsigned long rate;
};

static unsigned long clk_dummy_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct clk_dummy_context *ctx =
		container_of(hw, struct clk_dummy_context, hw);

	return ctx->rate;
}

static int clk_dummy_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	/* Just return the same rate without modifying it */
	return 0;
}

static int clk_dummy_maximize_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	/*
	 * If there's a maximum set, always run the clock at the maximum
	 * allowed.
	 */
	if (req->max_rate < ULONG_MAX)
		req->rate = req->max_rate;

	return 0;
}

static int clk_dummy_minimize_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	/*
	 * If there's a minimum set, always run the clock at the minimum
	 * allowed.
	 */
	if (req->min_rate > 0)
		req->rate = req->min_rate;

	return 0;
}

static int clk_dummy_set_rate(struct clk_hw *hw,
			      unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_dummy_context *ctx =
		container_of(hw, struct clk_dummy_context, hw);

	ctx->rate = rate;
	return 0;
}

static int clk_dummy_single_set_parent(struct clk_hw *hw, u8 index)
{
	if (index >= clk_hw_get_num_parents(hw))
		return -EINVAL;

	return 0;
}

static u8 clk_dummy_single_get_parent(struct clk_hw *hw)
{
	return 0;
}
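
/*
 * The clk_ops below pair the dummy callbacks into the combinations the
 * suites register: a pass-through determine_rate, variants that always
 * pick the maximum or the minimum of the allowed range, and mux-only
 * parent ops used by the orphan mux test.
 */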

static const struct clk_ops clk_dummy_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_determine_rate,
	.set_rate = clk_dummy_set_rate,
};

static const struct clk_ops clk_dummy_maximize_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_maximize_rate,
	.set_rate = clk_dummy_set_rate,
};

static const struct clk_ops clk_dummy_minimize_rate_ops = {
	.recalc_rate = clk_dummy_recalc_rate,
	.determine_rate = clk_dummy_minimize_rate,
	.set_rate = clk_dummy_set_rate,
};

static const struct clk_ops clk_dummy_single_parent_ops = {
	.set_parent = clk_dummy_single_set_parent,
	.get_parent = clk_dummy_single_get_parent,
};
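
/*
 * Shared fixture setup: allocate a dummy clock context through the
 * test-managed allocator, register it with the requested ops and stash
 * the context in test->priv for the test cases and the exit callback.
 */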

static int clk_test_init_with_ops(struct kunit *test, const struct clk_ops *ops)
{
	struct clk_dummy_context *ctx;
	struct clk_init_data init = { };
	int ret;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->rate = DUMMY_CLOCK_INIT_RATE;
	test->priv = ctx;

	init.name = "test_dummy_rate";
	init.ops = ops;
	ctx->hw.init = &init;

	ret = clk_hw_register(NULL, &ctx->hw);
	if (ret)
		return ret;

	return 0;
}

static int clk_test_init(struct kunit *test)
{
	return clk_test_init_with_ops(test, &clk_dummy_rate_ops);
}

static int clk_maximize_test_init(struct kunit *test)
{
	return clk_test_init_with_ops(test, &clk_dummy_maximize_rate_ops);
}

static int clk_minimize_test_init(struct kunit *test)
{
	return clk_test_init_with_ops(test, &clk_dummy_minimize_rate_ops);
}

static void clk_test_exit(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
}

/*
 * Test that the actual rate matches what is returned by clk_get_rate()
 */
static void clk_test_get_rate(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, ctx->rate);
}

/*
 * Test that, after a call to clk_set_rate(), the rate returned by
 * clk_get_rate() matches.
 *
 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 * modify the requested rate, which is our case in clk_dummy_rate_ops.
 */
static void clk_test_set_get_rate(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
}

/*
 * Test that, after several calls to clk_set_rate(), the rate returned
 * by clk_get_rate() matches the last one.
 *
 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 * modify the requested rate, which is our case in clk_dummy_rate_ops.
 */
static void clk_test_set_set_get_rate(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
			0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
}

/*
 * Test that clk_round_rate and clk_set_rate are consistent and will
 * return the same frequency.
 */
static void clk_test_round_set_get_rate(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rounded_rate, set_rate;

	rounded_rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1);
	KUNIT_ASSERT_GT(test, rounded_rate, 0);
	KUNIT_EXPECT_EQ(test, rounded_rate, DUMMY_CLOCK_RATE_1);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1),
			0);

	set_rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, set_rate, 0);
	KUNIT_EXPECT_EQ(test, rounded_rate, set_rate);
}

static struct kunit_case clk_test_cases[] = {
	KUNIT_CASE(clk_test_get_rate),
	KUNIT_CASE(clk_test_set_get_rate),
	KUNIT_CASE(clk_test_set_set_get_rate),
	KUNIT_CASE(clk_test_round_set_get_rate),
	{}
};

static struct kunit_suite clk_test_suite = {
	.name = "clk-test",
	.init = clk_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_test_cases,
};

struct clk_single_parent_ctx {
	struct clk_dummy_context parent_ctx;
	struct clk_hw hw;
};
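
/*
 * The mux below is registered before its "orphan_parent" clock, so it
 * starts out as an orphan and is only adopted once the parent gets
 * registered right after it.
 */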

static int clk_orphan_transparent_single_parent_mux_test_init(struct kunit *test)
{
	struct clk_single_parent_ctx *ctx;
	struct clk_init_data init = { };
	const char * const parents[] = { "orphan_parent" };
	int ret;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	test->priv = ctx;

	init.name = "test_orphan_dummy_parent";
	init.ops = &clk_dummy_single_parent_ops;
	init.parent_names = parents;
	init.num_parents = ARRAY_SIZE(parents);
	init.flags = CLK_SET_RATE_PARENT;
	ctx->hw.init = &init;

	ret = clk_hw_register(NULL, &ctx->hw);
	if (ret)
		return ret;

	memset(&init, 0, sizeof(init));
	init.name = "orphan_parent";
	init.ops = &clk_dummy_rate_ops;
	ctx->parent_ctx.hw.init = &init;
	ctx->parent_ctx.rate = DUMMY_CLOCK_INIT_RATE;

	ret = clk_hw_register(NULL, &ctx->parent_ctx.hw);
	if (ret)
		return ret;

	return 0;
}

static void clk_orphan_transparent_single_parent_mux_test_exit(struct kunit *test)
{
	struct clk_single_parent_ctx *ctx = test->priv;

	clk_hw_unregister(&ctx->hw);
	clk_hw_unregister(&ctx->parent_ctx.hw);
}

/*
 * Test that a mux-only clock, with an initial rate within a range,
 * will still have the same rate after the range has been enforced.
 */
static void clk_test_orphan_transparent_parent_mux_set_range(struct kunit *test)
{
	struct clk_single_parent_ctx *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate, new_rate;

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   ctx->parent_ctx.rate - 1000,
					   ctx->parent_ctx.rate + 1000),
			0);

	new_rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, new_rate, 0);
	KUNIT_EXPECT_EQ(test, rate, new_rate);
}

static struct kunit_case clk_orphan_transparent_single_parent_mux_test_cases[] = {
	KUNIT_CASE(clk_test_orphan_transparent_parent_mux_set_range),
	{}
};

static struct kunit_suite clk_orphan_transparent_single_parent_test_suite = {
	.name = "clk-orphan-transparent-single-parent-test",
	.init = clk_orphan_transparent_single_parent_mux_test_init,
	.exit = clk_orphan_transparent_single_parent_mux_test_exit,
	.test_cases = clk_orphan_transparent_single_parent_mux_test_cases,
};

/*
 * Test that clk_set_rate_range won't return an error for a valid range
 * and that it will make sure the rate of the clock is within the
 * boundaries.
 */
static void clk_range_test_set_range(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
}

/*
 * Test that calling clk_set_rate_range with a minimum rate higher than
 * the maximum rate returns an error.
 */
static void clk_range_test_set_range_invalid(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;

	KUNIT_EXPECT_LT(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1 + 1000,
					   DUMMY_CLOCK_RATE_1),
			0);
}

/*
 * Test that users can't set multiple, disjoint ranges that would be
 * impossible to meet.
 */
static void clk_range_test_multiple_disjoints_range(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *user1, *user2;

	user1 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);

	user2 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user1, 1000, 2000),
			0);

	KUNIT_EXPECT_LT(test,
			clk_set_rate_range(user2, 3000, 4000),
			0);

	clk_put(user2);
	clk_put(user1);
}

/*
 * Test that if our clock has some boundaries and we try to round a rate
 * lower than the minimum, the returned rate will be within range.
 */
static void clk_range_test_set_range_round_rate_lower(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
}

/*
 * Test that if our clock has some boundaries and we try to set a rate
 * lower than the minimum, the new rate will be within range.
 */
static void clk_range_test_set_range_set_rate_lower(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
}

/*
 * Test that if our clock has some boundaries and we try to round and
 * set a rate lower than the minimum, the rate returned by
 * clk_round_rate() will be consistent with the new rate set by
 * clk_set_rate().
 */
static void clk_range_test_set_range_set_round_rate_consistent_lower(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	long rounded;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_1 - 1000);
	KUNIT_ASSERT_GT(test, rounded, 0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
			0);

	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
}

/*
 * Test that if our clock has some boundaries and we try to round a rate
 * higher than the maximum, the returned rate will be within range.
 */
static void clk_range_test_set_range_round_rate_higher(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
}

/*
 * Test that if our clock has some boundaries and we try to set a rate
 * higher than the maximum, the new rate will be within range.
 */
static void clk_range_test_set_range_set_rate_higher(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_GE(test, rate, DUMMY_CLOCK_RATE_1);
	KUNIT_EXPECT_LE(test, rate, DUMMY_CLOCK_RATE_2);
}

/*
 * Test that if our clock has some boundaries and we try to round and
 * set a rate higher than the maximum, the rate returned by
 * clk_round_rate() will be consistent with the new rate set by
 * clk_set_rate().
 */
static void clk_range_test_set_range_set_round_rate_consistent_higher(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	long rounded;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rounded = clk_round_rate(clk, DUMMY_CLOCK_RATE_2 + 1000);
	KUNIT_ASSERT_GT(test, rounded, 0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
			0);

	KUNIT_EXPECT_EQ(test, rounded, clk_get_rate(clk));
}

/*
 * Test that if our clock has a rate lower than the minimum set by a
 * call to clk_set_rate_range(), the rate will be raised to match the
 * new minimum.
 *
 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 * modify the requested rate, which is our case in clk_dummy_rate_ops.
 */
static void clk_range_test_set_range_get_rate_raised(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
			0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
}

/*
 * Test that if our clock has a rate higher than the maximum set by a
 * call to clk_set_rate_range(), the rate will be lowered to match the
 * new maximum.
 *
 * This assumes that clk_ops.determine_rate or clk_ops.round_rate won't
 * modify the requested rate, which is our case in clk_dummy_rate_ops.
 */
static void clk_range_test_set_range_get_rate_lowered(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
			0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
}

static struct kunit_case clk_range_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range),
	KUNIT_CASE(clk_range_test_set_range_invalid),
	KUNIT_CASE(clk_range_test_multiple_disjoints_range),
	KUNIT_CASE(clk_range_test_set_range_round_rate_lower),
	KUNIT_CASE(clk_range_test_set_range_set_rate_lower),
	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_lower),
	KUNIT_CASE(clk_range_test_set_range_round_rate_higher),
	KUNIT_CASE(clk_range_test_set_range_set_rate_higher),
	KUNIT_CASE(clk_range_test_set_range_set_round_rate_consistent_higher),
	KUNIT_CASE(clk_range_test_set_range_get_rate_raised),
	KUNIT_CASE(clk_range_test_set_range_get_rate_lowered),
	{}
};

static struct kunit_suite clk_range_test_suite = {
	.name = "clk-range-test",
	.init = clk_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_range_test_cases,
};
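
/*
 * The next two suites register the same dummy clock with the maximize
 * and minimize determine_rate implementations, so the clock rate is
 * expected to track the upper (respectively lower) boundary of whatever
 * range is currently enforced.
 */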

/*
 * Test that if we have several subsequent calls to
 * clk_set_rate_range(), the core will reevaluate whether a new rate is
 * needed each and every time.
 *
 * With clk_dummy_maximize_rate_ops, this means that the rate will
 * trail along the maximum as it evolves.
 */
static void clk_range_test_set_range_rate_maximized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
			0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2 - 1000),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2 - 1000);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);
}

/*
 * Test that if we have several subsequent calls to
 * clk_set_rate_range(), across multiple users, the core will reevaluate
 * whether a new rate is needed each and every time.
 *
 * With clk_dummy_maximize_rate_ops, this means that the rate will
 * trail along the maximum as it evolves.
 */
static void clk_range_test_multiple_set_range_rate_maximized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	struct clk *user1, *user2;
	unsigned long rate;

	user1 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);

	user2 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
			0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user1,
					   0,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user2,
					   0,
					   DUMMY_CLOCK_RATE_1),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	KUNIT_ASSERT_EQ(test,
			clk_drop_range(user2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(user2);
	clk_put(user1);
}

/*
 * Test that if we have several subsequent calls to
 * clk_set_rate_range(), across multiple users, the core will reevaluate
 * whether a new rate is needed, including when a user drops its clock.
 *
 * With clk_dummy_maximize_rate_ops, this means that the rate will
 * trail along the maximum as it evolves.
 */
static void clk_range_test_multiple_set_range_rate_put_maximized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	struct clk *user1, *user2;
	unsigned long rate;

	user1 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);

	user2 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_2 + 1000),
			0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user1,
					   0,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user2,
					   0,
					   DUMMY_CLOCK_RATE_1),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	/* Dropping user2 also drops the range it had requested */
	clk_put(user2);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	clk_put(user1);
}

static struct kunit_case clk_range_maximize_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range_rate_maximized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_maximized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_maximized),
	{}
};

static struct kunit_suite clk_range_maximize_test_suite = {
	.name = "clk-range-maximize-test",
	.init = clk_maximize_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_range_maximize_test_cases,
};

/*
 * Test that if we have several subsequent calls to
 * clk_set_rate_range(), the core will reevaluate whether a new rate is
 * needed each and every time.
 *
 * With clk_dummy_minimize_rate_ops, this means that the rate will
 * trail along the minimum as it evolves.
 */
static void clk_range_test_set_range_rate_minimized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	unsigned long rate;

	KUNIT_ASSERT_EQ(test,
			clk_set_rate(clk, DUMMY_CLOCK_RATE_1 - 1000),
			0);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1 + 1000,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1 + 1000);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(clk,
					   DUMMY_CLOCK_RATE_1,
					   DUMMY_CLOCK_RATE_2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);
}

/*
 * Test that if we have several subsequent calls to
 * clk_set_rate_range(), across multiple users, the core will reevaluate
 * whether a new rate is needed each and every time.
 *
 * With clk_dummy_minimize_rate_ops, this means that the rate will
 * trail along the minimum as it evolves.
 */
static void clk_range_test_multiple_set_range_rate_minimized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	struct clk *user1, *user2;
	unsigned long rate;

	user1 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);

	user2 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user1,
					   DUMMY_CLOCK_RATE_1,
					   ULONG_MAX),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user2,
					   DUMMY_CLOCK_RATE_2,
					   ULONG_MAX),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	KUNIT_ASSERT_EQ(test,
			clk_drop_range(user2),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	clk_put(user2);
	clk_put(user1);
}

/*
 * Test that if we have several subsequent calls to
 * clk_set_rate_range(), across multiple users, the core will reevaluate
 * whether a new rate is needed, including when a user drops its clock.
 *
 * With clk_dummy_minimize_rate_ops, this means that the rate will
 * trail along the minimum as it evolves.
 */
static void clk_range_test_multiple_set_range_rate_put_minimized(struct kunit *test)
{
	struct clk_dummy_context *ctx = test->priv;
	struct clk_hw *hw = &ctx->hw;
	struct clk *clk = hw->clk;
	struct clk *user1, *user2;
	unsigned long rate;

	user1 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user1);

	user2 = clk_hw_get_clk(hw, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, user2);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user1,
					   DUMMY_CLOCK_RATE_1,
					   ULONG_MAX),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	KUNIT_ASSERT_EQ(test,
			clk_set_rate_range(user2,
					   DUMMY_CLOCK_RATE_2,
					   ULONG_MAX),
			0);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_2);

	/* Dropping user2 also drops the range it had requested */
	clk_put(user2);

	rate = clk_get_rate(clk);
	KUNIT_ASSERT_GT(test, rate, 0);
	KUNIT_EXPECT_EQ(test, rate, DUMMY_CLOCK_RATE_1);

	clk_put(user1);
}

static struct kunit_case clk_range_minimize_test_cases[] = {
	KUNIT_CASE(clk_range_test_set_range_rate_minimized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_minimized),
	KUNIT_CASE(clk_range_test_multiple_set_range_rate_put_minimized),
	{}
};

static struct kunit_suite clk_range_minimize_test_suite = {
	.name = "clk-range-minimize-test",
	.init = clk_minimize_test_init,
	.exit = clk_test_exit,
	.test_cases = clk_range_minimize_test_cases,
};

kunit_test_suites(
	&clk_test_suite,
	&clk_orphan_transparent_single_parent_test_suite,
	&clk_range_test_suite,
	&clk_range_maximize_test_suite,
	&clk_range_minimize_test_suite
);
MODULE_LICENSE("GPL v2");