diff --git a/data/test/tabletserver/exec_cases.txt b/data/test/tabletserver/exec_cases.txt index eeb0131ff5b..7b19abae91e 100644 --- a/data/test/tabletserver/exec_cases.txt +++ b/data/test/tabletserver/exec_cases.txt @@ -3,6 +3,16 @@ { "PlanID": "PASS_SELECT", "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 0 + }, + { + "TableName": "b", + "Role": 0 + } + ], "FieldQuery": "select * from a where 1 != 1 union select * from b where 1 != 1", "FullQuery": "select * from a union select * from b limit :#maxLimit" } @@ -12,6 +22,16 @@ { "PlanID": "PASS_SELECT", "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 0 + }, + { + "TableName": "b", + "Role": 0 + } + ], "FieldQuery": "select * from a where 1 != 1 union select * from b where 1 != 1", "FullQuery": "select * from a union select * from b limit 10" } @@ -21,6 +41,12 @@ { "PlanID": "PASS_SELECT", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 0 + } + ], "FieldQuery": "select * from a where 1 != 1", "FullQuery": "select distinct * from a limit :#maxLimit" } @@ -30,6 +56,12 @@ { "PlanID": "PASS_SELECT", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 0 + } + ], "FieldQuery": "select * from a where 1 != 1 group by b", "FullQuery": "select * from a group by b limit :#maxLimit" } @@ -39,6 +71,12 @@ { "PlanID": "PASS_SELECT", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 0 + } + ], "FieldQuery": "select * from a where 1 != 1", "FullQuery": "select * from a having b = 1 limit :#maxLimit" } @@ -48,6 +86,12 @@ { "PlanID": "PASS_SELECT", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 0 + } + ], "FieldQuery": "select * from a where 1 != 1", "FullQuery": "select * from a limit 5" } @@ -57,6 +101,12 @@ { "PlanID": "PASS_SELECT", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 0 + } + ], "FieldQuery": "select * from a where 1 != 1", "FullQuery": "select * from a limit 10, 5" } 
@@ -66,6 +116,12 @@ { "PlanID": "PASS_SELECT", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 0 + } + ], "FieldQuery": "select * from a where 1 != 1", "FullQuery": "select * from a limit 10, 5" } @@ -75,6 +131,12 @@ { "PlanID": "PASS_SELECT", "TableName": "", + "Permissions": [ + { + "TableName": "b", + "Role": 0 + } + ], "FieldQuery": "select * from a.b where 1 != 1", "FullQuery": "select * from a.b limit :#maxLimit" } @@ -84,6 +146,16 @@ { "PlanID": "PASS_SELECT", "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 0 + }, + { + "TableName": "b", + "Role": 0 + } + ], "FieldQuery": "select * from a, b where 1 != 1", "FullQuery": "select * from a, b limit :#maxLimit" } @@ -93,6 +165,16 @@ { "PlanID": "PASS_SELECT", "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 0 + }, + { + "TableName": "b", + "Role": 0 + } + ], "FieldQuery": "select * from a join b where 1 != 1", "FullQuery": "select * from a join b limit :#maxLimit" } @@ -102,6 +184,16 @@ { "PlanID": "PASS_SELECT", "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 0 + }, + { + "TableName": "b", + "Role": 0 + } + ], "FieldQuery": "select * from a right join b on c = d where 1 != 1", "FullQuery": "select * from a right join b on c = d limit :#maxLimit" } @@ -111,6 +203,12 @@ { "PlanID": "PASS_SELECT", "TableName": "", + "Permissions": [ + { + "TableName": "b", + "Role": 0 + } + ], "FieldQuery": "select * from (b) where 1 != 1", "FullQuery": "select * from (b) limit :#maxLimit" } @@ -120,6 +218,12 @@ { "PlanID": "PASS_SELECT", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 0 + } + ], "FullQuery": "select :bv from a limit :#maxLimit" } @@ -128,6 +232,12 @@ { "PlanID": "PASS_SELECT", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 0 + } + ], "FieldQuery": "select eid from a where 1 != 1", "FullQuery": "select eid from a limit :#maxLimit" } @@ -137,6 +247,12 @@ { "PlanID": "PASS_SELECT", 
"TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 0 + } + ], "FieldQuery": "select eid as foo from a where 1 != 1", "FullQuery": "select eid as foo from a limit :#maxLimit" } @@ -146,6 +262,12 @@ { "PlanID": "PASS_SELECT", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 0 + } + ], "FieldQuery": "select * from a where 1 != 1", "FullQuery": "select * from a limit :#maxLimit" } @@ -155,6 +277,12 @@ { "PlanID": "PASS_SELECT", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 0 + } + ], "FieldQuery": "select c.eid from a as c where 1 != 1", "FullQuery": "select c.eid from a as c limit :#maxLimit" } @@ -164,6 +292,12 @@ { "PlanID": "SELECT_LOCK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 0 + } + ], "FieldQuery": "select eid from a where 1 != 1", "FullQuery": "select eid from a limit :#maxLimit for update" } @@ -173,6 +307,12 @@ { "PlanID": "SELECT_LOCK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 0 + } + ], "FieldQuery": "select eid from a where 1 != 1", "FullQuery": "select eid from a limit :#maxLimit lock in share mode" } @@ -183,6 +323,12 @@ "PlanID": "PASS_DML", "Reason": "TABLE", "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into b.a(eid, id) values (1, :a)" } @@ -192,6 +338,12 @@ options:PassthroughDMLs { "PlanID": "PASS_DML", "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into b.a(eid, id) values (1, :a)" } @@ -200,6 +352,12 @@ options:PassthroughDMLs { "PlanID": "INSERT_PK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(eid, id) values (1, :a)", "OuterQuery": "insert into a(eid, id) values (1, :a)", "PKValues": [[1], [":a"]] @@ -211,6 +369,12 @@ options:PassthroughDMLs { "PlanID": "PASS_DML", "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], 
"FullQuery": "insert into a(eid, id) values (1, :a)" } @@ -219,6 +383,12 @@ options:PassthroughDMLs { "PlanID": "INSERT_PK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(id) values (1)", "OuterQuery": "insert into a(id) values (1)", "PKValues": [0, [1]] @@ -229,6 +399,12 @@ options:PassthroughDMLs { "PlanID": "INSERT_PK", "TableName": "d", + "Permissions": [ + { + "TableName": "d", + "Role": 1 + } + ], "FullQuery": "insert into d(id) values (1)", "OuterQuery": "insert into d(id) values (1)", "PKValues": ["0"] @@ -239,6 +415,12 @@ options:PassthroughDMLs { "PlanID": "INSERT_PK", "TableName": "d", + "Permissions": [ + { + "TableName": "d", + "Role": 1 + } + ], "FullQuery": "insert into d(id) values (1)", "OuterQuery": "insert into d(id) values (1)", "PKValues": ["0"] @@ -253,6 +435,12 @@ options:PassthroughDMLs { "PlanID": "INSERT_PK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(eid, id) values (-1, 2)", "OuterQuery": "insert into a(eid, id) values (-1, 2)", "PKValues": [[-1], [2]] @@ -263,6 +451,12 @@ options:PassthroughDMLs { "PlanID": "INSERT_PK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(eid, id) values (1, 2)", "OuterQuery": "insert into a(eid, id) values (1, 2)", "PKValues": [[1], [2]] @@ -274,6 +468,12 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "COMPLEX_EXPR", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(eid, id) values (~1, 2)" } @@ -283,6 +483,12 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "COMPLEX_EXPR", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(eid, id) values (1 + 1, 2)" } @@ -292,6 +498,12 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "COMPLEX_EXPR", "TableName": "a", + "Permissions": 
[ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(eid, id) values (0x04, 2)" } @@ -301,6 +513,12 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "TABLE_NOINDEX", "TableName": "c", + "Permissions": [ + { + "TableName": "c", + "Role": 1 + } + ], "FullQuery": "insert into c(eid, id) values (1, 2)" } @@ -309,6 +527,12 @@ options:PassthroughDMLs { "PlanID": "INSERT_PK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a values (1, 2, 'name', 'foo', 'camelcase')", "OuterQuery": "insert into a(eid, id, name, foo, CamelCase) values (1, 2, 'name', 'foo', 'camelcase')", "PKValues": [[1], [2]] @@ -319,6 +543,12 @@ options:PassthroughDMLs { "PlanID": "UPSERT_PK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(eid, id) values (1, 2) on duplicate key update name = func(a)", "OuterQuery": "insert into a(eid, id) values (1, 2)", "UpsertQuery": "update a set name = func(a) where :#pk", @@ -331,6 +561,12 @@ options:PassthroughDMLs { "PlanID": "PASS_DML", "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(eid, id) values (1, 2) on duplicate key update name = func(a)" } @@ -339,6 +575,12 @@ options:PassthroughDMLs { "PlanID": "INSERT_PK", "TableName": "b", + "Permissions": [ + { + "TableName": "b", + "Role": 1 + } + ], "FullQuery": "insert into b(eid, id) values (1, 2) on duplicate key update name = func(a)", "OuterQuery": "insert into b(eid, id) values (1, 2) on duplicate key update name = func(a)", "PKValues": [[1], [2]] @@ -349,6 +591,12 @@ options:PassthroughDMLs { "PlanID": "UPSERT_PK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(eid, id) values (1, 2) on duplicate key update eid = 2", "OuterQuery": "insert into a(eid, id) values (1, 2)", "UpsertQuery": "update a set eid = 2 where :#pk", @@ -361,6 
+609,12 @@ options:PassthroughDMLs { "PlanID": "UPSERT_PK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(eid, id, name) values (1, 2, 'foo') on duplicate key update name = values(name)", "OuterQuery": "insert into a(eid, id, name) values (1, 2, 'foo')", "UpsertQuery": "update a set name = ('foo') where :#pk", @@ -372,6 +626,12 @@ options:PassthroughDMLs { "PlanID": "UPSERT_PK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(eid, id, name) values (1, 2, 'foo') on duplicate key update name = concat(values(name), 'foo')", "OuterQuery": "insert into a(eid, id, name) values (1, 2, 'foo')", "UpsertQuery": "update a set name = concat(('foo'), 'foo') where :#pk", @@ -383,6 +643,12 @@ options:PassthroughDMLs { "PlanID": "UPSERT_PK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(eid, id, name) values (1, 2, 3) on duplicate key update name = values(name) + 5", "OuterQuery": "insert into a(eid, id, name) values (1, 2, 3)", "UpsertQuery": "update a set name = (3) + 5 where :#pk", @@ -394,6 +660,12 @@ options:PassthroughDMLs { "PlanID": "UPSERT_PK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(eid, id, name) values (1, :id, :name) on duplicate key update name = values(name), id = values(id)", "OuterQuery": "insert into a(eid, id, name) values (1, :id, :name)", "UpsertQuery": "update a set name = (:name), id = (:id) where :#pk", @@ -407,6 +679,12 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "COMPLEX_EXPR", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(eid, id) values (1 + 1, 2) on duplicate key update eid = values(eid) + 1" } @@ -416,6 +694,12 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "PK_CHANGE", "TableName": "a", + "Permissions": [ 
+ { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(eid, id, name) values (1, 2, 1 + 1) on duplicate key update eid = values(name)", "PKValues": [[1],[2]] } @@ -426,6 +710,12 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "PK_CHANGE", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(eid, id) values (1, 2) on duplicate key update eid = values(name)", "PKValues": [[1],[2]] } @@ -439,6 +729,12 @@ options:PassthroughDMLs { "PlanID": "UPSERT_PK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(eid, id) values (1, 2) on duplicate key update eid = values(eid), id = values(id)", "OuterQuery": "insert into a(eid, id) values (1, 2)", "UpsertQuery": "update a set eid = (1), id = (2) where :#pk", @@ -451,6 +747,12 @@ options:PassthroughDMLs { "PlanID": "UPSERT_PK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(eid, id, name) values (1, 2, 'foo') on duplicate key update eid = 2, id = values(id), name = func()", "OuterQuery": "insert into a(eid, id, name) values (1, 2, 'foo')", "UpsertQuery": "update a set eid = 2, id = (2), name = func() where :#pk", @@ -464,6 +766,12 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "PK_CHANGE", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(id, eid) values (1, 2) on duplicate key update eid = func(a)", "PKValues": [[2], [1]] } @@ -474,6 +782,12 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "UPSERT_MULTI_ROW", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "insert into a(id, eid) values (1, 2), (2, 3) on duplicate key update name = func(a)", "PKValues": [[2,3],[1,2]] } @@ -483,6 +797,12 @@ options:PassthroughDMLs { "PlanID": "INSERT_PK", "TableName": "b", + "Permissions": [ + { + "TableName": "b", 
+ "Role": 1 + } + ], "FullQuery": "insert into b(id, eid) values (1, 2), (2, 3) on duplicate key update name = func(a)", "OuterQuery": "insert into b(id, eid) values (1, 2), (2, 3) on duplicate key update name = func(a)", "PKValues": [[2,3],[1,2]] @@ -493,6 +813,12 @@ options:PassthroughDMLs { "PlanID": "INSERT_PK", "TableName": "b", + "Permissions": [ + { + "TableName": "b", + "Role": 1 + } + ], "FullQuery": "insert into b(id, eid) values (1, 2), (2, 3) on duplicate key update id = 1", "OuterQuery": "insert into b(id, eid) values (1, 2), (2, 3) on duplicate key update id = 1", "PKValues": [[2,3],[1,2]], @@ -504,6 +830,12 @@ options:PassthroughDMLs { "PlanID": "INSERT_PK", "TableName": "b", + "Permissions": [ + { + "TableName": "b", + "Role": 1 + } + ], "FullQuery": "insert into b(id, eid) values (1, 2), (3, 4) on duplicate key update id = values(eid)", "OuterQuery": "insert into b(id, eid) values (1, 2), (3, 4) on duplicate key update id = values(eid)", "PKValues": [[2,4],[1,3]], @@ -516,6 +848,16 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "UPSERT_SUBQUERY", "TableName": "b", + "Permissions": [ + { + "TableName": "b", + "Role": 1 + }, + { + "TableName": "a", + "Role": 0 + } + ], "FullQuery": "insert into b(id, eid) select * from a on duplicate key update name = func(a)" } @@ -524,6 +866,16 @@ options:PassthroughDMLs { "PlanID": "INSERT_SUBQUERY", "TableName": "b", + "Permissions": [ + { + "TableName": "b", + "Role": 1 + }, + { + "TableName": "a", + "Role": 0 + } + ], "FullQuery": "insert into b(eid, id) select * from a", "OuterQuery": "insert into b(eid, id) values :#values", "Subquery": "select * from a limit :#maxLimit", @@ -536,6 +888,16 @@ options:PassthroughDMLs { "PlanID": "INSERT_SUBQUERY", "TableName": "b", + "Permissions": [ + { + "TableName": "b", + "Role": 1 + }, + { + "TableName": "a", + "Role": 0 + } + ], "FullQuery": "insert into b select * from a", "OuterQuery": "insert into b(eid, id) values :#values", "Subquery": "select * from a 
limit :#maxLimit", @@ -548,6 +910,12 @@ options:PassthroughDMLs { "PlanID": "INSERT_PK", "TableName": "b", + "Permissions": [ + { + "TableName": "b", + "Role": 1 + } + ], "FullQuery": "insert into b(eid, id) values (1, 2), (3, 4)", "OuterQuery": "insert into b(eid, id) values (1, 2), (3, 4)", "PKValues": [[1, 3], [2, 4]] @@ -558,6 +926,12 @@ options:PassthroughDMLs { "PlanID": "INSERT_MESSAGE", "TableName": "msg", + "Permissions": [ + { + "TableName": "msg", + "Role": 1 + } + ], "FullQuery": "insert into msg(time_scheduled, id, message) values (1, 2, 'aa')", "OuterQuery": "insert into msg(time_scheduled, id, message, time_next, time_created, epoch) values (1, 2, 'aa', 1, :#time_now, 0)", "PKValues": [[1], [2]] @@ -568,6 +942,12 @@ options:PassthroughDMLs { "PlanID": "INSERT_MESSAGE", "TableName": "msg", + "Permissions": [ + { + "TableName": "msg", + "Role": 1 + } + ], "FullQuery": "insert into msg(id, message) values (2, 'aa')", "OuterQuery": "insert into msg(id, message, time_scheduled, time_next, time_created, epoch) values (2, 'aa', :#time_now, :#time_now, :#time_now, 0)", "PKValues": [[":#time_now"], [2]] @@ -578,6 +958,12 @@ options:PassthroughDMLs { "PlanID": "INSERT_MESSAGE", "TableName": "msg", + "Permissions": [ + { + "TableName": "msg", + "Role": 1 + } + ], "FullQuery": "insert into msg(time_scheduled, id, message) values (1, 2, 'aa'), (3, 4, 'bb')", "OuterQuery": "insert into msg(time_scheduled, id, message, time_next, time_created, epoch) values (1, 2, 'aa', 1, :#time_now, 0), (3, 4, 'bb', 3, :#time_now, 0)", "PKValues": [ @@ -634,6 +1020,12 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason":"REPLACE", "TableName": "", + "Permissions": [ + { + "TableName": "b", + "Role": 1 + } + ], "FullQuery": "replace into b(eid, id) values (1, 2), (3, 4)" } @@ -643,6 +1035,12 @@ options:PassthroughDMLs { "PlanID": "PASS_DML", "TableName": "", + "Permissions": [ + { + "TableName": "b", + "Role": 1 + } + ], "FullQuery": "replace into b(eid, id) values (1, 2), 
(3, 4)" } @@ -652,6 +1050,12 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason":"REPLACE", "TableName": "", + "Permissions": [ + { + "TableName": "b", + "Role": 1 + } + ], "FullQuery": "replace into b(eid, id) values (1, 2)" } @@ -661,6 +1065,16 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason":"REPLACE", "TableName": "", + "Permissions": [ + { + "TableName": "b", + "Role": 1 + }, + { + "TableName": "a", + "Role": 0 + } + ], "FullQuery": "replace into b(eid, id) select * from a" } @@ -670,6 +1084,16 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason":"REPLACE", "TableName": "", + "Permissions": [ + { + "TableName": "b", + "Role": 1 + }, + { + "TableName": "a", + "Role": 0 + } + ], "FullQuery": "replace into b select * from a" } @@ -678,6 +1102,12 @@ options:PassthroughDMLs { "PlanID": "DML_SUBQUERY", "TableName": "d", + "Permissions": [ + { + "TableName": "d", + "Role": 1 + } + ], "FullQuery": "update d set foo = 'foo' where name in ('a', 'b') limit 1", "OuterQuery": "update d set foo = 'foo' where :#pk", "Subquery": "select name from d where name in ('a', 'b') limit 1 for update", @@ -690,6 +1120,12 @@ options:PassthroughDMLs { "PlanID": "PASS_DML", "TableName": "", + "Permissions": [ + { + "TableName": "d", + "Role": 1 + } + ], "FullQuery": "update d set foo = 'foo' where name in ('a', 'b') limit 1" } @@ -699,6 +1135,12 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "TABLE", "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "update b.a set name = 'foo' where eid = 1 and id = 1" } @@ -708,6 +1150,12 @@ options:PassthroughDMLs { "PlanID": "PASS_DML", "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "update b.a set name = 'foo' where eid = 1 and id = 1" } @@ -717,6 +1165,16 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "MULTI_TABLE", "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + }, + { + "TableName": "b", 
+ "Role": 1 + } + ], "FullQuery": "update a, b set a.name = 'foo' where a.id = b.id and b.var = 'test'" } @@ -726,6 +1184,16 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "MULTI_TABLE", "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + }, + { + "TableName": "b", + "Role": 1 + } + ], "FullQuery": "update a join b on a.id = b.id set a.name = 'foo' where b.var = 'test'" } @@ -734,6 +1202,12 @@ options:PassthroughDMLs { "PlanID": "DML_SUBQUERY", "TableName": "b", + "Permissions": [ + { + "TableName": "b", + "Role": 1 + } + ], "FullQuery": "update b set eid = 1", "OuterQuery": "update b set eid = 1 where :#pk", "Subquery": "select eid, id from b limit :#maxLimit for update", @@ -751,6 +1225,12 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "PK_CHANGE", "TableName": "b", + "Permissions": [ + { + "TableName": "b", + "Role": 1 + } + ], "FullQuery": "update b set eid = foo()", "WhereClause": "" } @@ -760,6 +1240,12 @@ options:PassthroughDMLs { "PlanID": "DML_SUBQUERY", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "update a set name = 'foo'", "OuterQuery": "update a set name = 'foo' where :#pk", "Subquery": "select eid, id from a limit :#maxLimit for update", @@ -771,6 +1257,12 @@ options:PassthroughDMLs { "PlanID": "DML_SUBQUERY", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "update a set name = 'foo' where eid + 1 = 1", "OuterQuery": "update a set name = 'foo' where :#pk", "Subquery": "select eid, id from a where eid + 1 = 1 limit :#maxLimit for update", @@ -782,6 +1274,12 @@ options:PassthroughDMLs { "PlanID": "DML_PK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "update a set name = 'foo' where eid = 1 and id = 1", "OuterQuery": "update a set name = 'foo' where :#pk", "PKValues": [1, 1], @@ -793,6 +1291,12 @@ options:PassthroughDMLs { "PlanID": "DML_SUBQUERY", "TableName": 
"a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "update a set name = 'foo' where eid = 1", "OuterQuery": "update a set name = 'foo' where :#pk", "Subquery": "select eid, id from a where eid = 1 limit :#maxLimit for update", @@ -804,6 +1308,12 @@ options:PassthroughDMLs { "PlanID": "DML_SUBQUERY", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "update a set name = 'foo' where eid = 1.0 and id = 1", "OuterQuery": "update a set name = 'foo' where :#pk", "Subquery": "select eid, id from a where eid = 1.0 and id = 1 limit :#maxLimit for update", @@ -815,6 +1325,12 @@ options:PassthroughDMLs { "PlanID": "DML_SUBQUERY", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "update a set name = 'foo' where eid = 1 limit 10", "OuterQuery": "update a set name = 'foo' where :#pk", "Subquery": "select eid, id from a where eid = 1 limit 10 for update", @@ -826,6 +1342,12 @@ options:PassthroughDMLs { "PlanID": "DML_SUBQUERY", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "update a set name = 'foo' where eid = 1 and name = 'foo'", "OuterQuery": "update a set name = 'foo' where :#pk", "Subquery": "select eid, id from a where eid = 1 and name = 'foo' limit :#maxLimit for update", @@ -838,6 +1360,12 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "TABLE_NOINDEX", "TableName": "c", + "Permissions": [ + { + "TableName": "c", + "Role": 1 + } + ], "FullQuery": "update c set eid = 1", "WhereClause": "" } @@ -847,6 +1375,12 @@ options:PassthroughDMLs { "PlanID":"DML_SUBQUERY", "TableName":"a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery":"update a set name = 'foo' where eid + 1 = 1 and id = 1", "OuterQuery":"update a set name = 'foo' where :#pk", "Subquery":"select eid, id from a where eid + 1 = 1 and id = 1 limit :#maxLimit for update", @@ -858,6 +1392,12 @@ 
options:PassthroughDMLs { "PlanID": "DML_PK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "update a set name = 'foo' where (eid = 1) and id = 1", "OuterQuery": "update a set name = 'foo' where :#pk", "PKValues": [1, 1], @@ -869,6 +1409,12 @@ options:PassthroughDMLs { "PlanID":"DML_PK", "TableName":"a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery":"update a set name = 'foo' where eid in (1, 2) and id = 1", "OuterQuery":"update a set name = 'foo' where :#pk", "PKValues":[[1,2],1], @@ -880,6 +1426,12 @@ options:PassthroughDMLs { "PlanID":"DML_SUBQUERY", "TableName":"a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery":"update a set name = 'foo' where eid in (1, 2) and id in (1, 2)", "OuterQuery":"update a set name = 'foo' where :#pk", "Subquery":"select eid, id from a where eid in (1, 2) and id in (1, 2) limit :#maxLimit for update", @@ -891,6 +1443,12 @@ options:PassthroughDMLs { "PlanID":"DML_SUBQUERY", "TableName":"a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery":"update a set name = 'foo' where eid = 1 and eid = 2", "OuterQuery":"update a set name = 'foo' where :#pk", "Subquery":"select eid, id from a where eid = 1 and eid = 2 limit :#maxLimit for update", @@ -902,6 +1460,12 @@ options:PassthroughDMLs { "PlanID": "DML_SUBQUERY", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "update a set name = 'foo' where eid = 1 order by id desc", "OuterQuery": "update a set name = 'foo' where :#pk order by id desc", "Subquery": "select eid, id from a where eid = 1 order by id desc limit :#maxLimit for update", @@ -914,6 +1478,12 @@ options:PassthroughDMLs { "PlanID": "DML_SUBQUERY", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "update a use index (b) set name = 'foo' where eid = 1", "OuterQuery": "update a set name = 'foo' where :#pk", 
"Subquery": "select eid, id from a use index (b) where eid = 1 limit :#maxLimit for update", @@ -925,6 +1495,12 @@ options:PassthroughDMLs { "PlanID": "DML_SUBQUERY", "TableName": "d", + "Permissions": [ + { + "TableName": "d", + "Role": 1 + } + ], "FullQuery": "delete from d where name in ('a', 'b') limit 1", "OuterQuery": "delete from d where :#pk", "Subquery": "select name from d where name in ('a', 'b') limit 1 for update", @@ -937,6 +1513,12 @@ options:PassthroughDMLs { "PlanID": "PASS_DML", "TableName": "", + "Permissions": [ + { + "TableName": "d", + "Role": 1 + } + ], "FullQuery": "delete from d where name in ('a', 'b') limit 1" } @@ -946,6 +1528,12 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "TABLE", "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "delete from b.a where eid = 1 and id = 1" } @@ -954,6 +1542,12 @@ options:PassthroughDMLs { "PlanID": "DML_SUBQUERY", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "delete from a", "OuterQuery": "delete from a where :#pk", "Subquery": "select eid, id from a limit :#maxLimit for update", @@ -965,6 +1559,12 @@ options:PassthroughDMLs { "PlanID": "DML_SUBQUERY", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "delete from a where eid + 1 = 1", "OuterQuery": "delete from a where :#pk", "Subquery": "select eid, id from a where eid + 1 = 1 limit :#maxLimit for update", @@ -976,6 +1576,12 @@ options:PassthroughDMLs { "PlanID": "DML_PK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "delete from a where eid = 1 and id = 1", "OuterQuery": "delete from a where :#pk", "PKValues": [1, 1], @@ -988,6 +1594,12 @@ options:PassthroughDMLs { "PlanID": "PASS_DML", "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "delete from a where eid = 1 and id = 1" } @@ -996,6 +1608,12 @@ 
options:PassthroughDMLs { "PlanID": "DML_SUBQUERY", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "delete from a where eid = 1", "OuterQuery": "delete from a where :#pk", "Subquery": "select eid, id from a where eid = 1 limit :#maxLimit for update", @@ -1007,6 +1625,12 @@ options:PassthroughDMLs { "PlanID": "DML_SUBQUERY", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "delete from a where eid = 1 order by id desc", "OuterQuery": "delete from a where :#pk order by id desc", "Subquery": "select eid, id from a where eid = 1 order by id desc limit :#maxLimit for update", @@ -1018,6 +1642,12 @@ options:PassthroughDMLs { "PlanID": "DML_SUBQUERY", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "delete from a where eid = 1.0 and id = 1", "OuterQuery": "delete from a where :#pk", "Subquery": "select eid, id from a where eid = 1.0 and id = 1 limit :#maxLimit for update", @@ -1029,6 +1659,12 @@ options:PassthroughDMLs { "PlanID": "DML_SUBQUERY", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "delete from a where eid = 1 and name = 'foo'", "OuterQuery": "delete from a where :#pk", "Subquery": "select eid, id from a where eid = 1 and name = 'foo' limit :#maxLimit for update", @@ -1041,6 +1677,12 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "TABLE_NOINDEX", "TableName": "c", + "Permissions": [ + { + "TableName": "c", + "Role": 1 + } + ], "FullQuery": "delete from c", "WhereClause": "" } @@ -1050,6 +1692,12 @@ options:PassthroughDMLs { "PlanID":"DML_SUBQUERY", "TableName":"a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery":"delete from a where eid + 1 = 1 and id = 1", "OuterQuery":"delete from a where :#pk", "Subquery":"select eid, id from a where eid + 1 = 1 and id = 1 limit :#maxLimit for update", @@ -1061,6 +1709,12 @@ options:PassthroughDMLs { 
"PlanID": "DML_PK", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery": "delete from a where (eid = 1) and id = 1", "OuterQuery": "delete from a where :#pk", "PKValues": [1, 1], @@ -1072,6 +1726,12 @@ options:PassthroughDMLs { "PlanID":"DML_PK", "TableName":"a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery":"delete from a where eid in (1, 2) and id = 1", "OuterQuery":"delete from a where :#pk", "PKValues":[[1,2],1], @@ -1083,6 +1743,12 @@ options:PassthroughDMLs { "PlanID":"DML_SUBQUERY", "TableName":"a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery":"delete from a where eid in (1, 2) and id in (1, 2)", "OuterQuery":"delete from a where :#pk", "Subquery":"select eid, id from a where eid in (1, 2) and id in (1, 2) limit :#maxLimit for update", @@ -1094,6 +1760,12 @@ options:PassthroughDMLs { "PlanID":"DML_SUBQUERY", "TableName":"a", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + } + ], "FullQuery":"delete from a where eid = 1 and eid = 2", "OuterQuery":"delete from a where :#pk", "Subquery":"select eid, id from a where eid = 1 and eid = 2 limit :#maxLimit for update", @@ -1106,6 +1778,20 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "MULTI_TABLE", "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + }, + { + "TableName": "b", + "Role": 1 + }, + { + "TableName": "c", + "Role": 1 + } + ], "FullQuery": "delete a, b from a, b, c where a.id = b.id and b.id = c.id and c.name = 'foo'" } @@ -1115,6 +1801,16 @@ options:PassthroughDMLs "PlanID": "PASS_DML", "Reason": "MULTI_TABLE", "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 1 + }, + { + "TableName": "b", + "Role": 1 + } + ], "FullQuery": "delete a from a join b on a.id = b.id where a.name = 'foo'" } @@ -1123,6 +1819,12 @@ options:PassthroughDMLs { "PlanID": "NEXTVAL", "TableName": "seq", + "Permissions": [ + { + "TableName": "seq", + "Role": 0 + } + ], 
"PKValues":[1] } @@ -1131,6 +1833,12 @@ options:PassthroughDMLs { "PlanID": "NEXTVAL", "TableName": "seq", + "Permissions": [ + { + "TableName": "seq", + "Role": 0 + } + ], "PKValues":[10] } @@ -1139,6 +1847,12 @@ options:PassthroughDMLs { "PlanID": "NEXTVAL", "TableName": "seq", + "Permissions": [ + { + "TableName": "seq", + "Role": 0 + } + ], "PKValues":[":a"] } @@ -1147,6 +1861,12 @@ options:PassthroughDMLs { "PlanID": "INSERT_PK", "TableName": "auto", + "Permissions": [ + { + "TableName": "auto", + "Role": 1 + } + ], "FullQuery": "insert into auto values ()", "OuterQuery": "insert into auto(id) values (null)", "PKValues":[ @@ -1159,6 +1879,12 @@ options:PassthroughDMLs { "PlanID": "INSERT_PK", "TableName": "with_defaults", + "Permissions": [ + { + "TableName": "with_defaults", + "Role": 1 + } + ], "FullQuery": "insert into with_defaults values ()", "OuterQuery": "insert into with_defaults(aid, bid, cid) values (3, -2, null)", "PKValues":[ @@ -1210,42 +1936,94 @@ options:PassthroughDMLs "create table a(a int, b varchar(8))" { "PlanID": "DDL", - "TableName": "" + "TableName": "", + "Permissions": [ + { + "TableName": "a", + "Role": 2 + } + ] } # alter "alter table a add column(a int)" { "PlanID": "DDL", - "TableName": "a" + "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 2 + }, + { + "TableName": "a", + "Role": 2 + } + ] } # alter rename "alter table a rename b" { "PlanID": "DDL", - "TableName": "a" + "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 2 + }, + { + "TableName": "b", + "Role": 2 + } + ] } # rename "rename table a to b" { "PlanID": "DDL", - "TableName": "a" + "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 2 + }, + { + "TableName": "b", + "Role": 2 + } + ] } # drop "drop table a" { "PlanID": "DDL", - "TableName": "a" + "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 2 + } + ] } # analyze "analyze table a" { "PlanID": "DDL", - "TableName": "a" + "TableName": "a", 
+ "Permissions": [ + { + "TableName": "a", + "Role": 2 + }, + { + "TableName": "a", + "Role": 2 + } + ] } # reorganize partition with bind @@ -1253,6 +2031,12 @@ options:PassthroughDMLs { "PlanID": "DDL", "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 2 + } + ], "FullQuery": "alter table a reorganize partition b into (partition c values less than (:bv), partition d values less than (maxvalue))" } @@ -1260,7 +2044,17 @@ options:PassthroughDMLs "alter table a partition by range (id) (partition p0 values less than (10), partition p1 values less than (maxvalue))" { "PlanID": "DDL", - "TableName": "a" + "TableName": "a", + "Permissions": [ + { + "TableName": "a", + "Role": 2 + }, + { + "TableName": "a", + "Role": 2 + } + ] } # show diff --git a/data/test/tabletserver/stream_cases.txt b/data/test/tabletserver/stream_cases.txt index e70dd8fef12..a42a8eabede 100644 --- a/data/test/tabletserver/stream_cases.txt +++ b/data/test/tabletserver/stream_cases.txt @@ -3,6 +3,7 @@ { "PlanID": "SELECT_STREAM", "TableName": "a", + "Permissions":[{"TableName":"a","Role":0}], "FullQuery": "select * from a" } @@ -11,6 +12,7 @@ { "PlanID": "SELECT_STREAM", "TableName": "", + "Permissions":[{"TableName":"a","Role":0},{"TableName":"b","Role":0}], "FullQuery": "select * from a join b" } @@ -23,6 +25,7 @@ { "PlanID": "SELECT_STREAM", "TableName": "", + "Permissions":[{"TableName":"a","Role":0},{"TableName":"b","Role":0}], "FullQuery": "select * from a union select * from b" } diff --git a/data/test/vtgate/dml_cases.txt b/data/test/vtgate/dml_cases.txt index b426807c8ee..a2e8540728b 100644 --- a/data/test/vtgate/dml_cases.txt +++ b/data/test/vtgate/dml_cases.txt @@ -200,6 +200,10 @@ } } +# update by primary keyspace id, changing same vindex twice +"update user_metadata set email = 'a', email = 'b' where user_id = 1" +"column has duplicate set values: 'email'" + # update by primary keyspace id, changing multiple vindex columns "update user_metadata set email = 
'juan@vitess.io', address = '155 5th street' where user_id = 1" { @@ -1194,3 +1198,91 @@ "Subquery": "select column_a, column_b, column_c from multicolvin where kid = 1 for update" } } + +# delete from with no where clause +"delete from user_extra" +{ + "Original": "delete from user_extra", + "Instructions": { + "Opcode": "DeleteSharded", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Query": "delete from user_extra", + "Table": "user_extra" + } +} + +# delete with non-comparison expr +"delete from user_extra where user_id between 1 and 2" +{ + "Original": "delete from user_extra where user_id between 1 and 2", + "Instructions": { + "Opcode": "DeleteSharded", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Query": "delete from user_extra where user_id between 1 and 2", + "Table": "user_extra" + } +} + +# delete from with no index match +"delete from user_extra where name = 'jose'" +{ + "Original": "delete from user_extra where name = 'jose'", + "Instructions": { + "Opcode": "DeleteSharded", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Query": "delete from user_extra where name = 'jose'", + "Table": "user_extra" + } +} + +# delete from with primary id in through IN clause +"delete from user_extra where user_id in (1, 2)" +{ + "Original": "delete from user_extra where user_id in (1, 2)", + "Instructions": { + "Opcode": "DeleteSharded", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "Query": "delete from user_extra where user_id in (1, 2)", + "Table": "user_extra" + } +} + +# unsharded update where inner query references outer query +"update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)" +{ + "Original": "update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)", + "Instructions": { + "Opcode": "UpdateUnsharded", + "Keyspace": { + "Name":"main", + "Sharded":false + }, + "Query": 
"update unsharded set col = (select id from unsharded_a where id = unsharded.col) where col = (select id from unsharded_b)" + } +} + +# unsharded delete where inner query references outer query +"delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)" +{ + "Original": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)", + "Instructions": { + "Opcode": "DeleteUnsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "Query": "delete from unsharded where col = (select id from unsharded_a where id = unsharded.col)" + } +} diff --git a/data/test/vtgate/unsupported_cases.txt b/data/test/vtgate/unsupported_cases.txt index 4285b6277a6..bdc37865d3d 100644 --- a/data/test/vtgate/unsupported_cases.txt +++ b/data/test/vtgate/unsupported_cases.txt @@ -239,6 +239,10 @@ "delete from unsharded where col = (select id from user)" "unsupported: sharded subqueries in DML" +# sharded delete with limit clasue +"delete from user_extra limit 10" +"unsupported: multi shard delete with limit" + # sharded subquery in unsharded subquery in unsharded delete "delete from unsharded where col = (select id from unsharded where id = (select id from user))" "unsupported: sharded subqueries in DML" @@ -251,50 +255,26 @@ "update user set val = 1" "unsupported: multi-shard where clause in DML" -# delete from with no where clause -"delete from user" -"unsupported: multi-shard where clause in DML" - # update with non-comparison expr "update user set val = 1 where id between 1 and 2" "unsupported: multi-shard where clause in DML" -# delete with non-comparison expr -"delete from user where id between 1 and 2" -"unsupported: multi-shard where clause in DML" - # update with primary id through IN clause "update user set val = 1 where id in (1, 2)" "unsupported: multi-shard where clause in DML" -# delete from with primary id through IN clause -"delete from user where id in (1, 2)" -"unsupported: multi-shard where clause in 
DML" - # update with non-unique key "update user set val = 1 where name = 'foo'" "unsupported: multi-shard where clause in DML" -# delete from with primary id through IN clause -"delete from user where name = 'foo'" -"unsupported: multi-shard where clause in DML" - # update with no index match "update user set val = 1 where user_id = 1" "unsupported: multi-shard where clause in DML" -# delete from with no index match -"delete from user where user_id = 1" -"unsupported: multi-shard where clause in DML" - # update by lookup with IN clause "update music set val = 1 where id in (1, 2)" "unsupported: multi-shard where clause in DML" -# delete from by lookup with IN clause -"delete from music where id in (1, 2)" -"unsupported: multi-shard where clause in DML" - # delete with multi-table targets "delete music from music where id = 1" "unsupported: multi-table delete statement in sharded keyspace" @@ -303,6 +283,10 @@ "delete user from user join user_extra on user.id = user_extra.id where user.name = 'foo'" "unsupported: multi-table delete statement in sharded keyspace" +# delete from table with no where clause and owned lookup vindex +"delete from user" +"unsupported: multi shard delete on a table with owned lookup vindexes" + # update changes primary vindex column "update user set id = 1 where id = 1" "unsupported: You can't update primary vindex columns. 
Invalid update on vindex: user_index" diff --git a/data/test/vtgate/vindex_func_cases.txt b/data/test/vtgate/vindex_func_cases.txt index d98be955a86..bec3949165f 100644 --- a/data/test/vtgate/vindex_func_cases.txt +++ b/data/test/vtgate/vindex_func_cases.txt @@ -1,4 +1,39 @@ -# vindex func read +# vindex func read all cols +"select id, keyspace_id, range_start, range_end from user_index where id = :id" +{ + "Original": "select id, keyspace_id, range_start, range_end from user_index where id = :id", + "Instructions": { + "Opcode": "VindexMap", + "Fields": [ + { + "name": "id", + "type": 10262 + }, + { + "name": "keyspace_id", + "type": 10262 + }, + { + "name": "range_start", + "type": 10262 + }, + { + "name": "range_end", + "type": 10262 + } + ], + "Cols": [ + 0, + 1, + 2, + 3 + ], + "Vindex": "user_index", + "Value": ":id" + } +} + +# vindex func read with id repeated "select id, keyspace_id, id from user_index where id = :id" { "Original": "select id, keyspace_id, id from user_index where id = :id", diff --git a/docker/k8s/vtctlclient/Dockerfile b/docker/k8s/vtctlclient/Dockerfile index 0315e572483..1af7e5ae076 100644 --- a/docker/k8s/vtctlclient/Dockerfile +++ b/docker/k8s/vtctlclient/Dockerfile @@ -1,4 +1,10 @@ FROM vitess/base AS base FROM debian:stretch-slim COPY --from=base /vt/bin/vtctlclient /usr/bin/ +RUN apt-get update && \ + apt-get upgrade -qq && \ + apt-get install jq -qq --no-install-recommends && \ + apt-get autoremove && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* CMD ["/usr/bin/vtctlclient"] diff --git a/go/cmd/l2vtgate/index.go b/go/cmd/l2vtgate/index.go deleted file mode 100644 index 72b2637abf0..00000000000 --- a/go/cmd/l2vtgate/index.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "net/http" -) - -// This is a separate file so it can be selectively included/excluded from -// builds to opt in/out of the redirect. - -func init() { - // Anything unrecognized gets redirected to the status page. - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - http.Redirect(w, r, "/debug/status", http.StatusFound) - }) -} diff --git a/go/cmd/l2vtgate/main.go b/go/cmd/l2vtgate/main.go deleted file mode 100644 index a1da649a7be..00000000000 --- a/go/cmd/l2vtgate/main.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -import ( - "flag" - "math/rand" - "strings" - "time" - - log "github.com/golang/glog" - - "github.com/youtube/vitess/go/exit" - "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/srvtopo" - "github.com/youtube/vitess/go/vt/topo" - "github.com/youtube/vitess/go/vt/topo/topoproto" - "github.com/youtube/vitess/go/vt/vtgate/l2vtgate" - - topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" -) - -var ( - cell = flag.String("cell", "test_nj", "cell to use") - retryCount = flag.Int("retry-count", 2, "retry count") - healthCheckRetryDelay = flag.Duration("healthcheck_retry_delay", 2*time.Millisecond, "health check retry delay") - healthCheckTimeout = flag.Duration("healthcheck_timeout", time.Minute, "the health check timeout period") - tabletTypesToWait = flag.String("tablet_types_to_wait", "", "wait till connected for specified tablet types during Gateway initialization") -) - -var resilientServer *srvtopo.ResilientServer -var healthCheck discovery.HealthCheck - -func init() { - rand.Seed(time.Now().UnixNano()) - servenv.RegisterDefaultFlags() -} - -func main() { - defer exit.Recover() - - servenv.ParseFlags("l2vtgate") - servenv.Init() - - ts := topo.Open() - defer ts.Close() - - resilientServer = srvtopo.NewResilientServer(ts, "ResilientSrvTopoServer") - - healthCheck = discovery.NewHealthCheck(*healthCheckRetryDelay, *healthCheckTimeout) - healthCheck.RegisterStats() - - tabletTypes := make([]topodatapb.TabletType, 0, 1) - if len(*tabletTypesToWait) != 0 { - for _, ttStr := range strings.Split(*tabletTypesToWait, ",") { - tt, err := topoproto.ParseTabletType(ttStr) - if err != nil { - log.Errorf("unknown tablet type: %v", ttStr) - continue - } - tabletTypes = append(tabletTypes, tt) - } - } - l2vtg := l2vtgate.Init(healthCheck, ts, resilientServer, "VttabletCall", *cell, *retryCount, tabletTypes) - - servenv.OnRun(func() { - addStatusParts(l2vtg) - }) - 
servenv.RunDefault() -} diff --git a/go/cmd/l2vtgate/plugin_consultopo.go b/go/cmd/l2vtgate/plugin_consultopo.go deleted file mode 100644 index 3589ac20941..00000000000 --- a/go/cmd/l2vtgate/plugin_consultopo.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// This plugin imports consultopo to register the consul implementation of TopoServer. - -import ( - _ "github.com/youtube/vitess/go/vt/topo/consultopo" -) diff --git a/go/cmd/l2vtgate/plugin_etcd2topo.go b/go/cmd/l2vtgate/plugin_etcd2topo.go deleted file mode 100644 index cb66fc80762..00000000000 --- a/go/cmd/l2vtgate/plugin_etcd2topo.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// This plugin imports etcd2topo to register the etcd2 implementation of TopoServer. 
- -import ( - _ "github.com/youtube/vitess/go/vt/topo/etcd2topo" -) diff --git a/go/cmd/l2vtgate/plugin_grpctabletconn.go b/go/cmd/l2vtgate/plugin_grpctabletconn.go deleted file mode 100644 index d1fabfe4f60..00000000000 --- a/go/cmd/l2vtgate/plugin_grpctabletconn.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// Imports and register the gRPC tabletconn client - -import ( - _ "github.com/youtube/vitess/go/vt/vttablet/grpctabletconn" -) diff --git a/go/cmd/l2vtgate/plugin_influxdbbackend.go b/go/cmd/l2vtgate/plugin_influxdbbackend.go deleted file mode 100644 index cb1f1c75e0a..00000000000 --- a/go/cmd/l2vtgate/plugin_influxdbbackend.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -// This plugin imports influxdbbackend to register the influxdbbackend stats backend. 
- -import ( - _ "github.com/youtube/vitess/go/stats/influxdbbackend" -) diff --git a/go/cmd/l2vtgate/plugin_zk2topo.go b/go/cmd/l2vtgate/plugin_zk2topo.go deleted file mode 100644 index f41b5d5f66e..00000000000 --- a/go/cmd/l2vtgate/plugin_zk2topo.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - // Imports and register the zk2 TopologyServer - _ "github.com/youtube/vitess/go/vt/topo/zk2topo" -) diff --git a/go/cmd/l2vtgate/status.go b/go/cmd/l2vtgate/status.go deleted file mode 100644 index 194e77a42ad..00000000000 --- a/go/cmd/l2vtgate/status.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2017 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreedto in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -import ( - "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/srvtopo" - _ "github.com/youtube/vitess/go/vt/status" - "github.com/youtube/vitess/go/vt/vtgate/gateway" - "github.com/youtube/vitess/go/vt/vtgate/l2vtgate" -) - -// For use by plugins which wish to avoid racing when registering status page parts. -var onStatusRegistered func() - -func addStatusParts(l2vtgate *l2vtgate.L2VTGate) { - servenv.AddStatusPart("Topology Cache", srvtopo.TopoTemplate, func() interface{} { - return resilientServer.CacheStatus() - }) - servenv.AddStatusPart("Gateway Status", gateway.StatusTemplate, func() interface{} { - return l2vtgate.GetGatewayCacheStatus() - }) - servenv.AddStatusPart("Health Check Cache", discovery.HealthCheckTemplate, func() interface{} { - return healthCheck.CacheStatus() - }) - if onStatusRegistered != nil { - onStatusRegistered() - } -} diff --git a/go/cmd/mysqlctld/mysqlctld.go b/go/cmd/mysqlctld/mysqlctld.go index 4fbef020f9f..1dee0fa35af 100644 --- a/go/cmd/mysqlctld/mysqlctld.go +++ b/go/cmd/mysqlctld/mysqlctld.go @@ -98,7 +98,7 @@ func main() { } mysqld.OnTerm(onTermFunc) - err = mysqld.RefreshConfig() + err = mysqld.RefreshConfig(ctx) if err != nil { log.Errorf("failed to refresh config: %v", err) exit.Return(1) diff --git a/go/cmd/l2vtgate/plugin_grpcqueryservice.go b/go/cmd/vtgate/plugin_grpcqueryservice.go similarity index 85% rename from go/cmd/l2vtgate/plugin_grpcqueryservice.go rename to go/cmd/vtgate/plugin_grpcqueryservice.go index 11d8d8dc23e..9a45988a946 100644 --- a/go/cmd/l2vtgate/plugin_grpcqueryservice.go +++ b/go/cmd/vtgate/plugin_grpcqueryservice.go @@ -20,13 +20,13 @@ package main import ( "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/vtgate/l2vtgate" + "github.com/youtube/vitess/go/vt/vtgate" "github.com/youtube/vitess/go/vt/vttablet/grpcqueryservice" "github.com/youtube/vitess/go/vt/vttablet/queryservice" ) 
func init() { - l2vtgate.RegisterL2VTGates = append(l2vtgate.RegisterL2VTGates, func(qs queryservice.QueryService) { + vtgate.RegisterL2VTGates = append(vtgate.RegisterL2VTGates, func(qs queryservice.QueryService) { if servenv.GRPCCheckServiceMap("queryservice") { grpcqueryservice.Register(servenv.GRPCServer, qs) } diff --git a/go/pools/numbered.go b/go/pools/numbered.go index bfc4de66097..1eb43b77b1a 100644 --- a/go/pools/numbered.go +++ b/go/pools/numbered.go @@ -20,26 +20,42 @@ import ( "fmt" "sync" "time" + + "github.com/youtube/vitess/go/cache" ) // Numbered allows you to manage resources by tracking them with numbers. // There are no interface restrictions on what you can track. type Numbered struct { - mu sync.Mutex - empty *sync.Cond // Broadcast when pool becomes empty - resources map[int64]*numberedWrapper + mu sync.Mutex + empty *sync.Cond // Broadcast when pool becomes empty + resources map[int64]*numberedWrapper + recentlyUnregistered *cache.LRUCache } type numberedWrapper struct { - val interface{} - inUse bool - purpose string - timeCreated time.Time - timeUsed time.Time + val interface{} + inUse bool + purpose string + timeCreated time.Time + timeUsed time.Time + enforceTimeout bool +} + +type unregistered struct { + reason string + timeUnregistered time.Time +} + +func (u *unregistered) Size() int { + return 1 } func NewNumbered() *Numbered { - n := &Numbered{resources: make(map[int64]*numberedWrapper)} + n := &Numbered{ + resources: make(map[int64]*numberedWrapper), + recentlyUnregistered: cache.NewLRUCache(1000), + } n.empty = sync.NewCond(&n.mu) return n } @@ -47,7 +63,7 @@ func NewNumbered() *Numbered { // Register starts tracking a resource by the supplied id. // It does not lock the object. // It returns an error if the id already exists. 
-func (nu *Numbered) Register(id int64, val interface{}) error { +func (nu *Numbered) Register(id int64, val interface{}, enforceTimeout bool) error { nu.mu.Lock() defer nu.mu.Unlock() if _, ok := nu.resources[id]; ok { @@ -55,22 +71,24 @@ func (nu *Numbered) Register(id int64, val interface{}) error { } now := time.Now() nu.resources[id] = &numberedWrapper{ - val: val, - timeCreated: now, - timeUsed: now, + val: val, + timeCreated: now, + timeUsed: now, + enforceTimeout: enforceTimeout, } return nil } // Unregiester forgets the specified resource. // If the resource is not present, it's ignored. -func (nu *Numbered) Unregister(id int64) { +func (nu *Numbered) Unregister(id int64, reason string) { nu.mu.Lock() defer nu.mu.Unlock() delete(nu.resources, id) if len(nu.resources) == 0 { nu.empty.Broadcast() } + nu.recentlyUnregistered.Set(fmt.Sprintf("%v", id), &unregistered{reason: reason, timeUnregistered: time.Now()}) } // Get locks the resource for use. It accepts a purpose as a string. @@ -81,6 +99,10 @@ func (nu *Numbered) Get(id int64, purpose string) (val interface{}, err error) { defer nu.mu.Unlock() nw, ok := nu.resources[id] if !ok { + if val, ok := nu.recentlyUnregistered.Get(fmt.Sprintf("%v", id)); ok { + unreg := val.(*unregistered) + return nil, fmt.Errorf("ended at %v (%v)", unreg.timeUnregistered.Format("2006-01-02 15:04:05.000 MST"), unreg.reason) + } return nil, fmt.Errorf("not found") } if nw.inUse { @@ -120,7 +142,7 @@ func (nu *Numbered) GetOutdated(age time.Duration, purpose string) (vals []inter defer nu.mu.Unlock() now := time.Now() for _, nw := range nu.resources { - if nw.inUse { + if nw.inUse || !nw.enforceTimeout { continue } if nw.timeCreated.Add(age).Sub(now) <= 0 { diff --git a/go/pools/numbered_test.go b/go/pools/numbered_test.go index e3fc4f5a976..48cfbb4cbf5 100644 --- a/go/pools/numbered_test.go +++ b/go/pools/numbered_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package pools import ( + "strings" "testing" "time" ) @@ -26,10 +27,10 @@ func TestNumbered(t *testing.T) { p := NewNumbered() var err error - if err = p.Register(id, id); err != nil { + if err = p.Register(id, id, true); err != nil { t.Errorf("Error %v", err) } - if err = p.Register(id, id); err.Error() != "already present" { + if err = p.Register(id, id, true); err.Error() != "already present" { t.Errorf("want 'already present', got '%v'", err) } var v interface{} @@ -46,22 +47,28 @@ func TestNumbered(t *testing.T) { if v, err = p.Get(1, "test2"); err.Error() != "not found" { t.Errorf("want 'not found', got '%v'", err) } - p.Unregister(1) // Should not fail - p.Unregister(0) + p.Unregister(1, "test") // Should not fail + p.Unregister(0, "test") // p is now empty - p.Register(id, id) + if v, err = p.Get(0, "test3"); !(strings.HasPrefix(err.Error(), "ended at") && strings.HasSuffix(err.Error(), "(test)")) { + t.Errorf("want prefix 'ended at' and suffix '(test'), got '%v'", err) + } + + p.Register(id, id, true) + id++ + p.Register(id, id, true) id++ - p.Register(id, id) + p.Register(id, id, false) time.Sleep(300 * time.Millisecond) id++ - p.Register(id, id) + p.Register(id, id, true) time.Sleep(100 * time.Millisecond) - // p has 0, 1, 2 (0 & 1 are aged) + // p has 0, 1, 2, 3 (0, 1, 2 are aged, but 2 is not enforced) vals := p.GetOutdated(200*time.Millisecond, "by outdated") - if len(vals) != 2 { - t.Errorf("want 2, got %v", len(vals)) + if num := len(vals); num != 2 { + t.Errorf("want 2, got %v", num) } if v, err = p.Get(vals[0].(int64), "test1"); err.Error() != "in use: by outdated" { t.Errorf("want 'in use: by outdated', got '%v'", err) @@ -69,6 +76,7 @@ func TestNumbered(t *testing.T) { for _, v := range vals { p.Put(v.(int64)) } + p.Put(2) // put to 2 to ensure it's not idle time.Sleep(100 * time.Millisecond) // p has 0, 1, 2 (2 is idle) @@ -79,18 +87,19 @@ func TestNumbered(t *testing.T) { if v, err = p.Get(vals[0].(int64), "test1"); err.Error() != "in use: by 
idle" { t.Errorf("want 'in use: by idle', got '%v'", err) } - if vals[0].(int64) != 2 { - t.Errorf("want 2, got %v", vals[0]) + if vals[0].(int64) != 3 { + t.Errorf("want 3, got %v", vals[0]) } - p.Unregister(vals[0].(int64)) + p.Unregister(vals[0].(int64), "test") - // p has 0 & 1 - if p.Size() != 2 { - t.Errorf("want 2, got %v", p.Size()) + // p has 0, 1, and 2 + if p.Size() != 3 { + t.Errorf("want 3, got %v", p.Size()) } go func() { - p.Unregister(0) - p.Unregister(1) + p.Unregister(0, "test") + p.Unregister(1, "test") + p.Unregister(2, "test") }() p.WaitForEmpty() } diff --git a/go/vt/discovery/tablet_stats_cache.go b/go/vt/discovery/tablet_stats_cache.go index d78f3819739..7c4e471ecce 100644 --- a/go/vt/discovery/tablet_stats_cache.go +++ b/go/vt/discovery/tablet_stats_cache.go @@ -17,11 +17,13 @@ limitations under the License. package discovery import ( + "math" "sync" log "github.com/golang/glog" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" + "github.com/youtube/vitess/go/vt/srvtopo" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" "golang.org/x/net/context" @@ -31,7 +33,7 @@ import ( // current list of available TabletStats, and a serving list: // - for master tablets, only the current master is kept. // - for non-master tablets, we filter the list using FilterByReplicationLag. -// It keeps entries for all tablets in the cell it's configured to serve for, +// It keeps entries for all tablets in the cell(s) it's configured to serve for, // and for the master independently of which cell it's in. // Note the healthy tablet computation is done when we receive a tablet // update only, not at serving time. @@ -42,16 +44,18 @@ type TabletStatsCache struct { // cell is the cell we are keeping all tablets for. // Note we keep track of all master tablets in all cells. cell string - // mu protects the entries map. 
It does not protect individual - // entries in the map. - mu sync.RWMutex // ts is the topo server in use. ts *topo.Server + // mu protects the following fields. It does not protect individual + // entries in the entries map. + mu sync.RWMutex // entries maps from keyspace/shard/tabletType to our cache. entries map[string]map[string]map[topodatapb.TabletType]*tabletStatsCacheEntry + // tsm is a helper to broadcast aggregate stats. + tsm srvtopo.TargetStatsMultiplexer } -// tabletStatsCacheEntry is the per keyspace/shard/tabaletType +// tabletStatsCacheEntry is the per keyspace/shard/tabletType // entry of the in-memory map for TabletStatsCache. type tabletStatsCacheEntry struct { // mu protects the rest of this structure. @@ -61,6 +65,43 @@ type tabletStatsCacheEntry struct { all map[string]*TabletStats // healthy only has the healthy ones. healthy []*TabletStats + // aggregates has the per-cell aggregates. + aggregates map[string]*querypb.AggregateStats +} + +func (e *tabletStatsCacheEntry) updateHealthyMapForMaster(ts *TabletStats) { + if ts.Up { + // We have an Up master. + if len(e.healthy) == 0 { + // We have a new Up server, just remember it. + e.healthy = append(e.healthy, ts) + return + } + + // We already have one up server, see if we + // need to replace it. + if ts.TabletExternallyReparentedTimestamp < e.healthy[0].TabletExternallyReparentedTimestamp { + log.Warningf("not marking healthy master %s as Up for %s because its externally reparented timestamp is smaller than the highest known timestamp from previous MASTERs %s: %d < %d ", + topoproto.TabletAliasString(ts.Tablet.Alias), + topoproto.KeyspaceShardString(ts.Target.Keyspace, ts.Target.Shard), + topoproto.TabletAliasString(e.healthy[0].Tablet.Alias), + ts.TabletExternallyReparentedTimestamp, + e.healthy[0].TabletExternallyReparentedTimestamp) + return + } + + // Just replace it. + e.healthy[0] = ts + return + } + + // We have a Down master, remove it only if it's exactly the same. 
+ if len(e.healthy) != 0 { + if ts.Key == e.healthy[0].Key { + // Same guy, remove it. + e.healthy = nil + } + } } // NewTabletStatsCache creates a TabletStatsCache, and registers @@ -88,6 +129,7 @@ func newTabletStatsCache(hc HealthCheck, ts *topo.Server, cell string, setListen cell: cell, ts: ts, entries: make(map[string]map[string]map[topodatapb.TabletType]*tabletStatsCacheEntry), + tsm: srvtopo.NewTargetStatsMultiplexer(), } if setListener { @@ -139,7 +181,8 @@ func (tc *TabletStatsCache) getOrCreateEntry(target *querypb.Target) *tabletStat e, ok := t[target.TabletType] if !ok { e = &tabletStatsCacheEntry{ - all: make(map[string]*TabletStats), + all: make(map[string]*TabletStats), + aggregates: make(map[string]*querypb.AggregateStats), } t[target.TabletType] = e } @@ -190,54 +233,145 @@ func (tc *TabletStatsCache) StatsUpdate(ts *TabletStats) { } } - // The healthy list is different for TabletType_MASTER: we - // only keep the most recent one. + // Update our healthy list. + var allArray []*TabletStats if ts.Target.TabletType == topodatapb.TabletType_MASTER { - if ts.Up { - // We have an Up master - if len(e.healthy) == 0 { - // We have a new Up server, just remember it. - e.healthy = append(e.healthy, ts) - return - } + // The healthy list is different for TabletType_MASTER: we + // only keep the most recent one. + e.updateHealthyMapForMaster(ts) + for _, s := range e.all { + allArray = append(allArray, s) + } + } else { + // For non-master, if it is a trivial update, + // we just skip everything else. We don't even update the + // aggregate stats. + if trivialNonMasterUpdate { + return + } + + // Now we need to do some work. Recompute our healthy list. 
+ allArray = make([]*TabletStats, 0, len(e.all)) + for _, s := range e.all { + allArray = append(allArray, s) + } + e.healthy = FilterByReplicationLag(allArray) + } + + tc.updateAggregateMap(ts.Target.Keyspace, ts.Target.Shard, ts.Target.TabletType, e, allArray) +} - // We already have one up server, see if we - // need to replace it. - if ts.TabletExternallyReparentedTimestamp < e.healthy[0].TabletExternallyReparentedTimestamp { - log.Warningf("not marking healthy master %s as Up for %s because its externally reparented timestamp is smaller than the highest known timestamp from previous MASTERs %s: %d < %d ", - topoproto.TabletAliasString(ts.Tablet.Alias), - topoproto.KeyspaceShardString(ts.Target.Keyspace, ts.Target.Shard), - topoproto.TabletAliasString(e.healthy[0].Tablet.Alias), - ts.TabletExternallyReparentedTimestamp, - e.healthy[0].TabletExternallyReparentedTimestamp) - return +// MakeAggregateMap takes a list of TabletStats and builds a per-cell +// AggregateStats map. +func MakeAggregateMap(stats []*TabletStats) map[string]*querypb.AggregateStats { + result := make(map[string]*querypb.AggregateStats) + for _, ts := range stats { + cell := ts.Tablet.Alias.Cell + agg, ok := result[cell] + if !ok { + agg = &querypb.AggregateStats{ + SecondsBehindMasterMin: math.MaxUint32, } + result[cell] = agg + } - // Just replace it - e.healthy[0] = ts - } else { - // We have a Down master, remove it only if - // it's exactly the same - if len(e.healthy) != 0 { - if ts.Key == e.healthy[0].Key { - // same guy, remove it - e.healthy = nil - } + if ts.Serving && ts.LastError == nil { + agg.HealthyTabletCount++ + if ts.Stats.SecondsBehindMaster < agg.SecondsBehindMasterMin { + agg.SecondsBehindMasterMin = ts.Stats.SecondsBehindMaster + } + if ts.Stats.SecondsBehindMaster > agg.SecondsBehindMasterMax { + agg.SecondsBehindMasterMax = ts.Stats.SecondsBehindMaster } + } else { + agg.UnhealthyTabletCount++ } - return } + return result +} - // For non-master, we just recompute the 
healthy list - // using FilterByReplicationLag, if we need to. - if trivialNonMasterUpdate { - return +// MakeAggregateMapDiff computes the entries that need to be broadcast +// when the map goes from oldMap to newMap. +func MakeAggregateMapDiff(keyspace, shard string, tabletType topodatapb.TabletType, ter int64, oldMap map[string]*querypb.AggregateStats, newMap map[string]*querypb.AggregateStats) []*srvtopo.TargetStatsEntry { + var result []*srvtopo.TargetStatsEntry + for cell, oldValue := range oldMap { + newValue, ok := newMap[cell] + if ok { + // We have both an old and a new value. If equal, + // skip it. + if oldValue.HealthyTabletCount == newValue.HealthyTabletCount && + oldValue.UnhealthyTabletCount == newValue.UnhealthyTabletCount && + oldValue.SecondsBehindMasterMin == newValue.SecondsBehindMasterMin && + oldValue.SecondsBehindMasterMax == newValue.SecondsBehindMasterMax { + continue + } + // The new value is different, send it. + result = append(result, &srvtopo.TargetStatsEntry{ + Target: &querypb.Target{ + Keyspace: keyspace, + Shard: shard, + TabletType: tabletType, + Cell: cell, + }, + Stats: newValue, + TabletExternallyReparentedTimestamp: ter, + }) + } else { + // We only have the old value, send an empty + // record to clear it. + result = append(result, &srvtopo.TargetStatsEntry{ + Target: &querypb.Target{ + Keyspace: keyspace, + Shard: shard, + TabletType: tabletType, + Cell: cell, + }, + }) + } } - allArray := make([]*TabletStats, 0, len(e.all)) - for _, s := range e.all { - allArray = append(allArray, s) + + for cell, newValue := range newMap { + if _, ok := oldMap[cell]; ok { + continue + } + // New value, no old value, just send it. 
+ result = append(result, &srvtopo.TargetStatsEntry{ + Target: &querypb.Target{ + Keyspace: keyspace, + Shard: shard, + TabletType: tabletType, + Cell: cell, + }, + Stats: newValue, + TabletExternallyReparentedTimestamp: ter, + }) } - e.healthy = FilterByReplicationLag(allArray) + return result +} + +// updateAggregateMap will update the aggregate map for the +// tabletStatsCacheEntry. It may broadcast the changes too if we have listeners. +func (tc *TabletStatsCache) updateAggregateMap(keyspace, shard string, tabletType topodatapb.TabletType, e *tabletStatsCacheEntry, stats []*TabletStats) { + // Save the new value + oldAgg := e.aggregates + newAgg := MakeAggregateMap(stats) + e.aggregates = newAgg + + // And broadcast the change in the background. + go func() { + tc.mu.RLock() + defer tc.mu.RUnlock() + if tc.tsm.HasSubscribers() { + var ter int64 + if len(stats) > 0 { + ter = stats[0].TabletExternallyReparentedTimestamp + } + diffs := MakeAggregateMapDiff(keyspace, shard, tabletType, ter, oldAgg, newAgg) + for _, d := range diffs { + tc.tsm.Broadcast(d) + } + } + }() } // GetTabletStats returns the full list of available targets. @@ -284,5 +418,82 @@ func (tc *TabletStatsCache) ResetForTesting() { tc.entries = make(map[string]map[string]map[topodatapb.TabletType]*tabletStatsCacheEntry) } +// Subscribe is part of the TargetStatsListener interface. +func (tc *TabletStatsCache) Subscribe() (int, []srvtopo.TargetStatsEntry, <-chan (*srvtopo.TargetStatsEntry), error) { + var allTS []srvtopo.TargetStatsEntry + + // Make sure the map cannot change. Also blocks any update from + // propagating. 
+ tc.mu.Lock() + defer tc.mu.Unlock() + for keyspace, shardMap := range tc.entries { + for shard, typeMap := range shardMap { + for tabletType, e := range typeMap { + e.mu.RLock() + var ter int64 + if len(e.healthy) > 0 { + ter = e.healthy[0].TabletExternallyReparentedTimestamp + } + for cell, agg := range e.aggregates { + allTS = append(allTS, srvtopo.TargetStatsEntry{ + Target: &querypb.Target{ + Keyspace: keyspace, + Shard: shard, + TabletType: tabletType, + Cell: cell, + }, + Stats: agg, + TabletExternallyReparentedTimestamp: ter, + }) + } + e.mu.RUnlock() + } + } + } + + // Now create the listener, add it to our list. + id, c := tc.tsm.Subscribe() + return id, allTS, c, nil +} + +// Unsubscribe is part of the TargetStatsListener interface. +func (tc *TabletStatsCache) Unsubscribe(i int) error { + tc.mu.Lock() + defer tc.mu.Unlock() + return tc.tsm.Unsubscribe(i) +} + +// GetAggregateStats is part of the TargetStatsListener interface. +func (tc *TabletStatsCache) GetAggregateStats(target *querypb.Target) (*querypb.AggregateStats, error) { + e := tc.getEntry(target.Keyspace, target.Shard, target.TabletType) + if e == nil { + return nil, topo.ErrNoNode + } + + e.mu.RLock() + defer e.mu.RUnlock() + agg, ok := e.aggregates[target.Cell] + if !ok { + return nil, topo.ErrNoNode + } + return agg, nil +} + +// GetMasterCell is part of the TargetStatsListener interface. +func (tc *TabletStatsCache) GetMasterCell(keyspace, shard string) (cell string, err error) { + e := tc.getEntry(keyspace, shard, topodatapb.TabletType_MASTER) + if e == nil { + return "", topo.ErrNoNode + } + + e.mu.RLock() + defer e.mu.RUnlock() + for cell := range e.aggregates { + return cell, nil + } + return "", topo.ErrNoNode +} + // Compile-time interface check. 
var _ HealthCheckStatsListener = (*TabletStatsCache)(nil) +var _ srvtopo.TargetStatsListener = (*TabletStatsCache)(nil) diff --git a/go/vt/discovery/tablet_stats_cache_wait.go b/go/vt/discovery/tablet_stats_cache_wait.go index 7d7289fd724..adbe3a131c4 100644 --- a/go/vt/discovery/tablet_stats_cache_wait.go +++ b/go/vt/discovery/tablet_stats_cache_wait.go @@ -17,16 +17,10 @@ limitations under the License. package discovery import ( - "sync" "time" - log "github.com/golang/glog" "golang.org/x/net/context" - "github.com/youtube/vitess/go/vt/concurrency" - "github.com/youtube/vitess/go/vt/srvtopo" - "github.com/youtube/vitess/go/vt/topo" - querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) @@ -57,87 +51,13 @@ func (tc *TabletStatsCache) WaitForAnyTablet(ctx context.Context, cell, keyspace } // WaitForAllServingTablets waits for at least one healthy serving tablet in -// the given cell for all keyspaces / shards before returning. +// each given target before returning. // It will return ctx.Err() if the context is canceled. // It will return an error if it can't read the necessary topology records. -func (tc *TabletStatsCache) WaitForAllServingTablets(ctx context.Context, ts srvtopo.Server, cell string, types []topodatapb.TabletType) error { - targets, err := FindAllTargets(ctx, ts, cell, types) - if err != nil { - return err - } - +func (tc *TabletStatsCache) WaitForAllServingTablets(ctx context.Context, targets []*querypb.Target) error { return tc.waitForTablets(ctx, targets, true) } -// FindAllTargets goes through all serving shards in the topology -// for the provided tablet types. It returns one Target object per -// keyspace / shard / matching TabletType. 
-func FindAllTargets(ctx context.Context, ts srvtopo.Server, cell string, tabletTypes []topodatapb.TabletType) ([]*querypb.Target, error) { - ksNames, err := ts.GetSrvKeyspaceNames(ctx, cell) - if err != nil { - return nil, err - } - - var targets []*querypb.Target - var wg sync.WaitGroup - var mu sync.Mutex - var errRecorder concurrency.AllErrorRecorder - for _, ksName := range ksNames { - wg.Add(1) - go func(keyspace string) { - defer wg.Done() - - // Get SrvKeyspace for cell/keyspace. - ks, err := ts.GetSrvKeyspace(ctx, cell, keyspace) - if err != nil { - if err == topo.ErrNoNode { - // Possibly a race condition, or leftover - // crud in the topology service. Just log it. - log.Warningf("GetSrvKeyspace(%v, %v) returned ErrNoNode, skipping that SrvKeyspace", cell, keyspace) - } else { - // More serious error, abort. - errRecorder.RecordError(err) - } - return - } - - // Get all shard names that are used for serving. - for _, ksPartition := range ks.Partitions { - // Check we're waiting for tablets of that type. - waitForIt := false - for _, tt := range tabletTypes { - if tt == ksPartition.ServedType { - waitForIt = true - } - } - if !waitForIt { - continue - } - - // Add all the shards. Note we can't have - // duplicates, as there is only one entry per - // TabletType in the Partitions list. - mu.Lock() - for _, shard := range ksPartition.ShardReferences { - targets = append(targets, &querypb.Target{ - Cell: cell, - Keyspace: keyspace, - Shard: shard.Name, - TabletType: ksPartition.ServedType, - }) - } - mu.Unlock() - } - }(ksName) - } - wg.Wait() - if errRecorder.HasErrors() { - return nil, errRecorder.Error() - } - - return targets, nil -} - // waitForTablets is the internal method that polls for tablets. 
func (tc *TabletStatsCache) waitForTablets(ctx context.Context, targets []*querypb.Target, requireServing bool) error { for { diff --git a/go/vt/discovery/tablet_stats_cache_wait_test.go b/go/vt/discovery/tablet_stats_cache_wait_test.go index ceed880eded..a1f4bd7d983 100644 --- a/go/vt/discovery/tablet_stats_cache_wait_test.go +++ b/go/vt/discovery/tablet_stats_cache_wait_test.go @@ -17,158 +17,17 @@ limitations under the License. package discovery import ( - "flag" - "reflect" - "sort" "testing" "time" "golang.org/x/net/context" - "github.com/youtube/vitess/go/vt/srvtopo" "github.com/youtube/vitess/go/vt/topo" - "github.com/youtube/vitess/go/vt/topo/memorytopo" querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) -// To sort []*querypb.Target for comparison. -type TargetArray []*querypb.Target - -func (a TargetArray) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a TargetArray) Len() int { return len(a) } -func (a TargetArray) Less(i, j int) bool { - if a[i].Cell != a[j].Cell { - return a[i].Cell < a[j].Cell - } - if a[i].Keyspace != a[j].Keyspace { - return a[i].Keyspace < a[j].Keyspace - } - if a[i].Shard != a[j].Shard { - return a[i].Shard < a[j].Shard - } - return a[i].TabletType < a[j].TabletType -} - -func TestFindAllKeyspaceShards(t *testing.T) { - ctx := context.Background() - ts := memorytopo.NewServer("cell1", "cell2") - flag.Set("srv_topo_cache_ttl", "0s") // No caching values - flag.Set("srv_topo_cache_refresh", "0s") // No caching values - rs := srvtopo.NewResilientServer(ts, "TestFindAllKeyspaceShards") - - // No keyspace / shards. - ks, err := FindAllTargets(ctx, rs, "cell1", []topodatapb.TabletType{topodatapb.TabletType_MASTER}) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if len(ks) > 0 { - t.Errorf("why did I get anything? %v", ks) - } - - // Add one. 
- if err := ts.UpdateSrvKeyspace(ctx, "cell1", "test_keyspace", &topodatapb.SrvKeyspace{ - Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ - { - ServedType: topodatapb.TabletType_MASTER, - ShardReferences: []*topodatapb.ShardReference{ - { - Name: "test_shard0", - }, - }, - }, - }, - }); err != nil { - t.Fatalf("can't add srvKeyspace: %v", err) - } - - // Get it. - ks, err = FindAllTargets(ctx, rs, "cell1", []topodatapb.TabletType{topodatapb.TabletType_MASTER}) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if !reflect.DeepEqual(ks, []*querypb.Target{ - { - Cell: "cell1", - Keyspace: "test_keyspace", - Shard: "test_shard0", - TabletType: topodatapb.TabletType_MASTER, - }, - }) { - t.Errorf("got wrong value: %v", ks) - } - - // Add another one. - if err := ts.UpdateSrvKeyspace(ctx, "cell1", "test_keyspace2", &topodatapb.SrvKeyspace{ - Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ - { - ServedType: topodatapb.TabletType_MASTER, - ShardReferences: []*topodatapb.ShardReference{ - { - Name: "test_shard1", - }, - }, - }, - { - ServedType: topodatapb.TabletType_REPLICA, - ShardReferences: []*topodatapb.ShardReference{ - { - Name: "test_shard2", - }, - }, - }, - }, - }); err != nil { - t.Fatalf("can't add srvKeyspace: %v", err) - } - - // Get it for all types. 
- ks, err = FindAllTargets(ctx, rs, "cell1", []topodatapb.TabletType{topodatapb.TabletType_MASTER, topodatapb.TabletType_REPLICA}) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - sort.Sort(TargetArray(ks)) - if !reflect.DeepEqual(ks, []*querypb.Target{ - { - Cell: "cell1", - Keyspace: "test_keyspace", - Shard: "test_shard0", - TabletType: topodatapb.TabletType_MASTER, - }, - { - Cell: "cell1", - Keyspace: "test_keyspace2", - Shard: "test_shard1", - TabletType: topodatapb.TabletType_MASTER, - }, - { - Cell: "cell1", - Keyspace: "test_keyspace2", - Shard: "test_shard2", - TabletType: topodatapb.TabletType_REPLICA, - }, - }) { - t.Errorf("got wrong value: %v", ks) - } - - // Only get the REPLICA targets. - ks, err = FindAllTargets(ctx, rs, "cell1", []topodatapb.TabletType{topodatapb.TabletType_REPLICA}) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if !reflect.DeepEqual(ks, []*querypb.Target{ - { - Cell: "cell1", - Keyspace: "test_keyspace2", - Shard: "test_shard2", - TabletType: topodatapb.TabletType_REPLICA, - }, - }) { - t.Errorf("got wrong value: %v", ks) - } -} - func TestWaitForTablets(t *testing.T) { shortCtx, shortCancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer shortCancel() diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index a2a3938acd3..aaaa57d1d91 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -322,7 +322,7 @@ func backup(ctx context.Context, mysqld MysqlDaemon, logger logutil.Logger, bh b usable := backupErr == nil // Try to restart mysqld - err = mysqld.RefreshConfig() + err = mysqld.RefreshConfig(ctx) if err != nil { return usable, fmt.Errorf("can't refresh mysqld config: %v", err) } diff --git a/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go b/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go index 41430b7657a..72ba70face4 100644 --- a/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go +++ b/go/vt/mysqlctl/fakemysqldaemon/fakemysqldaemon.go @@ 
-190,7 +190,7 @@ func (fmd *FakeMysqlDaemon) ReinitConfig(ctx context.Context) error { } // RefreshConfig is part of the MysqlDaemon interface -func (fmd *FakeMysqlDaemon) RefreshConfig() error { +func (fmd *FakeMysqlDaemon) RefreshConfig(ctx context.Context) error { return nil } diff --git a/go/vt/mysqlctl/grpcmysqlctlclient/client.go b/go/vt/mysqlctl/grpcmysqlctlclient/client.go index 09e54093df0..3db377ae0ef 100644 --- a/go/vt/mysqlctl/grpcmysqlctlclient/client.go +++ b/go/vt/mysqlctl/grpcmysqlctlclient/client.go @@ -91,6 +91,14 @@ func (c *client) ReinitConfig(ctx context.Context) error { }) } +// RefreshConfig is part of the MysqlctlClient interface. +func (c *client) RefreshConfig(ctx context.Context) error { + return c.withRetry(ctx, func() error { + _, err := c.c.RefreshConfig(ctx, &mysqlctlpb.RefreshConfigRequest{}) + return err + }) +} + // Close is part of the MysqlctlClient interface. func (c *client) Close() { c.cc.Close() diff --git a/go/vt/mysqlctl/grpcmysqlctlserver/server.go b/go/vt/mysqlctl/grpcmysqlctlserver/server.go index a5898a03e73..1948cc59480 100644 --- a/go/vt/mysqlctl/grpcmysqlctlserver/server.go +++ b/go/vt/mysqlctl/grpcmysqlctlserver/server.go @@ -54,6 +54,11 @@ func (s *server) ReinitConfig(ctx context.Context, request *mysqlctlpb.ReinitCon return &mysqlctlpb.ReinitConfigResponse{}, s.mysqld.ReinitConfig(ctx) } +// RefreshConfig implements the server side of the MysqlctlClient interface. +func (s *server) RefreshConfig(ctx context.Context, request *mysqlctlpb.RefreshConfigRequest) (*mysqlctlpb.RefreshConfigResponse, error) { + return &mysqlctlpb.RefreshConfigResponse{}, s.mysqld.RefreshConfig(ctx) +} + // StartServer registers the Server for RPCs. 
func StartServer(s *grpc.Server, mysqld *mysqlctl.Mysqld) { mysqlctlpb.RegisterMysqlCtlServer(s, &server{mysqld}) diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index a08f20df9f5..8c9bcde062c 100644 --- a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -39,7 +39,7 @@ type MysqlDaemon interface { Shutdown(ctx context.Context, waitForMysqld bool) error RunMysqlUpgrade() error ReinitConfig(ctx context.Context) error - RefreshConfig() error + RefreshConfig(ctx context.Context) error Wait(ctx context.Context) error // GetMysqlPort returns the current port mysql is listening on. diff --git a/go/vt/mysqlctl/mysqlctlclient/interface.go b/go/vt/mysqlctl/mysqlctlclient/interface.go index 5a0c8d27ffe..eb24e5c9757 100644 --- a/go/vt/mysqlctl/mysqlctlclient/interface.go +++ b/go/vt/mysqlctl/mysqlctlclient/interface.go @@ -42,6 +42,9 @@ type MysqlctlClient interface { // ReinitConfig calls Mysqld.ReinitConfig remotely. ReinitConfig(ctx context.Context) error + // RefreshConfig calls Mysqld.RefreshConfig remotely. + RefreshConfig(ctx context.Context) error + // Close will terminate the connection. This object won't be used anymore. Close() } diff --git a/go/vt/mysqlctl/mysqld.go b/go/vt/mysqlctl/mysqld.go index 081eda91d07..fcd219a04d6 100644 --- a/go/vt/mysqlctl/mysqld.go +++ b/go/vt/mysqlctl/mysqld.go @@ -637,7 +637,18 @@ func getMycnfTemplates(root string) []string { // RefreshConfig attempts to recreate the my.cnf from templates, and log and // swap in to place if it's updated. It keeps a copy of the last version in case fallback is required. // Should be called from a stable replica, server_id is not regenerated. -func (mysqld *Mysqld) RefreshConfig() error { +func (mysqld *Mysqld) RefreshConfig(ctx context.Context) error { + // Execute as remote action on mysqlctld if requested. 
+ if *socketFile != "" { + log.Infof("executing Mysqld.RefreshConfig() remotely via mysqlctld server: %v", *socketFile) + client, err := mysqlctlclient.New("unix", *socketFile) + if err != nil { + return fmt.Errorf("can't dial mysqlctld: %v", err) + } + defer client.Close() + return client.RefreshConfig(ctx) + } + log.Info("Checking for updates to my.cnf") root, err := vtenv.VtRoot() if err != nil { diff --git a/go/vt/mysqlproxy/mysqlproxy.go b/go/vt/mysqlproxy/mysqlproxy.go index e3eebea702e..1c39d789449 100644 --- a/go/vt/mysqlproxy/mysqlproxy.go +++ b/go/vt/mysqlproxy/mysqlproxy.go @@ -91,6 +91,13 @@ func (mp *Proxy) Rollback(ctx context.Context, session *ProxySession) error { } func (mp *Proxy) doBegin(ctx context.Context, session *ProxySession) error { + if session.TransactionID != 0 { + err := mp.doCommit(ctx, session) + if err != nil { + return err + } + } + txID, err := mp.qs.Begin(ctx, mp.target, session.Options) if err != nil { return err diff --git a/go/vt/proto/mysqlctl/mysqlctl.pb.go b/go/vt/proto/mysqlctl/mysqlctl.pb.go index b1a19793625..c96a2f9926d 100644 --- a/go/vt/proto/mysqlctl/mysqlctl.pb.go +++ b/go/vt/proto/mysqlctl/mysqlctl.pb.go @@ -16,6 +16,8 @@ It has these top-level messages: RunMysqlUpgradeResponse ReinitConfigRequest ReinitConfigResponse + RefreshConfigRequest + RefreshConfigResponse */ package mysqlctl @@ -119,6 +121,22 @@ func (m *ReinitConfigResponse) String() string { return proto.Compact func (*ReinitConfigResponse) ProtoMessage() {} func (*ReinitConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +type RefreshConfigRequest struct { +} + +func (m *RefreshConfigRequest) Reset() { *m = RefreshConfigRequest{} } +func (m *RefreshConfigRequest) String() string { return proto.CompactTextString(m) } +func (*RefreshConfigRequest) ProtoMessage() {} +func (*RefreshConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +type RefreshConfigResponse struct { +} + +func (m 
*RefreshConfigResponse) Reset() { *m = RefreshConfigResponse{} } +func (m *RefreshConfigResponse) String() string { return proto.CompactTextString(m) } +func (*RefreshConfigResponse) ProtoMessage() {} +func (*RefreshConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + func init() { proto.RegisterType((*StartRequest)(nil), "mysqlctl.StartRequest") proto.RegisterType((*StartResponse)(nil), "mysqlctl.StartResponse") @@ -128,6 +146,8 @@ func init() { proto.RegisterType((*RunMysqlUpgradeResponse)(nil), "mysqlctl.RunMysqlUpgradeResponse") proto.RegisterType((*ReinitConfigRequest)(nil), "mysqlctl.ReinitConfigRequest") proto.RegisterType((*ReinitConfigResponse)(nil), "mysqlctl.ReinitConfigResponse") + proto.RegisterType((*RefreshConfigRequest)(nil), "mysqlctl.RefreshConfigRequest") + proto.RegisterType((*RefreshConfigResponse)(nil), "mysqlctl.RefreshConfigResponse") } // Reference imports to suppress errors if they are not otherwise used. @@ -145,6 +165,7 @@ type MysqlCtlClient interface { Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error) RunMysqlUpgrade(ctx context.Context, in *RunMysqlUpgradeRequest, opts ...grpc.CallOption) (*RunMysqlUpgradeResponse, error) ReinitConfig(ctx context.Context, in *ReinitConfigRequest, opts ...grpc.CallOption) (*ReinitConfigResponse, error) + RefreshConfig(ctx context.Context, in *RefreshConfigRequest, opts ...grpc.CallOption) (*RefreshConfigResponse, error) } type mysqlCtlClient struct { @@ -191,6 +212,15 @@ func (c *mysqlCtlClient) ReinitConfig(ctx context.Context, in *ReinitConfigReque return out, nil } +func (c *mysqlCtlClient) RefreshConfig(ctx context.Context, in *RefreshConfigRequest, opts ...grpc.CallOption) (*RefreshConfigResponse, error) { + out := new(RefreshConfigResponse) + err := grpc.Invoke(ctx, "/mysqlctl.MysqlCtl/RefreshConfig", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + // Server API for MysqlCtl service type MysqlCtlServer interface { @@ -198,6 +228,7 @@ type MysqlCtlServer interface { Shutdown(context.Context, *ShutdownRequest) (*ShutdownResponse, error) RunMysqlUpgrade(context.Context, *RunMysqlUpgradeRequest) (*RunMysqlUpgradeResponse, error) ReinitConfig(context.Context, *ReinitConfigRequest) (*ReinitConfigResponse, error) + RefreshConfig(context.Context, *RefreshConfigRequest) (*RefreshConfigResponse, error) } func RegisterMysqlCtlServer(s *grpc.Server, srv MysqlCtlServer) { @@ -276,6 +307,24 @@ func _MysqlCtl_ReinitConfig_Handler(srv interface{}, ctx context.Context, dec fu return interceptor(ctx, in, info, handler) } +func _MysqlCtl_RefreshConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RefreshConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MysqlCtlServer).RefreshConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/mysqlctl.MysqlCtl/RefreshConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MysqlCtlServer).RefreshConfig(ctx, req.(*RefreshConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _MysqlCtl_serviceDesc = grpc.ServiceDesc{ ServiceName: "mysqlctl.MysqlCtl", HandlerType: (*MysqlCtlServer)(nil), @@ -296,6 +345,10 @@ var _MysqlCtl_serviceDesc = grpc.ServiceDesc{ MethodName: "ReinitConfig", Handler: _MysqlCtl_ReinitConfig_Handler, }, + { + MethodName: "RefreshConfig", + Handler: _MysqlCtl_RefreshConfig_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "mysqlctl.proto", @@ -304,24 +357,25 @@ var _MysqlCtl_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("mysqlctl.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 289 bytes of a gzipped 
FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x4b, 0xfb, 0x30, - 0x1c, 0xc5, 0x7f, 0xfd, 0x89, 0x52, 0xbf, 0x6e, 0x56, 0xa2, 0x76, 0x5d, 0x41, 0xad, 0x39, 0xc8, - 0x4e, 0x13, 0xf4, 0xa4, 0x37, 0x29, 0x78, 0x13, 0x21, 0x43, 0xf0, 0x56, 0xaa, 0xcd, 0x6a, 0xa1, - 0x26, 0x5d, 0x92, 0x32, 0xfc, 0xc7, 0xfc, 0xfb, 0xc4, 0x34, 0xe9, 0x3a, 0x3b, 0x3d, 0xf6, 0x7d, - 0xdf, 0xfb, 0x94, 0xf7, 0x08, 0xec, 0xbf, 0x7f, 0xc8, 0x45, 0xf9, 0xaa, 0xca, 0x69, 0x25, 0xb8, - 0xe2, 0xc8, 0xb5, 0xdf, 0xf8, 0x12, 0x06, 0x33, 0x95, 0x0a, 0x45, 0xe8, 0xa2, 0xa6, 0x52, 0xa1, - 0x33, 0xd8, 0xd3, 0xb7, 0x2c, 0x49, 0x45, 0x2e, 0x03, 0x27, 0xda, 0x9a, 0xec, 0x12, 0x68, 0xa4, - 0x3b, 0x91, 0x4b, 0xec, 0xc1, 0xd0, 0x04, 0x64, 0xc5, 0x99, 0xa4, 0xf8, 0x06, 0xbc, 0xd9, 0x5b, - 0xad, 0x32, 0xbe, 0x64, 0x16, 0x72, 0x01, 0xde, 0x32, 0x2d, 0x54, 0x32, 0xe7, 0x22, 0x69, 0xa2, - 0x81, 0x13, 0x39, 0x13, 0x97, 0x0c, 0xbf, 0xe5, 0x7b, 0x2e, 0x1e, 0xb4, 0x88, 0x11, 0x1c, 0xac, - 0xa2, 0x06, 0x17, 0x80, 0x4f, 0x6a, 0xa6, 0x0d, 0x4f, 0x55, 0x2e, 0xd2, 0x8c, 0x1a, 0x2a, 0x1e, - 0xc3, 0xa8, 0x77, 0x31, 0xa1, 0x63, 0x38, 0x24, 0xb4, 0x60, 0x85, 0x8a, 0x39, 0x9b, 0x17, 0xb9, - 0x4d, 0xf8, 0x70, 0xb4, 0x2e, 0x37, 0xf6, 0xab, 0xcf, 0xff, 0xe0, 0x6a, 0x4e, 0xac, 0x4a, 0x74, - 0x0b, 0xdb, 0xba, 0x10, 0xf2, 0xa7, 0xed, 0x4a, 0xdd, 0x49, 0xc2, 0x51, 0x4f, 0x37, 0x7f, 0xfd, - 0x87, 0x62, 0x70, 0x6d, 0x01, 0x34, 0xee, 0xd8, 0xd6, 0xf7, 0x08, 0xc3, 0x4d, 0xa7, 0x16, 0xf2, - 0x0c, 0xde, 0x8f, 0x5e, 0x28, 0x5a, 0x05, 0x36, 0x8f, 0x11, 0x9e, 0xff, 0xe1, 0x68, 0xc9, 0x8f, - 0x30, 0xe8, 0xf6, 0x47, 0x27, 0x9d, 0x50, 0x7f, 0xae, 0xf0, 0xf4, 0xb7, 0xb3, 0x05, 0xbe, 0xec, - 0xe8, 0xe7, 0x73, 0xfd, 0x15, 0x00, 0x00, 0xff, 0xff, 0xb9, 0x5e, 0x84, 0xed, 0x50, 0x02, 0x00, - 0x00, + // 313 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0x4d, 0x4f, 0x83, 0x30, + 0x18, 0xc7, 0x5d, 0x16, 0x0d, 0x3e, 0x6e, 0x62, 0xaa, 0x1b, 
0xac, 0x89, 0x0e, 0x39, 0x98, 0x9d, + 0x66, 0xa2, 0x27, 0xbd, 0x19, 0x12, 0x6f, 0xc6, 0xa4, 0x8b, 0x89, 0x37, 0x82, 0x52, 0x18, 0x09, + 0x52, 0xd6, 0x96, 0x2c, 0x7e, 0x05, 0x3f, 0xb5, 0xb1, 0x14, 0x06, 0x63, 0xf3, 0xc8, 0xff, 0xed, + 0x09, 0x3f, 0x80, 0xd3, 0xaf, 0x6f, 0xb1, 0x4a, 0x3f, 0x65, 0x3a, 0xcf, 0x39, 0x93, 0x0c, 0x19, + 0xd5, 0xb3, 0x7b, 0x0b, 0x83, 0x85, 0x0c, 0xb8, 0x24, 0x74, 0x55, 0x50, 0x21, 0xd1, 0x14, 0x4e, + 0x94, 0x17, 0xfa, 0x01, 0x8f, 0x85, 0xdd, 0x73, 0xfa, 0xb3, 0x63, 0x02, 0xa5, 0xf4, 0xc4, 0x63, + 0xe1, 0x9a, 0x30, 0xd4, 0x05, 0x91, 0xb3, 0x4c, 0x50, 0xf7, 0x01, 0xcc, 0xc5, 0xb2, 0x90, 0x21, + 0x5b, 0x67, 0xd5, 0xc8, 0x0d, 0x98, 0xeb, 0x20, 0x91, 0x7e, 0xc4, 0xb8, 0x5f, 0x56, 0xed, 0x9e, + 0xd3, 0x9b, 0x19, 0x64, 0xf8, 0x27, 0x3f, 0x33, 0xfe, 0xa2, 0x44, 0x17, 0xc1, 0xd9, 0xa6, 0xaa, + 0xe7, 0x6c, 0x18, 0x93, 0x22, 0x53, 0x81, 0xb7, 0x3c, 0xe6, 0x41, 0x48, 0xf5, 0xaa, 0x3b, 0x01, + 0xab, 0xe3, 0xe8, 0xd2, 0x08, 0xce, 0x09, 0x4d, 0xb2, 0x44, 0x7a, 0x2c, 0x8b, 0x92, 0xb8, 0x6a, + 0x8c, 0xe1, 0xa2, 0x2d, 0xeb, 0xb8, 0xd2, 0x23, 0x4e, 0xc5, 0xb2, 0x9d, 0xb7, 0x60, 0xb4, 0xa5, + 0x97, 0x85, 0xbb, 0x9f, 0x3e, 0x18, 0xea, 0xb0, 0x27, 0x53, 0xf4, 0x08, 0x87, 0x8a, 0x00, 0x1a, + 0xcf, 0x6b, 0xac, 0x4d, 0x86, 0xd8, 0xea, 0xe8, 0xfa, 0xee, 0x01, 0xf2, 0xc0, 0xa8, 0xde, 0x18, + 0x4d, 0x1a, 0xb1, 0x36, 0x40, 0x8c, 0x77, 0x59, 0xf5, 0xc8, 0x3b, 0x98, 0x5b, 0x20, 0x90, 0xb3, + 0x29, 0xec, 0xa6, 0x87, 0xaf, 0xff, 0x49, 0xd4, 0xcb, 0xaf, 0x30, 0x68, 0x02, 0x43, 0x97, 0x8d, + 0x52, 0x97, 0x2f, 0xbe, 0xda, 0x67, 0xd7, 0x83, 0x04, 0x86, 0x2d, 0xa2, 0xa8, 0x55, 0xe9, 0x7e, + 0x02, 0x3c, 0xdd, 0xeb, 0x57, 0x9b, 0x1f, 0x47, 0xea, 0x1f, 0xbe, 0xff, 0x0d, 0x00, 0x00, 0xff, + 0xff, 0x81, 0x96, 0x68, 0x13, 0xd5, 0x02, 0x00, 0x00, } diff --git a/go/vt/sqlparser/analyzer.go b/go/vt/sqlparser/analyzer.go index e76e6ead0e2..57391646e24 100644 --- a/go/vt/sqlparser/analyzer.go +++ b/go/vt/sqlparser/analyzer.go @@ -32,6 +32,7 @@ import ( // These constants are used to 
identify the SQL statement type. const ( StmtSelect = iota + StmtStream StmtInsert StmtReplace StmtUpdate @@ -63,6 +64,8 @@ func Preview(sql string) int { switch loweredFirstWord { case "select": return StmtSelect + case "stream": + return StmtStream case "insert": return StmtInsert case "replace": @@ -87,7 +90,7 @@ func Preview(sql string) int { return StmtRollback } switch loweredFirstWord { - case "create", "alter", "rename", "drop": + case "create", "alter", "rename", "drop", "truncate": return StmtDDL case "set": return StmtSet @@ -95,7 +98,7 @@ func Preview(sql string) int { return StmtShow case "use": return StmtUse - case "analyze", "describe", "desc", "explain", "repair", "optimize", "truncate": + case "analyze", "describe", "desc", "explain", "repair", "optimize": return StmtOther } if strings.Index(trimmed, "/*!") == 0 { @@ -109,6 +112,8 @@ func StmtType(stmtType int) string { switch stmtType { case StmtSelect: return "SELECT" + case StmtStream: + return "STREAM" case StmtInsert: return "INSERT" case StmtReplace: diff --git a/go/vt/sqlparser/analyzer_test.go b/go/vt/sqlparser/analyzer_test.go index c50955d2d47..03199c4b272 100644 --- a/go/vt/sqlparser/analyzer_test.go +++ b/go/vt/sqlparser/analyzer_test.go @@ -65,7 +65,7 @@ func TestPreview(t *testing.T) { {"explain", StmtOther}, {"repair", StmtOther}, {"optimize", StmtOther}, - {"truncate", StmtOther}, + {"truncate", StmtDDL}, {"unknown", StmtUnknown}, {"/* leading comment */ select ...", StmtSelect}, diff --git a/go/vt/sqlparser/ast.go b/go/vt/sqlparser/ast.go index d9edde9b5c4..6b838346a5f 100644 --- a/go/vt/sqlparser/ast.go +++ b/go/vt/sqlparser/ast.go @@ -181,6 +181,7 @@ type Statement interface { func (*Union) iStatement() {} func (*Select) iStatement() {} +func (*Stream) iStatement() {} func (*Insert) iStatement() {} func (*Update) iStatement() {} func (*Delete) iStatement() {} @@ -402,6 +403,32 @@ func (node *Union) WalkSubtree(visit Visit) error { ) } +// Stream represents a STREAM statement.
+type Stream struct { + Comments Comments + SelectExpr SelectExpr + Table TableName +} + +// Format formats the node. +func (node *Stream) Format(buf *TrackedBuffer) { + buf.Myprintf("stream %v%v from %v", + node.Comments, node.SelectExpr, node.Table) +} + +// WalkSubtree walks the nodes of the subtree. +func (node *Stream) WalkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Comments, + node.SelectExpr, + node.Table, + ) +} + // Insert represents an INSERT or REPLACE statement. // Per the MySQL docs, http://dev.mysql.com/doc/refman/5.7/en/replace.html // Replace is the counterpart to `INSERT IGNORE`, and works exactly like a diff --git a/go/vt/sqlparser/comments.go b/go/vt/sqlparser/comments.go index b5ac471df5e..4168c667387 100644 --- a/go/vt/sqlparser/comments.go +++ b/go/vt/sqlparser/comments.go @@ -29,15 +29,16 @@ type matchtracker struct { // SplitTrailingComments splits the query trailing comments from the query. func SplitTrailingComments(sql string) (query, comments string) { + trimmed := strings.TrimRightFunc(sql, unicode.IsSpace) tracker := matchtracker{ - query: sql, - index: len(sql), + query: trimmed, + index: len(trimmed), } pos := tracker.matchComments() if pos >= 0 { return tracker.query[:pos], tracker.query[pos:] } - return sql, "" + return trimmed, "" } // matchComments matches trailing comments. 
If no comment was found, diff --git a/go/vt/sqlparser/comments_test.go b/go/vt/sqlparser/comments_test.go index d174a9aa10d..fdee23f92bf 100644 --- a/go/vt/sqlparser/comments_test.go +++ b/go/vt/sqlparser/comments_test.go @@ -81,6 +81,10 @@ func TestSplitTrailingComments(t *testing.T) { input: "foo /*** bar ***/", outSQL: "foo", outComments: " /*** bar ***/", + }, { + input: "foo /*** bar ***/ ", + outSQL: "foo", + outComments: " /*** bar ***/", }, { input: "*** bar ***/", outSQL: "*** bar ***/", diff --git a/go/vt/sqlparser/parse_test.go b/go/vt/sqlparser/parse_test.go index 492946980d8..f86bb1dcc50 100644 --- a/go/vt/sqlparser/parse_test.go +++ b/go/vt/sqlparser/parse_test.go @@ -1089,6 +1089,10 @@ var ( input: "replace into t partition (p0) values (1, 'asdf')", }, { input: "delete from t partition (p0) where a = 1", + }, { + input: "stream * from t", + }, { + input: "stream /* comment */ * from t", }} ) diff --git a/go/vt/sqlparser/sql.go b/go/vt/sqlparser/sql.go index 9d712fb7e9b..4c4ee4b069e 100644 --- a/go/vt/sqlparser/sql.go +++ b/go/vt/sqlparser/sql.go @@ -95,196 +95,197 @@ type yySymType struct { const LEX_ERROR = 57346 const UNION = 57347 const SELECT = 57348 -const INSERT = 57349 -const UPDATE = 57350 -const DELETE = 57351 -const FROM = 57352 -const WHERE = 57353 -const GROUP = 57354 -const HAVING = 57355 -const ORDER = 57356 -const BY = 57357 -const LIMIT = 57358 -const OFFSET = 57359 -const FOR = 57360 -const ALL = 57361 -const DISTINCT = 57362 -const AS = 57363 -const EXISTS = 57364 -const ASC = 57365 -const DESC = 57366 -const INTO = 57367 -const DUPLICATE = 57368 -const KEY = 57369 -const DEFAULT = 57370 -const SET = 57371 -const LOCK = 57372 -const KEYS = 57373 -const VALUES = 57374 -const LAST_INSERT_ID = 57375 -const NEXT = 57376 -const VALUE = 57377 -const SHARE = 57378 -const MODE = 57379 -const SQL_NO_CACHE = 57380 -const SQL_CACHE = 57381 -const JOIN = 57382 -const STRAIGHT_JOIN = 57383 -const LEFT = 57384 -const RIGHT = 57385 -const INNER = 
57386 -const OUTER = 57387 -const CROSS = 57388 -const NATURAL = 57389 -const USE = 57390 -const FORCE = 57391 -const ON = 57392 -const USING = 57393 -const ID = 57394 -const HEX = 57395 -const STRING = 57396 -const INTEGRAL = 57397 -const FLOAT = 57398 -const HEXNUM = 57399 -const VALUE_ARG = 57400 -const LIST_ARG = 57401 -const COMMENT = 57402 -const COMMENT_KEYWORD = 57403 -const BIT_LITERAL = 57404 -const NULL = 57405 -const TRUE = 57406 -const FALSE = 57407 -const OR = 57408 -const AND = 57409 -const NOT = 57410 -const BETWEEN = 57411 -const CASE = 57412 -const WHEN = 57413 -const THEN = 57414 -const ELSE = 57415 -const END = 57416 -const LE = 57417 -const GE = 57418 -const NE = 57419 -const NULL_SAFE_EQUAL = 57420 -const IS = 57421 -const LIKE = 57422 -const REGEXP = 57423 -const IN = 57424 -const SHIFT_LEFT = 57425 -const SHIFT_RIGHT = 57426 -const DIV = 57427 -const MOD = 57428 -const UNARY = 57429 -const COLLATE = 57430 -const BINARY = 57431 -const UNDERSCORE_BINARY = 57432 -const INTERVAL = 57433 -const JSON_EXTRACT_OP = 57434 -const JSON_UNQUOTE_EXTRACT_OP = 57435 -const CREATE = 57436 -const ALTER = 57437 -const DROP = 57438 -const RENAME = 57439 -const ANALYZE = 57440 -const TABLE = 57441 -const INDEX = 57442 -const VIEW = 57443 -const TO = 57444 -const IGNORE = 57445 -const IF = 57446 -const UNIQUE = 57447 -const PRIMARY = 57448 -const SHOW = 57449 -const DESCRIBE = 57450 -const EXPLAIN = 57451 -const DATE = 57452 -const ESCAPE = 57453 -const REPAIR = 57454 -const OPTIMIZE = 57455 -const TRUNCATE = 57456 -const MAXVALUE = 57457 -const PARTITION = 57458 -const REORGANIZE = 57459 -const LESS = 57460 -const THAN = 57461 -const PROCEDURE = 57462 -const TRIGGER = 57463 -const VINDEX = 57464 -const VINDEXES = 57465 -const STATUS = 57466 -const VARIABLES = 57467 -const BIT = 57468 -const TINYINT = 57469 -const SMALLINT = 57470 -const MEDIUMINT = 57471 -const INT = 57472 -const INTEGER = 57473 -const BIGINT = 57474 -const INTNUM = 57475 -const REAL = 57476 
-const DOUBLE = 57477 -const FLOAT_TYPE = 57478 -const DECIMAL = 57479 -const NUMERIC = 57480 -const TIME = 57481 -const TIMESTAMP = 57482 -const DATETIME = 57483 -const YEAR = 57484 -const CHAR = 57485 -const VARCHAR = 57486 -const BOOL = 57487 -const CHARACTER = 57488 -const VARBINARY = 57489 -const NCHAR = 57490 -const TEXT = 57491 -const TINYTEXT = 57492 -const MEDIUMTEXT = 57493 -const LONGTEXT = 57494 -const BLOB = 57495 -const TINYBLOB = 57496 -const MEDIUMBLOB = 57497 -const LONGBLOB = 57498 -const JSON = 57499 -const ENUM = 57500 -const NULLX = 57501 -const AUTO_INCREMENT = 57502 -const APPROXNUM = 57503 -const SIGNED = 57504 -const UNSIGNED = 57505 -const ZEROFILL = 57506 -const DATABASES = 57507 -const TABLES = 57508 -const VITESS_KEYSPACES = 57509 -const VITESS_SHARDS = 57510 -const VITESS_TABLETS = 57511 -const VSCHEMA_TABLES = 57512 -const NAMES = 57513 -const CHARSET = 57514 -const GLOBAL = 57515 -const SESSION = 57516 -const CURRENT_TIMESTAMP = 57517 -const DATABASE = 57518 -const CURRENT_DATE = 57519 -const CURRENT_TIME = 57520 -const LOCALTIME = 57521 -const LOCALTIMESTAMP = 57522 -const UTC_DATE = 57523 -const UTC_TIME = 57524 -const UTC_TIMESTAMP = 57525 -const REPLACE = 57526 -const CONVERT = 57527 -const CAST = 57528 -const GROUP_CONCAT = 57529 -const SEPARATOR = 57530 -const MATCH = 57531 -const AGAINST = 57532 -const BOOLEAN = 57533 -const LANGUAGE = 57534 -const WITH = 57535 -const QUERY = 57536 -const EXPANSION = 57537 -const UNUSED = 57538 +const STREAM = 57349 +const INSERT = 57350 +const UPDATE = 57351 +const DELETE = 57352 +const FROM = 57353 +const WHERE = 57354 +const GROUP = 57355 +const HAVING = 57356 +const ORDER = 57357 +const BY = 57358 +const LIMIT = 57359 +const OFFSET = 57360 +const FOR = 57361 +const ALL = 57362 +const DISTINCT = 57363 +const AS = 57364 +const EXISTS = 57365 +const ASC = 57366 +const DESC = 57367 +const INTO = 57368 +const DUPLICATE = 57369 +const KEY = 57370 +const DEFAULT = 57371 +const SET = 57372 +const 
LOCK = 57373 +const KEYS = 57374 +const VALUES = 57375 +const LAST_INSERT_ID = 57376 +const NEXT = 57377 +const VALUE = 57378 +const SHARE = 57379 +const MODE = 57380 +const SQL_NO_CACHE = 57381 +const SQL_CACHE = 57382 +const JOIN = 57383 +const STRAIGHT_JOIN = 57384 +const LEFT = 57385 +const RIGHT = 57386 +const INNER = 57387 +const OUTER = 57388 +const CROSS = 57389 +const NATURAL = 57390 +const USE = 57391 +const FORCE = 57392 +const ON = 57393 +const USING = 57394 +const ID = 57395 +const HEX = 57396 +const STRING = 57397 +const INTEGRAL = 57398 +const FLOAT = 57399 +const HEXNUM = 57400 +const VALUE_ARG = 57401 +const LIST_ARG = 57402 +const COMMENT = 57403 +const COMMENT_KEYWORD = 57404 +const BIT_LITERAL = 57405 +const NULL = 57406 +const TRUE = 57407 +const FALSE = 57408 +const OR = 57409 +const AND = 57410 +const NOT = 57411 +const BETWEEN = 57412 +const CASE = 57413 +const WHEN = 57414 +const THEN = 57415 +const ELSE = 57416 +const END = 57417 +const LE = 57418 +const GE = 57419 +const NE = 57420 +const NULL_SAFE_EQUAL = 57421 +const IS = 57422 +const LIKE = 57423 +const REGEXP = 57424 +const IN = 57425 +const SHIFT_LEFT = 57426 +const SHIFT_RIGHT = 57427 +const DIV = 57428 +const MOD = 57429 +const UNARY = 57430 +const COLLATE = 57431 +const BINARY = 57432 +const UNDERSCORE_BINARY = 57433 +const INTERVAL = 57434 +const JSON_EXTRACT_OP = 57435 +const JSON_UNQUOTE_EXTRACT_OP = 57436 +const CREATE = 57437 +const ALTER = 57438 +const DROP = 57439 +const RENAME = 57440 +const ANALYZE = 57441 +const TABLE = 57442 +const INDEX = 57443 +const VIEW = 57444 +const TO = 57445 +const IGNORE = 57446 +const IF = 57447 +const UNIQUE = 57448 +const PRIMARY = 57449 +const SHOW = 57450 +const DESCRIBE = 57451 +const EXPLAIN = 57452 +const DATE = 57453 +const ESCAPE = 57454 +const REPAIR = 57455 +const OPTIMIZE = 57456 +const TRUNCATE = 57457 +const MAXVALUE = 57458 +const PARTITION = 57459 +const REORGANIZE = 57460 +const LESS = 57461 +const THAN = 57462 +const 
PROCEDURE = 57463 +const TRIGGER = 57464 +const VINDEX = 57465 +const VINDEXES = 57466 +const STATUS = 57467 +const VARIABLES = 57468 +const BIT = 57469 +const TINYINT = 57470 +const SMALLINT = 57471 +const MEDIUMINT = 57472 +const INT = 57473 +const INTEGER = 57474 +const BIGINT = 57475 +const INTNUM = 57476 +const REAL = 57477 +const DOUBLE = 57478 +const FLOAT_TYPE = 57479 +const DECIMAL = 57480 +const NUMERIC = 57481 +const TIME = 57482 +const TIMESTAMP = 57483 +const DATETIME = 57484 +const YEAR = 57485 +const CHAR = 57486 +const VARCHAR = 57487 +const BOOL = 57488 +const CHARACTER = 57489 +const VARBINARY = 57490 +const NCHAR = 57491 +const TEXT = 57492 +const TINYTEXT = 57493 +const MEDIUMTEXT = 57494 +const LONGTEXT = 57495 +const BLOB = 57496 +const TINYBLOB = 57497 +const MEDIUMBLOB = 57498 +const LONGBLOB = 57499 +const JSON = 57500 +const ENUM = 57501 +const NULLX = 57502 +const AUTO_INCREMENT = 57503 +const APPROXNUM = 57504 +const SIGNED = 57505 +const UNSIGNED = 57506 +const ZEROFILL = 57507 +const DATABASES = 57508 +const TABLES = 57509 +const VITESS_KEYSPACES = 57510 +const VITESS_SHARDS = 57511 +const VITESS_TABLETS = 57512 +const VSCHEMA_TABLES = 57513 +const NAMES = 57514 +const CHARSET = 57515 +const GLOBAL = 57516 +const SESSION = 57517 +const CURRENT_TIMESTAMP = 57518 +const DATABASE = 57519 +const CURRENT_DATE = 57520 +const CURRENT_TIME = 57521 +const LOCALTIME = 57522 +const LOCALTIMESTAMP = 57523 +const UTC_DATE = 57524 +const UTC_TIME = 57525 +const UTC_TIMESTAMP = 57526 +const REPLACE = 57527 +const CONVERT = 57528 +const CAST = 57529 +const GROUP_CONCAT = 57530 +const SEPARATOR = 57531 +const MATCH = 57532 +const AGAINST = 57533 +const BOOLEAN = 57534 +const LANGUAGE = 57535 +const WITH = 57536 +const QUERY = 57537 +const EXPANSION = 57538 +const UNUSED = 57539 var yyToknames = [...]string{ "$end", @@ -293,6 +294,7 @@ var yyToknames = [...]string{ "LEX_ERROR", "UNION", "SELECT", + "STREAM", "INSERT", "UPDATE", "DELETE", @@ -514,934 
+516,920 @@ var yyExca = [...]int{ 1, -1, -2, 0, -1, 3, - 5, 22, + 5, 24, -2, 4, - -1, 202, - 79, 631, - 108, 631, - -2, 47, - -1, 203, - 79, 605, - 108, 605, - -2, 48, - -1, 204, - 79, 595, - 108, 595, - -2, 42, - -1, 206, - 79, 619, - 108, 619, - -2, 44, - -1, 210, - 108, 496, - -2, 492, - -1, 211, - 108, 497, - -2, 493, - -1, 638, - 108, 499, + -1, 197, + 109, 498, + -2, 494, + -1, 198, + 109, 499, -2, 495, - -1, 779, - 5, 22, - -2, 443, - -1, 793, - 5, 23, - -2, 320, - -1, 967, - 5, 23, - -2, 444, - -1, 1015, - 5, 22, + -1, 265, + 80, 633, + 109, 633, + -2, 49, + -1, 266, + 80, 607, + 109, 607, + -2, 50, + -1, 267, + 80, 597, + 109, 597, + -2, 44, + -1, 269, + 80, 621, + 109, 621, + -2, 46, + -1, 629, + 109, 501, + -2, 497, + -1, 808, + 5, 25, + -2, 322, + -1, 828, + 5, 24, + -2, 445, + -1, 992, + 5, 25, -2, 446, - -1, 1062, - 5, 23, - -2, 447, + -1, 1027, + 5, 24, + -2, 448, + -1, 1072, + 5, 25, + -2, 449, } const yyPrivate = 57344 -const yyLast = 8507 +const yyLast = 8292 var yyAct = [...]int{ - 380, 38, 565, 1054, 379, 885, 198, 908, 624, 677, - 664, 680, 353, 886, 641, 973, 432, 173, 882, 429, - 736, 782, 743, 943, 44, 856, 848, 240, 430, 3, - 746, 640, 637, 797, 242, 713, 650, 785, 238, 38, - 167, 408, 818, 342, 745, 760, 402, 178, 201, 348, - 213, 351, 193, 418, 673, 434, 182, 43, 603, 1085, - 1076, 1082, 1071, 1080, 189, 1075, 207, 172, 1070, 956, - 1008, 187, 217, 1031, 577, 168, 169, 170, 171, 814, - 913, 914, 915, 340, 658, 233, 657, 48, 188, 916, - 985, 665, 1037, 1003, 1001, 334, 335, 1079, 1077, 693, - 223, 1055, 748, 838, 604, 625, 627, 224, 50, 51, - 52, 53, 219, 691, 652, 135, 136, 136, 1057, 531, - 530, 540, 541, 533, 534, 535, 536, 537, 538, 539, - 532, 1029, 497, 542, 138, 139, 140, 492, 922, 796, - 697, 795, 214, 794, 215, 819, 235, 220, 237, 690, - 147, 137, 239, 239, 239, 239, 835, 239, 239, 652, - 554, 555, 837, 1047, 239, 520, 993, 234, 236, 970, - 535, 536, 537, 538, 539, 532, 860, 626, 542, 38, - 801, 564, 532, 450, 228, 542, 542, 808, 
923, 517, - 449, 331, 332, 333, 431, 336, 337, 687, 692, 685, - 665, 404, 339, 958, 651, 761, 444, 405, 518, 649, - 648, 494, 207, 519, 518, 917, 153, 239, 695, 698, - 960, 812, 239, 232, 520, 1050, 1030, 1028, 446, 1069, - 520, 239, 239, 239, 239, 239, 239, 239, 239, 1058, - 163, 226, 720, 761, 944, 872, 654, 410, 689, 651, - 989, 655, 836, 1064, 834, 491, 718, 719, 717, 988, - 496, 502, 688, 504, 519, 518, 946, 406, 827, 505, - 506, 507, 508, 509, 510, 511, 512, 345, 403, 41, - 737, 520, 738, 706, 708, 709, 826, 694, 707, 716, - 148, 815, 1040, 948, 987, 952, 150, 947, 696, 945, - 211, 156, 152, 825, 950, 533, 534, 535, 536, 537, - 538, 539, 532, 949, 921, 542, 592, 593, 951, 953, - 1066, 341, 154, 1019, 341, 158, 63, 841, 842, 843, - 145, 1019, 1020, 145, 910, 239, 239, 540, 541, 533, - 534, 535, 536, 537, 538, 539, 532, 809, 149, 542, - 551, 553, 145, 145, 982, 981, 902, 341, 145, 969, - 341, 341, 519, 518, 854, 341, 867, 151, 157, 159, - 160, 161, 162, 513, 514, 165, 164, 522, 563, 520, - 739, 567, 568, 569, 570, 571, 572, 573, 490, 576, - 578, 578, 578, 578, 578, 578, 578, 578, 586, 587, - 588, 589, 928, 927, 925, 924, 556, 557, 558, 559, - 560, 561, 562, 521, 519, 518, 193, 193, 193, 193, - 193, 606, 594, 751, 341, 1033, 866, 207, 865, 519, - 518, 520, 431, 230, 628, 225, 145, 214, 145, 1032, - 193, 918, 145, 443, 519, 518, 520, 751, 145, 45, - 523, 783, 63, 63, 63, 63, 207, 63, 63, 631, - 623, 520, 965, 596, 63, 579, 580, 581, 582, 583, - 584, 585, 415, 341, 19, 666, 667, 668, 636, 642, - 619, 608, 609, 566, 611, 638, 634, 145, 854, 595, - 575, 629, 633, 415, 145, 145, 145, 630, 415, 679, - 1014, 63, 239, 645, 607, 452, 451, 610, 441, 883, - 881, 926, 443, 621, 622, 783, 854, 63, 802, 145, - 41, 145, 63, 19, 145, 590, 414, 145, 41, 145, - 714, 63, 63, 63, 63, 63, 63, 63, 63, 659, - 701, 675, 676, 678, 660, 661, 662, 663, 179, 442, - 415, 440, 635, 854, 38, 896, 681, 443, 134, 670, - 671, 672, 368, 367, 370, 371, 372, 373, 567, 41, - 805, 369, 374, 674, 712, 
786, 787, 721, 722, 723, - 724, 725, 726, 727, 728, 729, 730, 731, 732, 733, - 734, 735, 750, 669, 41, 55, 623, 19, 912, 638, - 883, 828, 789, 500, 780, 781, 740, 741, 338, 186, - 765, 616, 792, 753, 754, 758, 617, 757, 17, 602, - 777, 703, 704, 778, 710, 711, 618, 791, 424, 425, - 613, 764, 779, 766, 767, 63, 63, 768, 769, 145, - 614, 612, 1078, 41, 1074, 615, 775, 840, 790, 752, - 183, 184, 799, 800, 702, 803, 1073, 420, 423, 424, - 425, 421, 763, 422, 426, 177, 774, 773, 642, 820, - 566, 816, 817, 755, 756, 343, 409, 448, 420, 423, - 424, 425, 421, 239, 422, 426, 807, 344, 786, 787, - 407, 231, 811, 403, 1052, 1051, 355, 1012, 806, 963, - 991, 239, 683, 499, 428, 180, 181, 793, 63, 821, - 822, 823, 409, 145, 174, 772, 145, 145, 145, 145, - 145, 830, 714, 771, 1043, 831, 175, 45, 145, 1042, - 1011, 783, 145, 1044, 986, 516, 145, 47, 49, 839, - 145, 145, 439, 42, 1, 686, 1053, 907, 192, 647, - 639, 861, 63, 844, 530, 540, 541, 533, 534, 535, - 536, 537, 538, 539, 532, 212, 54, 542, 845, 846, - 847, 646, 824, 1027, 984, 653, 813, 656, 911, 888, - 1049, 38, 810, 887, 884, 455, 456, 454, 458, 853, - 207, 457, 453, 145, 155, 898, 899, 900, 145, 199, - 427, 145, 63, 869, 445, 871, 855, 893, 56, 889, - 833, 832, 684, 218, 550, 892, 770, 200, 890, 906, - 591, 401, 1041, 851, 903, 1010, 870, 852, 574, 904, - 642, 759, 642, 354, 905, 705, 366, 863, 864, 363, - 365, 868, 364, 597, 776, 524, 874, 352, 875, 876, - 877, 878, 377, 346, 862, 191, 411, 419, 417, 919, - 920, 416, 196, 63, 873, 788, 784, 190, 880, 1007, - 1056, 601, 933, 20, 931, 46, 185, 63, 61, 16, - 15, 901, 193, 939, 942, 938, 894, 954, 750, 895, - 192, 955, 897, 941, 957, 638, 14, 13, 24, 935, - 936, 962, 961, 964, 12, 11, 208, 10, 9, 8, - 7, 972, 6, 5, 975, 976, 977, 4, 978, 176, - 803, 980, 18, 2, 0, 0, 0, 0, 0, 63, - 239, 0, 0, 642, 0, 0, 0, 937, 0, 0, - 0, 63, 531, 530, 540, 541, 533, 534, 535, 536, - 537, 538, 539, 532, 0, 0, 542, 1006, 999, 0, - 0, 0, 0, 0, 0, 0, 888, 0, 990, 1016, - 887, 0, 966, 967, 
968, 959, 971, 0, 0, 1013, - 849, 0, 63, 63, 992, 0, 0, 1024, 0, 0, - 1026, 566, 1034, 1025, 983, 0, 0, 1015, 0, 0, - 0, 63, 0, 0, 241, 241, 241, 241, 1036, 241, - 241, 0, 0, 0, 0, 888, 241, 38, 0, 887, - 1045, 904, 0, 0, 0, 0, 994, 995, 0, 996, - 997, 0, 998, 0, 0, 1000, 0, 1002, 1004, 1005, - 0, 0, 0, 0, 0, 1046, 1060, 552, 0, 63, - 0, 1061, 208, 241, 0, 0, 1038, 207, 0, 1021, - 1022, 1023, 0, 1009, 0, 0, 0, 0, 0, 241, - 1072, 145, 0, 0, 241, 0, 0, 0, 0, 0, - 1081, 63, 63, 241, 241, 241, 241, 241, 241, 241, - 241, 1039, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 63, 63, 0, 63, 63, 0, 0, 0, - 0, 194, 192, 192, 192, 192, 192, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 192, 145, - 1062, 1083, 0, 145, 0, 0, 192, 1065, 0, 63, - 1068, 142, 0, 0, 0, 1059, 566, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 63, 0, - 0, 0, 0, 0, 197, 0, 0, 1086, 1087, 216, - 0, 0, 0, 341, 378, 0, 0, 0, 0, 0, - 0, 0, 145, 0, 0, 0, 0, 241, 241, 0, - 0, 0, 0, 0, 0, 0, 0, 63, 0, 63, - 63, 63, 145, 63, 143, 0, 63, 166, 0, 531, - 530, 540, 541, 533, 534, 535, 536, 537, 538, 539, - 532, 0, 715, 542, 0, 0, 143, 143, 209, 0, - 63, 0, 143, 0, 531, 530, 540, 541, 533, 534, - 535, 536, 537, 538, 539, 532, 0, 221, 542, 222, - 0, 0, 0, 227, 0, 0, 0, 0, 0, 229, - 598, 0, 0, 0, 0, 0, 0, 208, 0, 0, - 63, 63, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 63, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 63, 0, 208, 0, 413, 0, - 0, 0, 0, 241, 241, 0, 0, 438, 0, 0, - 143, 0, 143, 0, 0, 0, 143, 0, 0, 0, - 63, 0, 143, 0, 0, 0, 0, 0, 0, 0, - 493, 0, 495, 0, 0, 498, 0, 0, 501, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 63, 0, - 0, 0, 0, 0, 241, 0, 63, 0, 0, 0, - 0, 143, 0, 0, 19, 39, 21, 22, 143, 436, - 143, 0, 0, 0, 209, 0, 0, 0, 0, 0, - 0, 0, 33, 0, 0, 0, 0, 23, 0, 0, - 0, 0, 0, 143, 0, 143, 0, 0, 143, 0, - 0, 143, 0, 503, 0, 0, 32, 0, 0, 0, - 41, 0, 0, 0, 715, 742, 0, 241, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 762, - 0, 0, 0, 0, 0, 526, 0, 529, 0, 0, - 0, 0, 0, 543, 544, 545, 546, 547, 548, 549, - 515, 527, 528, 525, 531, 530, 540, 541, 533, 534, - 535, 536, 537, 538, 539, 532, 0, 
0, 542, 25, - 26, 28, 27, 30, 934, 0, 0, 0, 0, 0, - 0, 798, 31, 34, 35, 0, 0, 36, 37, 29, - 0, 0, 0, 241, 531, 530, 540, 541, 533, 534, - 535, 536, 537, 538, 539, 532, 0, 0, 542, 0, - 0, 0, 0, 143, 0, 0, 850, 0, 0, 0, - 0, 0, 0, 0, 605, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 829, 241, 531, 530, 540, 541, - 533, 534, 535, 536, 537, 538, 539, 532, 0, 0, - 542, 0, 632, 241, 0, 0, 0, 0, 0, 40, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 192, 0, - 0, 0, 0, 0, 0, 0, 0, 143, 0, 209, - 143, 143, 143, 143, 143, 0, 0, 0, 0, 0, - 0, 858, 620, 0, 682, 0, 143, 0, 0, 699, - 436, 0, 700, 0, 143, 143, 0, 0, 209, 0, - 0, 0, 0, 0, 0, 503, 0, 0, 0, 0, - 208, 0, 0, 891, 798, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 241, 241, 0, 241, 909, 0, - 0, 0, 0, 0, 0, 0, 0, 143, 0, 0, - 0, 0, 143, 0, 0, 143, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 932, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 858, 0, 0, 241, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 749, 503, - 0, 0, 0, 749, 749, 461, 0, 749, 0, 974, - 0, 974, 974, 974, 0, 979, 0, 0, 241, 0, - 0, 749, 749, 749, 749, 0, 0, 473, 0, 0, - 0, 0, 0, 0, 0, 0, 749, 0, 0, 0, - 0, 0, 241, 478, 479, 480, 481, 482, 483, 484, - 0, 485, 486, 487, 488, 489, 474, 475, 476, 477, - 459, 460, 0, 0, 462, 0, 463, 464, 465, 466, - 467, 468, 469, 470, 471, 472, 0, 0, 0, 0, - 0, 0, 1017, 1018, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 909, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 241, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1048, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 879, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 208, 0, 0, - 1063, 0, 0, 0, 0, 0, 0, 0, 1067, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 749, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 749, 0, 0, 0, 0, 0, 0, - 929, 0, 0, 0, 930, 143, 0, 0, 0, 0, - 0, 0, 209, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 
143, 0, 0, 0, 143, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 749, - 0, 0, 0, 0, 0, 503, 749, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 143, 0, 0, 0, + 228, 40, 1063, 922, 470, 227, 944, 202, 261, 758, + 469, 3, 923, 998, 703, 515, 176, 719, 919, 513, + 170, 831, 845, 654, 718, 901, 800, 661, 834, 742, + 755, 664, 728, 676, 403, 46, 631, 409, 274, 502, + 415, 40, 751, 684, 517, 251, 301, 716, 200, 181, + 303, 175, 423, 299, 866, 256, 185, 171, 172, 173, + 174, 45, 1089, 1080, 270, 736, 663, 1087, 1070, 1085, + 1079, 914, 482, 986, 250, 278, 1043, 264, 862, 735, + 294, 1010, 1020, 50, 189, 743, 981, 771, 979, 1084, + 1069, 949, 950, 951, 395, 396, 1086, 1064, 886, 685, + 952, 769, 285, 280, 139, 52, 53, 54, 55, 56, + 284, 730, 138, 252, 139, 580, 436, 435, 445, 446, + 438, 439, 440, 441, 442, 443, 444, 437, 775, 1041, + 447, 704, 706, 141, 142, 143, 575, 768, 844, 856, + 843, 296, 867, 298, 204, 842, 276, 281, 150, 140, + 459, 460, 1056, 195, 801, 300, 300, 300, 300, 995, + 300, 300, 295, 297, 849, 794, 603, 300, 1051, 436, + 435, 445, 446, 438, 439, 440, 441, 442, 443, 444, + 437, 447, 40, 447, 533, 765, 770, 763, 427, 289, + 743, 437, 412, 600, 447, 421, 420, 456, 422, 255, + 458, 729, 958, 705, 532, 420, 773, 776, 916, 392, + 393, 394, 422, 397, 398, 411, 677, 677, 293, 818, + 400, 422, 577, 860, 1042, 1040, 953, 468, 730, 472, + 473, 474, 475, 476, 477, 478, 767, 481, 483, 483, + 483, 483, 483, 483, 483, 483, 491, 492, 493, 494, + 766, 1068, 959, 638, 287, 732, 275, 514, 1059, 417, + 733, 20, 21, 41, 23, 24, 413, 636, 637, 635, + 270, 527, 1014, 1013, 198, 772, 883, 43, 300, 655, + 35, 656, 885, 300, 875, 25, 774, 634, 874, 1052, + 863, 1074, 300, 300, 300, 300, 300, 300, 300, 300, + 1023, 1012, 66, 873, 34, 957, 148, 946, 43, 148, + 529, 484, 485, 486, 487, 488, 489, 490, 729, 857, + 621, 623, 624, 727, 726, 622, 421, 420, 813, 148, + 148, 574, 657, 918, 573, 148, 579, 585, 226, 291, + 587, 1076, 402, 422, 457, 588, 
589, 590, 591, 592, + 593, 594, 595, 791, 792, 793, 1031, 402, 402, 461, + 462, 463, 464, 465, 466, 467, 64, 27, 28, 30, + 29, 32, 884, 286, 882, 275, 421, 420, 1031, 1032, + 33, 36, 37, 1007, 1006, 38, 39, 31, 939, 402, + 994, 402, 1045, 422, 1044, 271, 300, 300, 964, 963, + 954, 255, 445, 446, 438, 439, 440, 441, 442, 443, + 444, 437, 526, 148, 447, 148, 902, 606, 607, 148, + 832, 812, 666, 811, 990, 148, 961, 960, 608, 66, + 66, 66, 66, 47, 66, 66, 632, 499, 904, 421, + 420, 66, 832, 440, 441, 442, 443, 444, 437, 596, + 597, 447, 806, 402, 499, 402, 422, 42, 962, 40, + 850, 401, 499, 421, 420, 906, 66, 910, 498, 905, + 610, 903, 806, 472, 628, 627, 908, 625, 629, 891, + 422, 666, 402, 668, 526, 907, 535, 534, 20, 20, + 909, 911, 499, 302, 302, 302, 302, 806, 302, 302, + 256, 256, 256, 256, 256, 302, 270, 687, 658, 659, + 524, 920, 604, 826, 526, 514, 827, 707, 43, 674, + 681, 759, 806, 256, 148, 668, 737, 756, 137, 182, + 425, 148, 148, 148, 270, 43, 43, 933, 66, 853, + 669, 670, 710, 752, 673, 688, 835, 836, 691, 713, + 700, 525, 66, 523, 148, 747, 148, 66, 680, 148, + 682, 683, 148, 708, 148, 709, 66, 66, 66, 66, + 66, 66, 66, 66, 723, 633, 43, 744, 745, 746, + 715, 720, 249, 630, 629, 300, 639, 640, 641, 642, + 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, + 653, 271, 302, 757, 712, 58, 948, 920, 738, 739, + 740, 741, 753, 754, 689, 690, 302, 692, 876, 838, + 583, 302, 20, 748, 749, 750, 399, 602, 18, 795, + 302, 302, 302, 302, 302, 302, 302, 302, 779, 616, + 841, 632, 783, 784, 255, 255, 255, 255, 255, 1026, + 628, 697, 695, 840, 629, 694, 698, 696, 699, 255, + 508, 509, 693, 601, 186, 187, 1083, 255, 1078, 43, + 66, 66, 796, 888, 148, 780, 1082, 180, 416, 421, + 420, 789, 790, 788, 868, 829, 830, 1061, 531, 66, + 292, 859, 414, 148, 1060, 828, 422, 1024, 854, 66, + 438, 439, 440, 441, 442, 443, 444, 437, 988, 817, + 447, 404, 217, 216, 219, 220, 221, 222, 761, 1016, + 839, 218, 223, 405, 582, 787, 512, 183, 184, 805, + 416, 847, 848, 786, 302, 302, 851, 
177, 1049, 178, + 47, 815, 66, 1048, 504, 507, 508, 509, 505, 1018, + 506, 510, 832, 612, 835, 836, 66, 418, 1053, 1011, + 855, 300, 599, 425, 49, 720, 302, 51, 864, 865, + 522, 148, 44, 1, 148, 148, 148, 148, 148, 300, + 633, 764, 1062, 879, 943, 725, 148, 717, 273, 148, + 797, 798, 799, 148, 57, 724, 872, 148, 148, 869, + 870, 871, 1039, 1009, 731, 861, 660, 734, 947, 66, + 1058, 858, 538, 539, 878, 504, 507, 508, 509, 505, + 678, 506, 510, 894, 895, 537, 541, 540, 925, 536, + 40, 158, 887, 924, 270, 921, 912, 271, 900, 913, + 926, 893, 262, 935, 936, 937, 511, 528, 419, 59, + 148, 915, 881, 929, 880, 148, 762, 279, 148, 66, + 455, 785, 263, 927, 191, 271, 605, 930, 408, 1047, + 942, 1017, 302, 302, 609, 941, 816, 479, 675, 203, + 620, 215, 212, 940, 214, 213, 611, 825, 429, 201, + 193, 254, 256, 495, 503, 501, 893, 720, 500, 720, + 259, 837, 967, 833, 253, 890, 985, 1050, 615, 22, + 48, 970, 969, 188, 17, 984, 16, 15, 14, 955, + 956, 26, 13, 302, 12, 11, 10, 9, 8, 7, + 6, 665, 667, 5, 4, 179, 19, 977, 2, 0, + 989, 0, 302, 0, 0, 679, 0, 997, 0, 1000, + 1001, 1002, 0, 0, 1003, 897, 898, 1005, 0, 851, + 0, 0, 0, 0, 0, 0, 300, 0, 974, 975, + 0, 976, 0, 0, 978, 702, 980, 0, 0, 0, + 0, 0, 66, 0, 0, 0, 0, 0, 720, 0, + 0, 925, 0, 66, 1028, 0, 924, 0, 0, 0, + 0, 0, 0, 1025, 1027, 1019, 0, 0, 0, 0, + 0, 1008, 0, 0, 1038, 0, 1037, 1046, 1036, 1015, + 0, 0, 0, 0, 0, 0, 0, 925, 0, 40, + 1054, 0, 924, 0, 66, 66, 255, 0, 0, 1055, + 0, 0, 406, 410, 0, 0, 846, 0, 0, 0, + 972, 0, 0, 66, 1066, 0, 0, 302, 0, 428, + 148, 270, 1071, 0, 0, 0, 0, 156, 0, 66, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1081, + 0, 0, 0, 0, 0, 0, 0, 1088, 0, 0, + 0, 166, 0, 471, 0, 0, 0, 0, 877, 302, + 480, 0, 0, 0, 66, 66, 0, 0, 0, 0, + 0, 0, 0, 0, 257, 0, 0, 302, 0, 803, + 0, 0, 0, 804, 66, 66, 0, 66, 66, 0, + 808, 809, 810, 302, 0, 814, 0, 0, 1021, 0, + 820, 151, 821, 822, 823, 824, 145, 153, 407, 0, + 0, 148, 159, 155, 0, 148, 0, 0, 0, 0, + 0, 66, 0, 0, 0, 271, 148, 0, 928, 846, + 260, 0, 0, 157, 0, 277, 161, 0, 0, 
0, + 146, 0, 0, 169, 0, 0, 0, 0, 302, 302, + 0, 302, 945, 0, 0, 0, 0, 0, 0, 152, + 0, 192, 0, 146, 146, 272, 0, 0, 66, 146, + 66, 66, 66, 148, 66, 0, 66, 0, 154, 160, + 162, 163, 164, 165, 0, 968, 168, 167, 435, 445, + 446, 438, 439, 440, 441, 442, 443, 444, 437, 0, + 66, 447, 1090, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 282, 0, 283, 0, 0, 0, 288, + 0, 0, 0, 0, 0, 290, 0, 899, 0, 66, + 66, 0, 999, 402, 999, 999, 999, 0, 1004, 0, + 302, 0, 66, 0, 0, 618, 619, 146, 0, 146, + 0, 0, 0, 146, 0, 0, 0, 0, 0, 146, + 0, 0, 0, 0, 302, 0, 66, 938, 0, 436, + 435, 445, 446, 438, 439, 440, 441, 442, 443, 444, + 437, 0, 0, 447, 0, 0, 0, 0, 0, 0, + 0, 66, 0, 1029, 1030, 0, 0, 471, 0, 66, + 671, 672, 0, 0, 0, 0, 945, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 971, 0, 0, 0, + 0, 0, 0, 973, 497, 0, 0, 0, 0, 0, + 1057, 0, 0, 521, 982, 983, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 991, 992, 993, + 0, 996, 271, 896, 576, 1073, 578, 714, 146, 581, + 0, 0, 584, 1077, 0, 146, 519, 146, 0, 0, + 0, 272, 0, 436, 435, 445, 446, 438, 439, 440, + 441, 442, 443, 444, 437, 0, 0, 447, 146, 0, + 146, 0, 544, 146, 0, 0, 146, 0, 586, 0, + 0, 0, 0, 0, 0, 0, 0, 1022, 0, 0, + 0, 0, 0, 0, 556, 0, 0, 0, 0, 0, + 0, 0, 1033, 1034, 1035, 0, 781, 782, 0, 410, + 561, 562, 563, 564, 565, 566, 567, 0, 568, 569, + 570, 571, 572, 557, 558, 559, 560, 542, 543, 0, + 0, 545, 0, 546, 547, 548, 549, 550, 551, 552, + 553, 554, 555, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 1067, 0, 598, 0, 0, 1072, 0, 0, + 0, 0, 0, 0, 1075, 0, 0, 0, 0, 0, + 0, 807, 0, 617, 0, 0, 0, 0, 0, 0, + 0, 0, 819, 0, 0, 0, 0, 0, 146, 0, + 0, 0, 1092, 1093, 0, 0, 0, 0, 0, 0, + 0, 431, 0, 434, 0, 0, 0, 146, 0, 448, + 449, 450, 451, 452, 453, 454, 586, 432, 433, 430, + 436, 435, 445, 446, 438, 439, 440, 441, 442, 443, + 444, 437, 0, 0, 447, 436, 435, 445, 446, 438, + 439, 440, 441, 442, 443, 444, 437, 0, 0, 447, + 0, 686, 0, 0, 0, 0, 0, 192, 0, 0, + 0, 0, 192, 192, 0, 0, 192, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 711, 0, + 192, 192, 192, 192, 0, 146, 0, 272, 146, 146, + 146, 146, 146, 0, 0, 0, 
0, 0, 0, 0, + 701, 0, 0, 146, 0, 0, 0, 519, 0, 0, + 0, 146, 146, 0, 0, 272, 0, 0, 0, 0, + 0, 0, 586, 917, 802, 0, 0, 0, 0, 0, + 760, 0, 0, 0, 0, 777, 0, 931, 778, 0, + 932, 0, 0, 934, 436, 435, 445, 446, 438, 439, + 440, 441, 442, 443, 444, 437, 0, 0, 447, 0, + 0, 0, 0, 0, 146, 101, 0, 0, 0, 146, + 0, 0, 146, 0, 80, 0, 0, 0, 0, 88, + 0, 90, 0, 0, 111, 97, 0, 0, 0, 0, + 0, 0, 586, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 65, 192, 0, 0, 0, 0, 0, + 0, 0, 75, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 987, 0, 0, 0, 0, 0, 0, + 471, 0, 0, 0, 0, 0, 0, 0, 436, 435, + 445, 446, 438, 439, 440, 441, 442, 443, 444, 437, + 0, 192, 447, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 192, 0, 0, 149, 0, 0, 0, + 0, 104, 0, 0, 0, 76, 0, 109, 102, 0, + 0, 103, 108, 91, 116, 105, 122, 128, 129, 114, + 127, 69, 120, 113, 95, 85, 86, 68, 0, 107, + 79, 83, 78, 100, 117, 118, 77, 135, 72, 126, + 71, 73, 125, 99, 115, 121, 96, 93, 70, 119, + 94, 92, 87, 81, 0, 0, 0, 112, 123, 136, + 0, 0, 130, 131, 132, 133, 98, 74, 84, 110, + 889, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1065, 471, 67, 0, 89, 134, 106, + 82, 124, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 146, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 192, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 436, 0, 0, 0, + 0, 0, 0, 0, 0, 272, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 320, 310, 283, 322, 261, 275, 330, - 276, 277, 304, 249, 291, 98, 273, 0, 264, 244, - 270, 245, 262, 285, 77, 288, 260, 312, 294, 85, - 328, 87, 299, 0, 108, 94, 0, 0, 287, 314, - 289, 309, 282, 305, 254, 298, 323, 274, 302, 324, - 0, 0, 0, 62, 0, 643, 644, 0, 0, 0, - 0, 0, 72, 0, 301, 319, 272, 303, 243, 300, - 0, 247, 250, 329, 317, 267, 268, 804, 0, 0, - 0, 0, 0, 0, 286, 290, 306, 280, 0, 0, - 0, 0, 0, 0, 0, 0, 265, 0, 297, 0, - 0, 0, 251, 248, 0, 284, 0, 0, 0, 253, - 0, 266, 307, 0, 315, 281, 146, 318, 279, 278, - 321, 101, 313, 263, 271, 73, 269, 106, 99, 209, - 296, 100, 105, 88, 113, 102, 119, 125, 126, 111, - 124, 66, 117, 110, 92, 82, 83, 65, 0, 
104, - 76, 80, 75, 97, 114, 115, 74, 132, 69, 123, - 68, 70, 122, 96, 112, 118, 93, 90, 67, 116, - 91, 89, 84, 78, 0, 246, 0, 109, 120, 133, - 259, 316, 127, 128, 129, 130, 95, 71, 81, 107, - 257, 258, 255, 256, 292, 293, 325, 326, 327, 308, - 252, 0, 0, 311, 295, 64, 0, 86, 131, 103, - 79, 121, 320, 310, 283, 322, 261, 275, 330, 276, - 277, 304, 249, 291, 98, 273, 0, 264, 244, 270, - 245, 262, 285, 77, 288, 260, 312, 294, 85, 328, - 87, 299, 0, 108, 94, 0, 0, 287, 314, 289, - 309, 282, 305, 254, 298, 323, 274, 302, 324, 0, - 0, 0, 62, 0, 643, 644, 0, 0, 0, 0, - 0, 72, 0, 301, 319, 272, 303, 243, 300, 0, - 247, 250, 329, 317, 267, 268, 0, 0, 0, 0, - 0, 0, 0, 286, 290, 306, 280, 0, 0, 0, - 0, 0, 0, 0, 0, 265, 0, 297, 0, 0, - 0, 251, 248, 0, 284, 0, 0, 0, 253, 0, - 266, 307, 0, 315, 281, 146, 318, 279, 278, 321, - 101, 313, 263, 271, 73, 269, 106, 99, 0, 296, - 100, 105, 88, 113, 102, 119, 125, 126, 111, 124, - 66, 117, 110, 92, 82, 83, 65, 0, 104, 76, - 80, 75, 97, 114, 115, 74, 132, 69, 123, 68, - 70, 122, 96, 112, 118, 93, 90, 67, 116, 91, - 89, 84, 78, 0, 246, 0, 109, 120, 133, 259, - 316, 127, 128, 129, 130, 95, 71, 81, 107, 257, - 258, 255, 256, 292, 293, 325, 326, 327, 308, 252, - 0, 0, 311, 295, 64, 0, 86, 131, 103, 79, - 121, 320, 310, 283, 322, 261, 275, 330, 276, 277, - 304, 249, 291, 98, 273, 0, 264, 244, 270, 245, - 262, 285, 77, 288, 260, 312, 294, 85, 328, 87, - 299, 0, 108, 94, 0, 0, 287, 314, 289, 309, - 282, 305, 254, 298, 323, 274, 302, 324, 0, 0, - 0, 62, 0, 0, 0, 0, 0, 0, 0, 0, - 72, 0, 301, 319, 272, 303, 243, 300, 0, 247, - 250, 329, 317, 267, 268, 0, 0, 0, 0, 0, - 0, 0, 286, 290, 306, 280, 0, 0, 0, 0, - 0, 0, 1035, 0, 265, 0, 297, 0, 0, 0, - 251, 248, 0, 284, 0, 0, 0, 253, 0, 266, - 307, 0, 315, 281, 146, 318, 279, 278, 321, 101, - 313, 263, 271, 73, 269, 106, 99, 0, 296, 100, - 105, 88, 113, 102, 119, 125, 126, 111, 124, 66, - 117, 110, 92, 82, 83, 65, 0, 104, 76, 80, - 75, 97, 114, 115, 74, 132, 69, 123, 68, 70, - 122, 96, 112, 118, 93, 90, 
67, 116, 91, 89, - 84, 78, 0, 246, 0, 109, 120, 133, 259, 316, - 127, 128, 129, 130, 95, 71, 81, 107, 257, 258, - 255, 256, 292, 293, 325, 326, 327, 308, 252, 0, - 0, 311, 295, 64, 0, 86, 131, 103, 79, 121, - 320, 310, 283, 322, 261, 275, 330, 276, 277, 304, - 249, 291, 98, 273, 0, 264, 244, 270, 245, 262, - 285, 77, 288, 260, 312, 294, 85, 328, 87, 299, - 0, 108, 94, 0, 0, 287, 314, 289, 309, 282, - 305, 254, 298, 323, 274, 302, 324, 41, 0, 0, - 62, 0, 0, 0, 0, 0, 0, 0, 0, 72, - 0, 301, 319, 272, 303, 243, 300, 0, 247, 250, - 329, 317, 267, 268, 0, 0, 0, 0, 0, 0, - 0, 286, 290, 306, 280, 0, 0, 0, 0, 0, - 0, 0, 0, 265, 0, 297, 0, 0, 0, 251, - 248, 0, 284, 0, 0, 0, 253, 0, 266, 307, - 0, 315, 281, 146, 318, 279, 278, 321, 101, 313, - 263, 271, 73, 269, 106, 99, 0, 296, 100, 105, - 88, 113, 102, 119, 125, 126, 111, 124, 66, 117, - 110, 92, 82, 83, 65, 0, 104, 76, 80, 75, - 97, 114, 115, 74, 132, 69, 123, 68, 70, 122, - 96, 112, 118, 93, 90, 67, 116, 91, 89, 84, - 78, 0, 246, 0, 109, 120, 133, 259, 316, 127, - 128, 129, 130, 95, 71, 81, 107, 257, 258, 255, - 256, 292, 293, 325, 326, 327, 308, 252, 0, 0, - 311, 295, 64, 0, 86, 131, 103, 79, 121, 320, - 310, 283, 322, 261, 275, 330, 276, 277, 304, 249, - 291, 98, 273, 0, 264, 244, 270, 245, 262, 285, - 77, 288, 260, 312, 294, 85, 328, 87, 299, 0, - 108, 94, 0, 0, 287, 314, 289, 309, 282, 305, - 254, 298, 323, 274, 302, 324, 0, 0, 0, 210, - 0, 0, 0, 0, 0, 0, 0, 0, 72, 0, - 301, 319, 272, 303, 243, 300, 0, 247, 250, 329, - 317, 267, 268, 0, 0, 0, 0, 0, 0, 0, - 286, 290, 306, 280, 0, 0, 0, 0, 0, 0, - 940, 0, 265, 0, 297, 0, 0, 0, 251, 248, - 0, 284, 0, 0, 0, 253, 0, 266, 307, 0, - 315, 281, 146, 318, 279, 278, 321, 101, 313, 263, - 271, 73, 269, 106, 99, 0, 296, 100, 105, 88, - 113, 102, 119, 125, 126, 111, 124, 66, 117, 110, - 92, 82, 83, 65, 0, 104, 76, 80, 75, 97, - 114, 115, 74, 132, 69, 123, 68, 70, 122, 96, - 112, 118, 93, 90, 67, 116, 91, 89, 84, 78, - 0, 246, 0, 109, 120, 133, 259, 316, 127, 128, - 129, 130, 95, 
71, 81, 107, 257, 258, 255, 256, - 292, 293, 325, 326, 327, 308, 252, 0, 0, 311, - 295, 64, 0, 86, 131, 103, 79, 121, 320, 310, - 283, 322, 261, 275, 330, 276, 277, 304, 249, 291, - 98, 273, 0, 264, 244, 270, 245, 262, 285, 77, - 288, 260, 312, 294, 85, 328, 87, 299, 0, 108, - 94, 0, 0, 287, 314, 289, 309, 282, 305, 254, - 298, 323, 274, 302, 324, 0, 0, 0, 62, 0, - 447, 0, 0, 0, 0, 0, 0, 72, 0, 301, - 319, 272, 303, 243, 300, 0, 247, 250, 329, 317, - 267, 268, 0, 0, 0, 0, 0, 0, 0, 286, - 290, 306, 280, 0, 0, 0, 0, 0, 0, 0, - 0, 265, 0, 297, 0, 0, 0, 251, 248, 0, - 284, 0, 0, 0, 253, 0, 266, 307, 0, 315, - 281, 146, 318, 279, 278, 321, 101, 313, 263, 271, - 73, 269, 106, 99, 0, 296, 100, 105, 88, 113, - 102, 119, 125, 126, 111, 124, 66, 117, 110, 92, - 82, 83, 65, 0, 104, 76, 80, 75, 97, 114, - 115, 74, 132, 69, 123, 68, 70, 122, 96, 112, - 118, 93, 90, 67, 116, 91, 89, 84, 78, 0, - 246, 0, 109, 120, 133, 259, 316, 127, 128, 129, - 130, 95, 71, 81, 107, 257, 258, 255, 256, 292, - 293, 325, 326, 327, 308, 252, 0, 0, 311, 295, - 64, 0, 86, 131, 103, 79, 121, 320, 310, 283, - 322, 261, 275, 330, 276, 277, 304, 249, 291, 98, - 273, 0, 264, 244, 270, 245, 262, 285, 77, 288, - 260, 312, 294, 85, 328, 87, 299, 0, 108, 94, - 0, 0, 287, 314, 289, 309, 282, 305, 254, 298, - 323, 274, 302, 324, 0, 0, 0, 62, 0, 0, - 0, 0, 0, 0, 0, 0, 72, 0, 301, 319, - 272, 303, 243, 300, 0, 247, 250, 329, 317, 267, - 268, 0, 0, 0, 0, 0, 0, 0, 286, 290, - 306, 280, 0, 0, 0, 0, 0, 0, 0, 0, - 265, 0, 297, 0, 0, 0, 251, 248, 0, 284, - 0, 0, 0, 253, 0, 266, 307, 0, 315, 281, - 146, 318, 279, 278, 321, 101, 313, 263, 271, 73, - 269, 106, 99, 0, 296, 100, 105, 88, 113, 102, - 119, 125, 126, 111, 124, 66, 117, 110, 92, 82, - 83, 65, 0, 104, 76, 80, 75, 97, 114, 115, - 74, 132, 69, 123, 68, 70, 122, 96, 112, 118, - 93, 90, 67, 116, 91, 89, 84, 78, 0, 246, - 0, 109, 120, 133, 259, 316, 127, 128, 129, 130, - 95, 71, 81, 107, 257, 258, 255, 256, 292, 293, - 325, 326, 327, 308, 252, 0, 0, 311, 295, 64, - 
0, 86, 131, 103, 79, 121, 320, 310, 283, 322, - 261, 275, 330, 276, 277, 304, 249, 291, 98, 273, - 0, 264, 244, 270, 245, 262, 285, 77, 288, 260, - 312, 294, 85, 328, 87, 299, 0, 108, 94, 0, - 0, 287, 314, 289, 309, 282, 305, 254, 298, 323, - 274, 302, 324, 0, 0, 0, 210, 0, 0, 0, - 0, 0, 0, 0, 0, 72, 0, 301, 319, 272, - 303, 243, 300, 0, 247, 250, 329, 317, 267, 268, - 0, 0, 0, 0, 0, 0, 0, 286, 290, 306, - 280, 0, 0, 0, 0, 0, 0, 0, 0, 265, - 0, 297, 0, 0, 0, 251, 248, 0, 284, 0, - 0, 0, 253, 0, 266, 307, 0, 315, 281, 146, - 318, 279, 278, 321, 101, 313, 263, 271, 73, 269, - 106, 99, 0, 296, 100, 105, 88, 113, 102, 119, - 125, 126, 111, 124, 66, 117, 110, 92, 82, 83, - 65, 0, 104, 76, 80, 75, 97, 114, 115, 74, - 132, 69, 123, 68, 70, 122, 96, 112, 118, 93, - 90, 67, 116, 91, 89, 84, 78, 0, 246, 0, - 109, 120, 133, 259, 316, 127, 128, 129, 130, 95, - 71, 81, 107, 257, 258, 255, 256, 292, 293, 325, - 326, 327, 308, 252, 0, 0, 311, 295, 64, 0, - 86, 131, 103, 79, 121, 320, 310, 283, 322, 261, - 275, 330, 276, 277, 304, 249, 291, 98, 273, 0, - 264, 244, 270, 245, 262, 285, 77, 288, 260, 312, - 294, 85, 328, 87, 299, 0, 108, 94, 0, 0, - 287, 314, 289, 309, 282, 305, 254, 298, 323, 274, - 302, 324, 0, 0, 0, 144, 0, 0, 0, 0, - 0, 0, 0, 0, 72, 0, 301, 319, 272, 303, - 243, 300, 0, 247, 250, 329, 317, 267, 268, 0, - 0, 0, 0, 0, 0, 0, 286, 290, 306, 280, - 0, 0, 0, 0, 0, 0, 0, 0, 265, 0, - 297, 0, 0, 0, 251, 248, 0, 284, 0, 0, - 0, 253, 0, 266, 307, 0, 315, 281, 146, 318, - 279, 278, 321, 101, 313, 263, 271, 73, 269, 106, - 99, 0, 296, 100, 105, 88, 113, 102, 119, 125, - 126, 111, 124, 66, 117, 110, 92, 82, 83, 65, - 0, 104, 76, 80, 75, 97, 114, 115, 74, 132, - 69, 123, 68, 70, 122, 96, 112, 118, 93, 90, - 67, 116, 91, 89, 84, 78, 0, 246, 0, 109, - 120, 133, 259, 316, 127, 128, 129, 130, 95, 71, - 81, 107, 257, 258, 255, 256, 292, 293, 325, 326, - 327, 308, 252, 0, 0, 311, 295, 64, 0, 86, - 131, 103, 79, 121, 98, 0, 0, 744, 0, 350, - 0, 0, 0, 77, 0, 349, 0, 0, 85, 388, - 87, 
0, 0, 108, 94, 0, 0, 0, 0, 381, - 382, 0, 0, 0, 0, 0, 0, 0, 0, 41, - 0, 0, 210, 368, 367, 370, 371, 372, 373, 0, - 0, 72, 369, 374, 375, 376, 0, 0, 347, 361, - 0, 387, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 965, 0, 0, 0, 966, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 358, 359, 747, 0, 0, 0, 399, 0, 360, - 0, 0, 356, 357, 362, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 146, 0, 0, 397, 0, - 101, 0, 0, 0, 73, 0, 106, 99, 0, 0, - 100, 105, 88, 113, 102, 119, 125, 126, 111, 124, - 66, 117, 110, 92, 82, 83, 65, 0, 104, 76, - 80, 75, 97, 114, 115, 74, 132, 69, 123, 68, - 70, 122, 96, 112, 118, 93, 90, 67, 116, 91, - 89, 84, 78, 0, 0, 0, 109, 120, 133, 0, - 0, 127, 128, 129, 130, 95, 71, 81, 107, 389, - 398, 395, 396, 393, 394, 392, 391, 390, 400, 383, - 384, 386, 0, 385, 64, 0, 86, 131, 103, 79, - 121, 98, 0, 0, 0, 0, 350, 0, 0, 0, - 77, 0, 349, 0, 0, 85, 388, 87, 0, 0, - 108, 94, 0, 0, 0, 0, 381, 382, 0, 0, - 0, 0, 0, 0, 0, 0, 41, 0, 341, 210, - 368, 367, 370, 371, 372, 373, 0, 0, 72, 369, - 374, 375, 376, 0, 0, 347, 361, 0, 387, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 358, 359, - 0, 0, 0, 0, 399, 0, 360, 0, 0, 356, - 357, 362, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 146, 0, 0, 397, 0, 101, 0, 0, - 0, 73, 0, 106, 99, 0, 0, 100, 105, 88, - 113, 102, 119, 125, 126, 111, 124, 66, 117, 110, - 92, 82, 83, 65, 0, 104, 76, 80, 75, 97, - 114, 115, 74, 132, 69, 123, 68, 70, 122, 96, - 112, 118, 93, 90, 67, 116, 91, 89, 84, 78, - 0, 0, 0, 109, 120, 133, 0, 0, 127, 128, - 129, 130, 95, 71, 81, 107, 389, 398, 395, 396, - 393, 394, 392, 391, 390, 400, 383, 384, 386, 0, - 385, 64, 0, 86, 131, 103, 79, 121, 98, 0, - 0, 0, 0, 350, 0, 0, 0, 77, 0, 349, - 0, 0, 85, 388, 87, 0, 0, 108, 94, 0, - 0, 0, 0, 381, 382, 0, 0, 0, 0, 0, - 0, 0, 0, 41, 0, 0, 210, 368, 367, 370, - 371, 372, 373, 0, 0, 72, 369, 374, 375, 376, - 0, 0, 347, 361, 0, 387, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 146, 0, 0, 0, 146, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 358, 359, 747, 0, 0, - 0, 399, 0, 360, 0, 0, 356, 357, 362, 0, 
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 146, - 0, 0, 397, 0, 101, 0, 0, 0, 73, 0, - 106, 99, 0, 0, 100, 105, 88, 113, 102, 119, - 125, 126, 111, 124, 66, 117, 110, 92, 82, 83, - 65, 0, 104, 76, 80, 75, 97, 114, 115, 74, - 132, 69, 123, 68, 70, 122, 96, 112, 118, 93, - 90, 67, 116, 91, 89, 84, 78, 0, 0, 0, - 109, 120, 133, 0, 0, 127, 128, 129, 130, 95, - 71, 81, 107, 389, 398, 395, 396, 393, 394, 392, - 391, 390, 400, 383, 384, 386, 19, 385, 64, 0, - 86, 131, 103, 79, 121, 0, 0, 98, 0, 0, - 0, 0, 350, 0, 0, 0, 77, 0, 349, 0, - 0, 85, 388, 87, 0, 0, 108, 94, 0, 0, - 0, 0, 381, 382, 0, 0, 0, 0, 0, 0, - 0, 0, 41, 0, 0, 210, 368, 367, 370, 371, - 372, 373, 0, 0, 72, 369, 374, 375, 376, 0, - 0, 347, 361, 0, 387, 0, 0, 0, 0, 0, + 146, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 381, 371, 0, 344, + 383, 322, 336, 391, 337, 338, 365, 310, 352, 101, + 334, 0, 325, 305, 331, 306, 323, 346, 80, 349, + 321, 373, 355, 88, 389, 90, 360, 519, 111, 97, + 0, 0, 348, 375, 350, 370, 343, 366, 315, 359, + 384, 335, 363, 385, 0, 0, 0, 65, 0, 721, + 722, 0, 0, 0, 0, 0, 75, 0, 362, 380, + 333, 364, 304, 361, 0, 308, 311, 390, 378, 328, + 329, 852, 0, 0, 0, 0, 0, 0, 347, 351, + 367, 341, 0, 0, 0, 0, 0, 0, 0, 0, + 326, 0, 358, 0, 0, 0, 312, 309, 0, 345, + 0, 0, 0, 314, 0, 327, 368, 0, 376, 342, + 149, 379, 340, 339, 382, 104, 374, 324, 332, 76, + 330, 109, 102, 0, 357, 103, 108, 91, 116, 105, + 122, 128, 129, 114, 127, 69, 120, 113, 95, 85, + 86, 68, 272, 107, 79, 83, 78, 100, 117, 118, + 77, 135, 72, 126, 71, 73, 125, 99, 115, 121, + 96, 93, 70, 119, 94, 92, 87, 81, 0, 307, + 0, 112, 123, 136, 320, 377, 130, 131, 132, 133, + 98, 74, 84, 110, 318, 319, 316, 317, 353, 354, + 386, 387, 388, 369, 313, 0, 0, 372, 356, 67, + 0, 89, 134, 106, 82, 124, 381, 371, 0, 344, + 383, 322, 336, 391, 337, 338, 365, 310, 352, 101, + 334, 0, 325, 305, 331, 306, 323, 346, 80, 349, + 321, 373, 355, 88, 389, 90, 360, 0, 111, 97, + 0, 0, 348, 375, 350, 370, 343, 366, 315, 359, + 384, 335, 363, 385, 0, 0, 0, 65, 0, 721, 
+ 722, 0, 0, 0, 0, 0, 75, 0, 362, 380, + 333, 364, 304, 361, 0, 308, 311, 390, 378, 328, + 329, 0, 0, 0, 0, 0, 0, 0, 347, 351, + 367, 341, 0, 0, 0, 0, 0, 0, 0, 0, + 326, 0, 358, 0, 0, 0, 312, 309, 0, 345, + 0, 0, 0, 314, 0, 327, 368, 0, 376, 342, + 149, 379, 340, 339, 382, 104, 374, 324, 332, 76, + 330, 109, 102, 0, 357, 103, 108, 91, 116, 105, + 122, 128, 129, 114, 127, 69, 120, 113, 95, 85, + 86, 68, 0, 107, 79, 83, 78, 100, 117, 118, + 77, 135, 72, 126, 71, 73, 125, 99, 115, 121, + 96, 93, 70, 119, 94, 92, 87, 81, 0, 307, + 0, 112, 123, 136, 320, 377, 130, 131, 132, 133, + 98, 74, 84, 110, 318, 319, 316, 317, 353, 354, + 386, 387, 388, 369, 313, 0, 0, 372, 356, 67, + 0, 89, 134, 106, 82, 124, 381, 371, 0, 344, + 383, 322, 336, 391, 337, 338, 365, 310, 352, 101, + 334, 0, 325, 305, 331, 306, 323, 346, 80, 349, + 321, 373, 355, 88, 389, 90, 360, 0, 111, 97, + 0, 0, 348, 375, 350, 370, 343, 366, 315, 359, + 384, 335, 363, 385, 43, 0, 0, 65, 0, 0, + 0, 0, 0, 0, 0, 0, 75, 0, 362, 380, + 333, 364, 304, 361, 0, 308, 311, 390, 378, 328, + 329, 0, 0, 0, 0, 0, 0, 0, 347, 351, + 367, 341, 0, 0, 0, 0, 0, 0, 0, 0, + 326, 0, 358, 0, 0, 0, 312, 309, 0, 345, + 0, 0, 0, 314, 0, 327, 368, 0, 376, 342, + 149, 379, 340, 339, 382, 104, 374, 324, 332, 76, + 330, 109, 102, 0, 357, 103, 108, 91, 116, 105, + 122, 128, 129, 114, 127, 69, 120, 113, 95, 85, + 86, 68, 0, 107, 79, 83, 78, 100, 117, 118, + 77, 135, 72, 126, 71, 73, 125, 99, 115, 121, + 96, 93, 70, 119, 94, 92, 87, 81, 0, 307, + 0, 112, 123, 136, 320, 377, 130, 131, 132, 133, + 98, 74, 84, 110, 318, 319, 316, 317, 353, 354, + 386, 387, 388, 369, 313, 0, 0, 372, 356, 67, + 0, 89, 134, 106, 82, 124, 381, 371, 0, 344, + 383, 322, 336, 391, 337, 338, 365, 310, 352, 101, + 334, 0, 325, 305, 331, 306, 323, 346, 80, 349, + 321, 373, 355, 88, 389, 90, 360, 0, 111, 97, + 0, 0, 348, 375, 350, 370, 343, 366, 315, 359, + 384, 335, 363, 385, 0, 0, 0, 65, 0, 0, + 0, 0, 0, 0, 0, 0, 75, 0, 362, 380, + 333, 364, 304, 361, 0, 308, 311, 390, 
378, 328, + 329, 0, 0, 0, 0, 0, 0, 0, 347, 351, + 367, 341, 0, 0, 0, 0, 0, 0, 892, 0, + 326, 0, 358, 0, 0, 0, 312, 309, 0, 345, + 0, 0, 0, 314, 0, 327, 368, 0, 376, 342, + 149, 379, 340, 339, 382, 104, 374, 324, 332, 76, + 330, 109, 102, 0, 357, 103, 108, 91, 116, 105, + 122, 128, 129, 114, 127, 69, 120, 113, 95, 85, + 86, 68, 0, 107, 79, 83, 78, 100, 117, 118, + 77, 135, 72, 126, 71, 73, 125, 99, 115, 121, + 96, 93, 70, 119, 94, 92, 87, 81, 0, 307, + 0, 112, 123, 136, 320, 377, 130, 131, 132, 133, + 98, 74, 84, 110, 318, 319, 316, 317, 353, 354, + 386, 387, 388, 369, 313, 0, 0, 372, 356, 67, + 0, 89, 134, 106, 82, 124, 381, 371, 0, 344, + 383, 322, 336, 391, 337, 338, 365, 310, 352, 101, + 334, 0, 325, 305, 331, 306, 323, 346, 80, 349, + 321, 373, 355, 88, 389, 90, 360, 0, 111, 97, + 0, 0, 348, 375, 350, 370, 343, 366, 315, 359, + 384, 335, 363, 385, 0, 0, 0, 197, 0, 0, + 0, 0, 0, 0, 0, 0, 75, 0, 362, 380, + 333, 364, 304, 361, 0, 308, 311, 390, 378, 328, + 329, 0, 0, 0, 0, 0, 0, 0, 347, 351, + 367, 341, 0, 0, 0, 0, 0, 0, 626, 0, + 326, 0, 358, 0, 0, 0, 312, 309, 0, 345, + 0, 0, 0, 314, 0, 327, 368, 0, 376, 342, + 149, 379, 340, 339, 382, 104, 374, 324, 332, 76, + 330, 109, 102, 0, 357, 103, 108, 91, 116, 105, + 122, 128, 129, 114, 127, 69, 120, 113, 95, 85, + 86, 68, 0, 107, 79, 83, 78, 100, 117, 118, + 77, 135, 72, 126, 71, 73, 125, 99, 115, 121, + 96, 93, 70, 119, 94, 92, 87, 81, 0, 307, + 0, 112, 123, 136, 320, 377, 130, 131, 132, 133, + 98, 74, 84, 110, 318, 319, 316, 317, 353, 354, + 386, 387, 388, 369, 313, 0, 0, 372, 356, 67, + 0, 89, 134, 106, 82, 124, 381, 371, 0, 344, + 383, 322, 336, 391, 337, 338, 365, 310, 352, 101, + 334, 0, 325, 305, 331, 306, 323, 346, 80, 349, + 321, 373, 355, 88, 389, 90, 360, 0, 111, 97, + 0, 0, 348, 375, 350, 370, 343, 366, 315, 359, + 384, 335, 363, 385, 0, 0, 0, 65, 0, 530, + 0, 0, 0, 0, 0, 0, 75, 0, 362, 380, + 333, 364, 304, 361, 0, 308, 311, 390, 378, 328, + 329, 0, 0, 0, 0, 0, 0, 0, 347, 351, + 367, 341, 0, 0, 0, 0, 0, 
0, 0, 0, + 326, 0, 358, 0, 0, 0, 312, 309, 0, 345, + 0, 0, 0, 314, 0, 327, 368, 0, 376, 342, + 149, 379, 340, 339, 382, 104, 374, 324, 332, 76, + 330, 109, 102, 0, 357, 103, 108, 91, 116, 105, + 122, 128, 129, 114, 127, 69, 120, 113, 95, 85, + 86, 68, 0, 107, 79, 83, 78, 100, 117, 118, + 77, 135, 72, 126, 71, 73, 125, 99, 115, 121, + 96, 93, 70, 119, 94, 92, 87, 81, 0, 307, + 0, 112, 123, 136, 320, 377, 130, 131, 132, 133, + 98, 74, 84, 110, 318, 319, 316, 317, 353, 354, + 386, 387, 388, 369, 313, 0, 0, 372, 356, 67, + 0, 89, 134, 106, 82, 124, 381, 371, 0, 344, + 383, 322, 336, 391, 337, 338, 365, 310, 352, 101, + 334, 0, 325, 305, 331, 306, 323, 346, 80, 349, + 321, 373, 355, 88, 389, 90, 360, 0, 111, 97, + 0, 0, 348, 375, 350, 370, 343, 366, 315, 359, + 384, 335, 363, 385, 0, 0, 0, 65, 0, 0, + 0, 0, 0, 0, 0, 0, 75, 0, 362, 380, + 333, 364, 304, 361, 0, 308, 311, 390, 378, 328, + 329, 0, 0, 0, 0, 0, 0, 0, 347, 351, + 367, 341, 0, 0, 0, 0, 0, 0, 0, 0, + 326, 0, 358, 0, 0, 0, 312, 309, 0, 345, + 0, 0, 0, 314, 0, 327, 368, 0, 376, 342, + 149, 379, 340, 339, 382, 104, 374, 324, 332, 76, + 330, 109, 102, 0, 357, 103, 108, 91, 116, 105, + 122, 128, 129, 114, 127, 69, 120, 113, 95, 85, + 86, 68, 0, 107, 79, 83, 78, 100, 117, 118, + 77, 135, 72, 126, 71, 73, 125, 99, 115, 121, + 96, 93, 70, 119, 94, 92, 87, 81, 0, 307, + 0, 112, 123, 136, 320, 377, 130, 131, 132, 133, + 98, 74, 84, 110, 318, 319, 316, 317, 353, 354, + 386, 387, 388, 369, 313, 0, 0, 372, 356, 67, + 0, 89, 134, 106, 82, 124, 381, 371, 0, 344, + 383, 322, 336, 391, 337, 338, 365, 310, 352, 101, + 334, 0, 325, 305, 331, 306, 323, 346, 80, 349, + 321, 373, 355, 88, 389, 90, 360, 0, 111, 97, + 0, 0, 348, 375, 350, 370, 343, 366, 315, 359, + 384, 335, 363, 385, 0, 0, 0, 197, 0, 0, + 0, 0, 0, 0, 0, 0, 75, 0, 362, 380, + 333, 364, 304, 361, 0, 308, 311, 390, 378, 328, + 329, 0, 0, 0, 0, 0, 0, 0, 347, 351, + 367, 341, 0, 0, 0, 0, 0, 0, 0, 0, + 326, 0, 358, 0, 0, 0, 312, 309, 0, 345, + 0, 0, 0, 314, 0, 327, 368, 0, 
376, 342, + 149, 379, 340, 339, 382, 104, 374, 324, 332, 76, + 330, 109, 102, 0, 357, 103, 108, 91, 116, 105, + 122, 128, 129, 114, 127, 69, 120, 113, 95, 85, + 86, 68, 0, 107, 79, 83, 78, 100, 117, 118, + 77, 135, 72, 126, 71, 73, 125, 99, 115, 121, + 96, 93, 70, 119, 94, 92, 87, 81, 0, 307, + 0, 112, 123, 136, 320, 377, 130, 131, 132, 133, + 98, 74, 84, 110, 318, 319, 316, 317, 353, 354, + 386, 387, 388, 369, 313, 0, 0, 372, 356, 67, + 0, 89, 134, 106, 82, 124, 381, 371, 0, 344, + 383, 322, 336, 391, 337, 338, 365, 310, 352, 101, + 334, 0, 325, 305, 331, 306, 323, 346, 80, 349, + 321, 373, 355, 88, 389, 90, 360, 0, 111, 97, + 0, 0, 348, 375, 350, 370, 343, 366, 315, 359, + 384, 335, 363, 385, 0, 0, 0, 147, 0, 0, + 0, 0, 0, 0, 0, 0, 75, 0, 362, 380, + 333, 364, 304, 361, 0, 308, 311, 390, 378, 328, + 329, 0, 0, 0, 0, 0, 0, 0, 347, 351, + 367, 341, 0, 0, 0, 0, 0, 0, 0, 0, + 326, 0, 358, 0, 0, 0, 312, 309, 0, 345, + 0, 0, 0, 314, 0, 327, 368, 0, 376, 342, + 149, 379, 340, 339, 382, 104, 374, 324, 332, 76, + 330, 109, 102, 0, 357, 103, 108, 91, 116, 105, + 122, 128, 129, 114, 127, 69, 120, 113, 95, 85, + 86, 68, 0, 107, 79, 83, 78, 100, 117, 118, + 77, 135, 72, 126, 71, 73, 125, 99, 115, 121, + 96, 93, 70, 119, 94, 92, 87, 81, 0, 307, + 0, 112, 123, 136, 320, 377, 130, 131, 132, 133, + 98, 74, 84, 110, 318, 319, 316, 317, 353, 354, + 386, 387, 388, 369, 313, 0, 0, 372, 356, 67, + 0, 89, 134, 106, 82, 124, 101, 0, 0, 662, + 0, 199, 0, 0, 0, 80, 0, 196, 0, 0, + 88, 236, 90, 0, 0, 111, 97, 0, 0, 0, + 0, 229, 230, 0, 0, 0, 0, 0, 0, 0, + 0, 43, 0, 0, 197, 217, 216, 219, 220, 221, + 222, 0, 0, 75, 218, 223, 224, 225, 0, 0, + 194, 210, 0, 235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 358, 359, 0, 0, 0, 0, - 399, 0, 360, 0, 0, 356, 357, 362, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 146, 0, - 0, 397, 0, 101, 0, 0, 0, 73, 0, 106, - 99, 0, 0, 100, 105, 88, 113, 102, 119, 125, - 126, 111, 124, 66, 117, 110, 92, 82, 83, 65, - 0, 104, 76, 80, 75, 97, 114, 115, 74, 132, 
- 69, 123, 68, 70, 122, 96, 112, 118, 93, 90, - 67, 116, 91, 89, 84, 78, 0, 0, 0, 109, - 120, 133, 0, 0, 127, 128, 129, 130, 95, 71, - 81, 107, 389, 398, 395, 396, 393, 394, 392, 391, - 390, 400, 383, 384, 386, 0, 385, 64, 0, 86, - 131, 103, 79, 121, 98, 0, 0, 0, 0, 350, - 0, 0, 0, 77, 0, 349, 0, 0, 85, 388, - 87, 0, 0, 108, 94, 0, 0, 0, 0, 381, - 382, 0, 0, 0, 0, 0, 0, 0, 0, 41, - 0, 0, 210, 368, 367, 370, 371, 372, 373, 0, - 0, 72, 369, 374, 375, 376, 0, 0, 347, 361, - 0, 387, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 207, 208, 190, 0, 0, 0, 247, + 0, 209, 0, 0, 205, 206, 211, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 149, 0, 0, + 245, 0, 104, 0, 0, 0, 76, 0, 109, 102, + 0, 0, 103, 108, 91, 116, 105, 122, 128, 129, + 114, 127, 69, 120, 113, 95, 85, 86, 68, 0, + 107, 79, 83, 78, 100, 117, 118, 77, 135, 72, + 126, 71, 73, 125, 99, 115, 121, 96, 93, 70, + 119, 94, 92, 87, 81, 0, 0, 0, 112, 123, + 136, 0, 0, 130, 131, 132, 133, 98, 74, 84, + 110, 237, 246, 243, 244, 241, 242, 240, 239, 238, + 248, 231, 232, 234, 0, 233, 67, 0, 89, 134, + 106, 82, 124, 101, 0, 0, 0, 0, 199, 0, + 0, 0, 80, 0, 196, 0, 0, 88, 236, 90, + 0, 0, 111, 97, 0, 0, 0, 0, 229, 230, + 0, 0, 0, 0, 0, 0, 0, 0, 43, 0, + 402, 197, 217, 216, 219, 220, 221, 222, 0, 0, + 75, 218, 223, 224, 225, 0, 0, 194, 210, 0, + 235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 358, 359, 0, 0, 0, 0, 399, 0, 360, - 0, 0, 356, 357, 362, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 146, 0, 0, 397, 0, - 101, 0, 0, 0, 73, 0, 106, 99, 0, 0, - 100, 105, 88, 113, 102, 119, 125, 126, 111, 124, - 66, 117, 110, 92, 82, 83, 65, 0, 104, 76, - 80, 75, 97, 114, 115, 74, 132, 69, 123, 68, - 70, 122, 96, 112, 118, 93, 90, 67, 116, 91, - 89, 84, 78, 0, 0, 0, 109, 120, 133, 0, - 0, 127, 128, 129, 130, 95, 71, 81, 107, 389, - 398, 395, 396, 393, 394, 392, 391, 390, 400, 383, - 384, 386, 98, 385, 64, 0, 86, 131, 103, 79, - 121, 77, 0, 0, 0, 0, 85, 388, 87, 0, - 0, 108, 94, 0, 0, 0, 0, 381, 382, 0, - 0, 0, 0, 0, 0, 0, 0, 41, 0, 0, - 210, 368, 367, 370, 
371, 372, 373, 0, 0, 72, - 369, 374, 375, 376, 0, 0, 0, 361, 0, 387, + 207, 208, 0, 0, 0, 0, 247, 0, 209, 0, + 0, 205, 206, 211, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 149, 0, 0, 245, 0, 104, + 0, 0, 0, 76, 0, 109, 102, 0, 0, 103, + 108, 91, 116, 105, 122, 128, 129, 114, 127, 69, + 120, 113, 95, 85, 86, 68, 0, 107, 79, 83, + 78, 100, 117, 118, 77, 135, 72, 126, 71, 73, + 125, 99, 115, 121, 96, 93, 70, 119, 94, 92, + 87, 81, 0, 0, 0, 112, 123, 136, 0, 0, + 130, 131, 132, 133, 98, 74, 84, 110, 237, 246, + 243, 244, 241, 242, 240, 239, 238, 248, 231, 232, + 234, 0, 233, 67, 0, 89, 134, 106, 82, 124, + 101, 0, 0, 0, 0, 199, 0, 0, 0, 80, + 0, 196, 0, 0, 88, 236, 90, 0, 0, 111, + 97, 0, 0, 0, 0, 229, 230, 0, 0, 0, + 0, 0, 0, 0, 0, 43, 0, 0, 197, 217, + 216, 219, 220, 221, 222, 0, 0, 75, 218, 223, + 224, 225, 0, 0, 194, 210, 0, 235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 358, - 359, 0, 0, 0, 0, 399, 0, 360, 0, 0, - 356, 357, 362, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 146, 0, 0, 397, 0, 101, 0, - 0, 0, 73, 0, 106, 99, 0, 1084, 100, 105, - 88, 113, 102, 119, 125, 126, 111, 124, 66, 117, - 110, 92, 82, 83, 65, 0, 104, 76, 80, 75, - 97, 114, 115, 74, 132, 69, 123, 68, 70, 122, - 96, 112, 118, 93, 90, 67, 116, 91, 89, 84, - 78, 0, 0, 0, 109, 120, 133, 0, 0, 127, - 128, 129, 130, 95, 71, 81, 107, 389, 398, 395, - 396, 393, 394, 392, 391, 390, 400, 383, 384, 386, - 98, 385, 64, 0, 86, 131, 103, 79, 121, 77, - 0, 0, 0, 0, 85, 388, 87, 0, 0, 108, - 94, 0, 0, 0, 0, 381, 382, 0, 0, 0, - 0, 0, 0, 0, 0, 41, 0, 0, 210, 368, - 367, 370, 371, 372, 373, 0, 0, 72, 369, 374, - 375, 376, 0, 0, 0, 361, 0, 387, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 207, 208, 190, + 0, 0, 0, 247, 0, 209, 0, 0, 205, 206, + 211, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 149, 0, 0, 245, 0, 104, 0, 0, 0, + 76, 0, 109, 102, 0, 0, 103, 108, 91, 116, + 105, 122, 128, 129, 114, 127, 69, 120, 113, 95, + 85, 86, 68, 0, 107, 79, 83, 78, 100, 117, + 118, 77, 135, 72, 126, 71, 73, 125, 99, 115, + 121, 96, 93, 70, 119, 94, 92, 87, 
81, 0, + 0, 0, 112, 123, 136, 0, 0, 130, 131, 132, + 133, 98, 74, 84, 110, 237, 246, 243, 244, 241, + 242, 240, 239, 238, 248, 231, 232, 234, 20, 233, + 67, 0, 89, 134, 106, 82, 124, 0, 0, 0, + 101, 0, 0, 0, 0, 199, 0, 0, 0, 80, + 0, 196, 0, 0, 88, 236, 90, 0, 0, 111, + 97, 0, 0, 0, 0, 229, 230, 0, 0, 0, + 0, 0, 0, 0, 0, 43, 0, 0, 197, 217, + 216, 219, 220, 221, 222, 0, 0, 75, 218, 223, + 224, 225, 0, 0, 194, 210, 0, 235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 358, 359, 0, - 0, 0, 0, 399, 0, 360, 0, 0, 356, 357, - 362, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 146, 0, 0, 397, 0, 101, 0, 0, 0, - 73, 0, 106, 99, 0, 0, 100, 105, 88, 113, - 102, 119, 125, 126, 111, 124, 66, 117, 110, 92, - 82, 83, 65, 0, 104, 76, 80, 75, 97, 114, - 115, 74, 132, 69, 123, 68, 70, 122, 96, 112, - 118, 93, 90, 67, 116, 91, 89, 84, 78, 0, - 0, 0, 109, 120, 133, 0, 0, 127, 128, 129, - 130, 95, 71, 81, 107, 389, 398, 395, 396, 393, - 394, 392, 391, 390, 400, 383, 384, 386, 98, 385, - 64, 0, 86, 131, 103, 79, 121, 77, 0, 0, - 0, 0, 85, 0, 87, 0, 0, 108, 94, 0, + 0, 0, 0, 0, 0, 0, 0, 207, 208, 0, + 0, 0, 0, 247, 0, 209, 0, 0, 205, 206, + 211, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 149, 0, 0, 245, 0, 104, 0, 0, 0, + 76, 0, 109, 102, 0, 0, 103, 108, 91, 116, + 105, 122, 128, 129, 114, 127, 69, 120, 113, 95, + 85, 86, 68, 0, 107, 79, 83, 78, 100, 117, + 118, 77, 135, 72, 126, 71, 73, 125, 99, 115, + 121, 96, 93, 70, 119, 94, 92, 87, 81, 0, + 0, 0, 112, 123, 136, 0, 0, 130, 131, 132, + 133, 98, 74, 84, 110, 237, 246, 243, 244, 241, + 242, 240, 239, 238, 248, 231, 232, 234, 0, 233, + 67, 0, 89, 134, 106, 82, 124, 101, 0, 0, + 0, 0, 199, 0, 0, 0, 80, 0, 196, 0, + 0, 88, 236, 90, 0, 0, 111, 97, 0, 0, + 0, 0, 229, 230, 0, 0, 0, 0, 0, 0, + 0, 0, 43, 0, 0, 197, 217, 216, 219, 220, + 221, 222, 0, 0, 75, 218, 223, 224, 225, 0, + 0, 194, 210, 0, 235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 62, 0, 0, 0, - 0, 0, 0, 0, 0, 72, 0, 0, 0, 0, + 0, 0, 0, 0, 207, 208, 0, 0, 0, 0, + 247, 
0, 209, 0, 0, 205, 206, 211, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 149, 0, + 0, 245, 0, 104, 0, 0, 0, 76, 0, 109, + 102, 0, 0, 103, 108, 91, 116, 105, 122, 128, + 129, 114, 127, 69, 120, 113, 95, 85, 86, 68, + 0, 107, 79, 83, 78, 100, 117, 118, 77, 135, + 72, 126, 71, 73, 125, 99, 115, 121, 96, 93, + 70, 119, 94, 92, 87, 81, 0, 0, 0, 112, + 123, 136, 0, 0, 130, 131, 132, 133, 98, 74, + 84, 110, 237, 246, 243, 244, 241, 242, 240, 239, + 238, 248, 231, 232, 234, 101, 233, 67, 0, 89, + 134, 106, 82, 124, 80, 0, 0, 0, 0, 88, + 236, 90, 0, 0, 111, 97, 0, 0, 0, 0, + 229, 230, 0, 0, 0, 0, 0, 0, 0, 0, + 43, 0, 0, 197, 217, 216, 219, 220, 221, 222, + 0, 0, 75, 218, 223, 224, 225, 0, 0, 0, + 210, 0, 235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 207, 208, 0, 0, 0, 0, 247, 0, + 209, 0, 0, 205, 206, 211, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 149, 0, 0, 245, + 0, 104, 0, 0, 0, 76, 0, 109, 102, 0, + 1091, 103, 108, 91, 116, 105, 122, 128, 129, 114, + 127, 69, 120, 113, 95, 85, 86, 68, 0, 107, + 79, 83, 78, 100, 117, 118, 77, 135, 72, 126, + 71, 73, 125, 99, 115, 121, 96, 93, 70, 119, + 94, 92, 87, 81, 0, 0, 0, 112, 123, 136, + 0, 0, 130, 131, 132, 133, 98, 74, 84, 110, + 237, 246, 243, 244, 241, 242, 240, 239, 238, 248, + 231, 232, 234, 101, 233, 67, 0, 89, 134, 106, + 82, 124, 80, 0, 0, 0, 0, 88, 236, 90, + 0, 0, 111, 97, 0, 0, 0, 0, 229, 230, + 0, 0, 0, 0, 0, 0, 0, 0, 43, 0, + 0, 197, 217, 216, 219, 220, 221, 222, 0, 0, + 75, 218, 223, 224, 225, 0, 0, 0, 210, 0, + 235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 531, 530, 540, 541, 533, 534, 535, 536, 537, - 538, 539, 532, 0, 0, 542, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 146, - 0, 0, 0, 0, 101, 0, 0, 0, 73, 0, - 106, 99, 0, 0, 100, 105, 88, 113, 102, 119, - 125, 126, 111, 124, 66, 117, 110, 92, 82, 83, - 65, 0, 104, 76, 80, 75, 97, 114, 115, 74, - 132, 69, 123, 68, 70, 122, 96, 112, 118, 93, - 90, 67, 116, 91, 89, 84, 78, 0, 0, 0, - 109, 120, 133, 0, 0, 127, 128, 129, 130, 95, - 71, 81, 107, 0, 0, 0, 
0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, - 86, 131, 103, 79, 121, 98, 0, 0, 0, 857, - 0, 0, 0, 0, 77, 0, 0, 0, 0, 85, - 0, 87, 0, 0, 108, 94, 0, 0, 0, 0, + 207, 208, 0, 0, 0, 0, 247, 0, 209, 0, + 0, 205, 206, 211, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 149, 0, 0, 245, 0, 104, + 0, 0, 0, 76, 0, 109, 102, 0, 0, 103, + 108, 91, 116, 105, 122, 128, 129, 114, 127, 69, + 120, 113, 95, 85, 86, 68, 0, 107, 79, 83, + 78, 100, 117, 118, 77, 135, 72, 126, 71, 73, + 125, 99, 115, 121, 96, 93, 70, 119, 94, 92, + 87, 81, 0, 0, 0, 112, 123, 136, 0, 0, + 130, 131, 132, 133, 98, 74, 84, 110, 237, 246, + 243, 244, 241, 242, 240, 239, 238, 248, 231, 232, + 234, 0, 233, 67, 0, 89, 134, 106, 82, 124, + 101, 0, 0, 0, 424, 0, 0, 0, 0, 80, + 0, 0, 0, 0, 88, 0, 90, 0, 0, 111, + 97, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 65, 0, + 426, 0, 0, 0, 0, 0, 0, 75, 0, 0, + 0, 0, 421, 420, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 422, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 62, 0, 859, 0, 0, 0, 0, - 0, 0, 72, 0, 0, 0, 0, 519, 518, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 520, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 149, 0, 0, 0, 0, 104, 0, 0, 0, + 76, 0, 109, 102, 0, 0, 103, 108, 91, 116, + 105, 122, 128, 129, 114, 127, 69, 120, 113, 95, + 85, 86, 68, 0, 107, 79, 83, 78, 100, 117, + 118, 77, 135, 72, 126, 71, 73, 125, 99, 115, + 121, 96, 93, 70, 119, 94, 92, 87, 81, 0, + 0, 0, 112, 123, 136, 0, 101, 130, 131, 132, + 133, 98, 74, 84, 110, 80, 0, 0, 0, 0, + 88, 0, 90, 0, 0, 111, 97, 0, 0, 0, + 67, 0, 89, 134, 106, 82, 124, 0, 0, 0, + 0, 0, 0, 0, 65, 0, 0, 0, 0, 0, + 0, 0, 0, 75, 0, 0, 0, 0, 61, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 146, 0, 0, 0, - 0, 101, 0, 0, 0, 73, 0, 106, 99, 0, - 0, 100, 105, 88, 113, 102, 119, 125, 126, 111, - 124, 66, 117, 110, 92, 82, 83, 65, 0, 104, - 76, 80, 75, 97, 114, 115, 74, 132, 69, 123, - 68, 70, 122, 96, 112, 118, 93, 90, 67, 116, - 91, 89, 84, 78, 0, 0, 0, 109, 120, 133, - 0, 98, 127, 128, 129, 130, 95, 71, 81, 107, - 
77, 0, 0, 0, 0, 85, 0, 87, 0, 0, - 108, 94, 0, 0, 0, 64, 0, 86, 131, 103, - 79, 121, 0, 0, 0, 0, 0, 0, 0, 62, - 0, 0, 0, 0, 0, 0, 0, 0, 72, 0, - 0, 0, 0, 58, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 62, 0, 60, 0, 0, + 0, 63, 104, 0, 0, 0, 76, 0, 109, 102, + 0, 0, 103, 108, 91, 116, 105, 122, 128, 129, + 114, 127, 69, 120, 113, 95, 85, 86, 68, 0, + 107, 79, 83, 78, 100, 117, 118, 77, 135, 72, + 126, 71, 73, 125, 99, 115, 121, 96, 93, 70, + 119, 94, 92, 87, 81, 0, 0, 0, 112, 123, + 136, 0, 0, 130, 131, 132, 133, 98, 74, 84, + 110, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 67, 0, 89, 134, + 106, 82, 124, 101, 0, 0, 0, 518, 0, 0, + 0, 0, 80, 0, 0, 0, 0, 88, 0, 90, + 0, 0, 111, 97, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 59, 0, 57, 0, 0, 0, 60, 101, 0, 0, - 0, 73, 0, 106, 99, 0, 0, 100, 105, 88, - 113, 102, 119, 125, 126, 111, 124, 66, 117, 110, - 92, 82, 83, 65, 0, 104, 76, 80, 75, 97, - 114, 115, 74, 132, 69, 123, 68, 70, 122, 96, - 112, 118, 93, 90, 67, 116, 91, 89, 84, 78, - 0, 0, 0, 109, 120, 133, 0, 0, 127, 128, - 129, 130, 95, 71, 81, 107, 0, 0, 0, 0, + 0, 147, 0, 520, 0, 0, 0, 0, 0, 0, + 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 64, 0, 86, 131, 103, 79, 121, 98, 0, - 0, 0, 435, 0, 0, 0, 0, 77, 0, 0, - 0, 0, 85, 0, 87, 0, 0, 108, 94, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 144, 0, 437, 0, - 0, 0, 0, 0, 0, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 149, 0, 0, 0, 0, 104, + 0, 0, 0, 76, 0, 109, 102, 0, 0, 103, + 108, 91, 116, 105, 122, 128, 129, 114, 127, 69, + 120, 113, 95, 85, 86, 68, 0, 107, 79, 83, + 78, 100, 117, 118, 77, 135, 72, 126, 71, 73, + 125, 99, 115, 121, 96, 93, 70, 119, 94, 92, + 87, 81, 0, 0, 0, 112, 123, 136, 0, 0, + 130, 131, 132, 133, 98, 74, 84, 110, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 101, 0, 67, 0, 89, 134, 106, 82, 124, + 80, 0, 0, 0, 0, 88, 0, 90, 0, 0, + 
111, 97, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 43, 0, 0, 65, + 0, 0, 0, 0, 0, 0, 0, 0, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 146, - 0, 0, 0, 0, 101, 0, 0, 0, 73, 0, - 106, 99, 0, 0, 100, 105, 88, 113, 102, 119, - 125, 126, 111, 124, 66, 117, 110, 92, 82, 83, - 65, 0, 104, 76, 80, 75, 97, 114, 115, 74, - 132, 69, 123, 68, 70, 122, 96, 112, 118, 93, - 90, 67, 116, 91, 89, 84, 78, 0, 0, 0, - 109, 120, 133, 0, 0, 127, 128, 129, 130, 95, - 71, 81, 107, 0, 0, 19, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 98, 0, 64, 0, - 86, 131, 103, 79, 121, 77, 0, 0, 0, 0, - 85, 0, 87, 0, 0, 108, 94, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 41, 0, 0, 62, 0, 0, 0, 0, 0, - 0, 0, 0, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 149, 0, 0, 0, 0, 104, 0, 0, + 0, 76, 0, 109, 102, 0, 0, 103, 108, 91, + 116, 105, 122, 128, 129, 114, 127, 69, 120, 113, + 95, 85, 86, 68, 0, 107, 79, 83, 78, 100, + 117, 118, 77, 135, 72, 126, 71, 73, 125, 99, + 115, 121, 96, 93, 70, 119, 94, 92, 87, 81, + 0, 0, 0, 112, 123, 136, 0, 0, 130, 131, + 132, 133, 98, 74, 84, 110, 0, 20, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 101, + 0, 67, 0, 89, 134, 106, 82, 124, 80, 0, + 0, 0, 0, 88, 0, 90, 0, 0, 111, 97, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 146, 0, 0, - 0, 0, 101, 0, 0, 0, 73, 0, 106, 99, - 0, 0, 100, 105, 88, 113, 102, 119, 125, 126, - 111, 124, 66, 117, 110, 92, 82, 83, 65, 0, - 104, 76, 80, 75, 97, 114, 115, 74, 132, 69, - 123, 68, 70, 122, 96, 112, 118, 93, 90, 67, - 116, 91, 89, 84, 78, 0, 0, 0, 109, 120, - 133, 0, 0, 127, 128, 129, 130, 95, 71, 81, - 107, 0, 0, 19, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 98, 0, 64, 0, 86, 131, - 103, 79, 121, 77, 0, 0, 0, 0, 85, 0, - 87, 0, 0, 108, 94, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 41, - 0, 0, 144, 0, 0, 0, 0, 0, 0, 0, - 0, 72, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 43, 0, 0, 147, 0, 0, + 0, 0, 0, 0, 0, 0, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 146, 0, 0, 0, 0, - 101, 0, 0, 0, 73, 0, 106, 99, 0, 0, - 100, 105, 88, 113, 102, 119, 125, 126, 111, 124, - 66, 117, 110, 92, 82, 83, 65, 0, 104, 76, - 80, 75, 97, 114, 115, 74, 132, 69, 123, 68, - 70, 122, 96, 112, 118, 93, 90, 67, 116, 91, - 89, 84, 78, 0, 0, 0, 109, 120, 133, 0, - 98, 127, 128, 129, 130, 95, 71, 81, 107, 77, - 0, 0, 0, 0, 85, 0, 87, 0, 0, 108, - 94, 0, 0, 0, 64, 0, 86, 131, 103, 79, - 121, 0, 0, 0, 0, 0, 0, 0, 62, 0, - 0, 599, 0, 0, 600, 0, 0, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 149, 0, 0, 0, 0, 104, 0, 0, 0, 76, + 0, 109, 102, 0, 0, 103, 108, 91, 116, 105, + 122, 128, 129, 114, 127, 69, 120, 113, 95, 85, + 86, 68, 0, 107, 79, 83, 78, 100, 117, 118, + 77, 135, 72, 126, 71, 73, 125, 99, 115, 121, + 96, 93, 70, 119, 94, 92, 87, 81, 0, 0, + 0, 112, 123, 136, 0, 101, 130, 131, 132, 133, + 98, 74, 84, 110, 80, 0, 0, 0, 0, 88, + 0, 90, 0, 0, 111, 97, 0, 0, 0, 67, + 0, 89, 134, 106, 82, 124, 0, 0, 0, 0, + 0, 0, 0, 65, 0, 0, 613, 0, 0, 614, + 0, 0, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 146, 0, 0, 0, 0, 101, 0, 0, 0, - 73, 0, 106, 99, 0, 0, 100, 105, 88, 113, - 102, 119, 125, 126, 111, 124, 66, 117, 110, 92, - 82, 83, 65, 0, 104, 76, 80, 75, 97, 114, - 115, 74, 132, 69, 123, 68, 70, 122, 96, 112, - 118, 93, 90, 67, 116, 91, 89, 84, 78, 0, - 0, 0, 109, 120, 133, 0, 0, 127, 128, 129, - 130, 95, 71, 81, 107, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 149, 0, 0, 0, + 0, 104, 0, 0, 0, 76, 0, 109, 102, 0, + 0, 103, 108, 91, 116, 105, 122, 128, 129, 114, + 127, 69, 120, 113, 95, 85, 86, 68, 0, 107, + 79, 83, 78, 100, 117, 118, 77, 135, 72, 126, + 71, 73, 125, 99, 115, 121, 96, 93, 70, 119, + 94, 92, 87, 81, 0, 0, 0, 112, 123, 136, + 0, 0, 130, 131, 132, 133, 98, 74, 84, 110, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 64, 0, 86, 131, 103, 79, 121, 98, 0, 0, - 0, 435, 0, 
0, 0, 0, 77, 0, 0, 0, - 0, 85, 0, 87, 0, 0, 108, 94, 0, 0, + 0, 0, 0, 0, 0, 67, 0, 89, 134, 106, + 82, 124, 101, 0, 0, 0, 518, 0, 0, 0, + 0, 80, 0, 0, 0, 0, 88, 0, 90, 0, + 0, 111, 97, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 144, 0, 437, 0, 0, - 0, 0, 0, 0, 72, 0, 0, 0, 0, 0, + 147, 0, 520, 0, 0, 0, 0, 0, 0, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 146, 0, - 0, 0, 0, 101, 0, 0, 0, 73, 0, 106, - 99, 0, 0, 433, 105, 88, 113, 102, 119, 125, - 126, 111, 124, 66, 117, 110, 92, 82, 83, 65, - 0, 104, 76, 80, 75, 97, 114, 115, 74, 132, - 69, 123, 68, 70, 122, 96, 112, 118, 93, 90, - 67, 116, 91, 89, 84, 78, 0, 0, 0, 109, - 120, 133, 0, 98, 127, 128, 129, 130, 95, 71, - 81, 107, 77, 0, 0, 0, 0, 85, 0, 87, - 0, 0, 108, 94, 0, 0, 0, 64, 0, 86, - 131, 103, 79, 121, 0, 0, 0, 0, 41, 0, - 0, 144, 0, 0, 0, 0, 0, 0, 0, 0, - 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 149, 0, 0, 0, 0, 104, 0, + 0, 0, 76, 0, 109, 102, 0, 0, 516, 108, + 91, 116, 105, 122, 128, 129, 114, 127, 69, 120, + 113, 95, 85, 86, 68, 0, 107, 79, 83, 78, + 100, 117, 118, 77, 135, 72, 126, 71, 73, 125, + 99, 115, 121, 96, 93, 70, 119, 94, 92, 87, + 81, 0, 0, 0, 112, 123, 136, 0, 101, 130, + 131, 132, 133, 98, 74, 84, 110, 80, 0, 0, + 0, 0, 88, 0, 90, 0, 0, 111, 97, 0, + 0, 0, 67, 0, 89, 134, 106, 82, 124, 0, + 0, 0, 0, 43, 0, 0, 147, 0, 0, 0, + 0, 0, 0, 0, 0, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 146, 0, 0, 0, 0, 101, - 0, 0, 0, 73, 0, 106, 99, 0, 0, 100, - 105, 88, 113, 102, 119, 125, 126, 111, 124, 66, - 117, 110, 92, 82, 83, 65, 0, 104, 76, 80, - 75, 97, 114, 115, 74, 132, 69, 123, 68, 70, - 122, 96, 112, 118, 93, 90, 67, 116, 91, 89, - 84, 78, 0, 0, 0, 109, 120, 133, 0, 98, - 127, 128, 129, 130, 95, 71, 81, 107, 77, 0, - 0, 0, 0, 85, 0, 87, 0, 0, 108, 94, - 0, 0, 0, 
64, 0, 86, 131, 103, 79, 121, - 0, 0, 0, 0, 0, 0, 0, 62, 0, 859, - 0, 0, 0, 0, 0, 0, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 149, + 0, 0, 0, 0, 104, 0, 0, 0, 76, 0, + 109, 102, 0, 0, 103, 108, 91, 116, 105, 122, + 128, 129, 114, 127, 69, 120, 113, 95, 85, 86, + 68, 0, 107, 79, 83, 78, 100, 117, 118, 77, + 135, 72, 126, 71, 73, 125, 99, 115, 121, 96, + 93, 70, 119, 94, 92, 87, 81, 0, 0, 0, + 112, 123, 136, 0, 101, 130, 131, 132, 133, 98, + 74, 84, 110, 80, 0, 0, 0, 0, 88, 0, + 90, 0, 0, 111, 97, 0, 0, 0, 67, 0, + 89, 134, 106, 82, 124, 0, 0, 0, 0, 0, + 0, 0, 147, 0, 520, 0, 0, 0, 0, 0, + 0, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 146, 0, 0, 0, 0, 101, 0, 0, 0, 73, - 0, 106, 99, 0, 0, 100, 105, 88, 113, 102, - 119, 125, 126, 111, 124, 66, 117, 110, 92, 82, - 83, 65, 0, 104, 76, 80, 75, 97, 114, 115, - 74, 132, 69, 123, 68, 70, 122, 96, 112, 118, - 93, 90, 67, 116, 91, 89, 84, 78, 0, 0, - 0, 109, 120, 133, 0, 98, 127, 128, 129, 130, - 95, 71, 81, 107, 77, 0, 0, 0, 0, 85, - 0, 87, 0, 0, 108, 94, 0, 0, 0, 64, - 0, 86, 131, 103, 79, 121, 0, 0, 0, 0, - 0, 0, 0, 144, 0, 437, 0, 0, 0, 0, - 0, 0, 72, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 149, 0, 0, 0, 0, + 104, 0, 0, 0, 76, 0, 109, 102, 0, 0, + 103, 108, 91, 116, 105, 122, 128, 129, 114, 127, + 69, 120, 113, 95, 85, 86, 68, 0, 107, 79, + 83, 78, 100, 117, 118, 77, 135, 72, 126, 71, + 73, 125, 99, 115, 121, 96, 93, 70, 119, 94, + 92, 87, 81, 0, 0, 0, 112, 123, 136, 0, + 101, 130, 131, 132, 133, 98, 74, 84, 110, 80, + 0, 0, 0, 0, 88, 0, 90, 0, 0, 111, + 97, 0, 0, 0, 67, 0, 89, 134, 106, 82, + 124, 0, 0, 0, 0, 0, 0, 0, 65, 0, + 426, 0, 0, 0, 0, 0, 0, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 146, 0, 0, 0, - 0, 101, 0, 0, 0, 73, 0, 106, 99, 0, - 0, 100, 105, 88, 113, 102, 119, 125, 
126, 111, - 124, 66, 117, 110, 92, 82, 83, 65, 0, 104, - 76, 80, 75, 97, 114, 115, 74, 132, 69, 123, - 68, 70, 122, 96, 112, 118, 93, 90, 67, 116, - 91, 89, 84, 78, 0, 0, 0, 109, 120, 133, - 0, 0, 127, 128, 129, 130, 95, 71, 81, 107, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 98, 64, 0, 86, 131, 103, - 79, 121, 412, 77, 0, 0, 0, 0, 85, 0, - 87, 0, 0, 108, 94, 0, 0, 0, 0, 0, + 0, 149, 0, 0, 0, 0, 104, 0, 0, 0, + 76, 0, 109, 102, 0, 0, 103, 108, 91, 116, + 105, 122, 128, 129, 114, 127, 69, 120, 113, 95, + 85, 86, 68, 0, 107, 79, 83, 78, 100, 117, + 118, 77, 135, 72, 126, 71, 73, 125, 99, 115, + 121, 96, 93, 70, 119, 94, 92, 87, 81, 0, + 0, 0, 112, 123, 136, 0, 0, 130, 131, 132, + 133, 98, 74, 84, 110, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 101, + 67, 0, 89, 134, 106, 82, 124, 496, 80, 0, + 0, 0, 0, 88, 0, 90, 0, 0, 111, 97, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 144, 0, 0, 0, 0, 0, 0, 0, - 0, 72, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 147, 0, 0, + 0, 0, 0, 0, 0, 0, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 146, 0, 0, 0, 0, - 101, 0, 0, 0, 73, 0, 106, 99, 0, 0, - 100, 105, 88, 113, 102, 119, 125, 126, 111, 124, - 66, 117, 110, 92, 82, 83, 65, 0, 104, 76, - 80, 75, 97, 114, 115, 74, 132, 69, 123, 68, - 70, 122, 96, 112, 118, 93, 90, 67, 116, 91, - 89, 84, 78, 195, 0, 0, 109, 120, 133, 0, - 98, 127, 128, 129, 130, 95, 71, 81, 107, 77, - 0, 0, 0, 0, 85, 0, 87, 0, 0, 108, - 94, 0, 0, 0, 64, 0, 86, 131, 103, 79, - 121, 0, 0, 0, 0, 0, 0, 0, 144, 0, - 0, 0, 0, 0, 0, 0, 0, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 149, 0, 0, 0, 0, 104, 0, 0, 0, 76, + 0, 109, 102, 0, 0, 103, 108, 91, 116, 105, + 122, 128, 129, 114, 127, 69, 120, 113, 95, 85, + 86, 68, 0, 107, 79, 83, 78, 100, 117, 118, + 77, 135, 72, 126, 71, 73, 125, 99, 115, 121, + 96, 93, 70, 119, 94, 92, 87, 81, 258, 0, + 0, 112, 123, 136, 0, 101, 130, 131, 132, 133, + 98, 74, 84, 110, 80, 0, 0, 0, 0, 
88, + 0, 90, 0, 0, 111, 97, 0, 0, 0, 67, + 0, 89, 134, 106, 82, 124, 0, 0, 0, 0, + 0, 0, 0, 147, 0, 0, 0, 0, 0, 0, + 0, 0, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 146, 0, 0, 0, 0, 101, 0, 0, 0, - 73, 0, 106, 99, 0, 0, 100, 105, 88, 113, - 102, 119, 125, 126, 111, 124, 66, 117, 110, 92, - 82, 83, 65, 0, 104, 76, 80, 75, 97, 114, - 115, 74, 132, 69, 123, 68, 70, 122, 96, 112, - 118, 93, 90, 67, 116, 91, 89, 84, 78, 0, - 0, 0, 109, 120, 133, 0, 98, 127, 128, 129, - 130, 95, 71, 81, 107, 77, 0, 0, 0, 0, - 85, 0, 87, 0, 0, 108, 94, 0, 0, 0, - 64, 0, 86, 131, 103, 79, 121, 0, 0, 0, - 0, 0, 0, 0, 144, 0, 0, 0, 0, 0, - 0, 0, 0, 72, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 149, 0, 0, 0, + 0, 104, 0, 0, 0, 76, 0, 109, 102, 0, + 0, 103, 108, 91, 116, 105, 122, 128, 129, 114, + 127, 69, 120, 113, 95, 85, 86, 68, 0, 107, + 79, 83, 78, 100, 117, 118, 77, 135, 72, 126, + 71, 73, 125, 99, 115, 121, 96, 93, 70, 119, + 94, 92, 87, 81, 0, 0, 0, 112, 123, 136, + 0, 101, 130, 131, 132, 133, 98, 74, 84, 110, + 80, 0, 0, 0, 0, 88, 0, 90, 0, 0, + 111, 97, 0, 0, 0, 67, 0, 89, 134, 106, + 82, 124, 0, 0, 0, 0, 0, 0, 0, 147, + 0, 0, 0, 0, 0, 0, 0, 0, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 141, 0, 146, 0, 0, - 0, 0, 101, 0, 0, 0, 73, 0, 106, 99, - 0, 0, 100, 105, 88, 113, 102, 119, 125, 126, - 111, 124, 66, 117, 110, 92, 82, 83, 65, 0, - 104, 76, 80, 75, 97, 114, 115, 74, 132, 69, - 123, 68, 70, 122, 96, 112, 118, 93, 90, 67, - 116, 91, 89, 84, 78, 0, 0, 0, 109, 120, - 133, 0, 98, 127, 128, 129, 130, 95, 71, 81, - 107, 77, 0, 0, 0, 0, 85, 0, 87, 0, - 0, 108, 94, 0, 0, 0, 64, 0, 86, 131, - 103, 79, 121, 0, 0, 0, 0, 0, 0, 0, - 62, 0, 0, 0, 0, 0, 0, 0, 0, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 144, 0, 149, 0, 0, 0, 0, 104, 0, 0, + 0, 76, 0, 109, 102, 0, 0, 103, 108, 91, + 116, 105, 122, 128, 
129, 114, 127, 69, 120, 113, + 95, 85, 86, 68, 0, 107, 79, 83, 78, 100, + 117, 118, 77, 135, 72, 126, 71, 73, 125, 99, + 115, 121, 96, 93, 70, 119, 94, 92, 87, 81, + 0, 0, 0, 112, 123, 136, 0, 101, 130, 131, + 132, 133, 98, 74, 84, 110, 80, 0, 0, 0, + 0, 88, 0, 90, 0, 0, 111, 97, 0, 0, + 0, 67, 0, 89, 134, 106, 82, 124, 0, 0, + 0, 0, 0, 0, 0, 65, 0, 0, 0, 0, + 0, 0, 0, 0, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 146, 0, 0, 0, 0, 101, 0, - 0, 0, 73, 0, 106, 99, 0, 0, 100, 105, - 88, 113, 102, 119, 125, 126, 111, 124, 66, 117, - 110, 92, 82, 83, 65, 0, 104, 76, 80, 75, - 97, 114, 115, 74, 132, 69, 123, 68, 70, 122, - 96, 112, 118, 93, 90, 67, 116, 91, 89, 84, - 78, 0, 0, 0, 109, 120, 133, 0, 98, 127, - 128, 129, 130, 95, 71, 81, 107, 77, 0, 0, - 0, 0, 85, 0, 87, 0, 0, 108, 94, 0, - 0, 0, 64, 0, 86, 131, 103, 79, 121, 0, - 0, 0, 0, 0, 0, 0, 210, 0, 0, 0, - 0, 0, 0, 0, 0, 72, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 149, 0, + 0, 0, 0, 104, 0, 0, 0, 76, 0, 109, + 102, 0, 0, 103, 108, 91, 116, 105, 122, 128, + 129, 114, 127, 69, 120, 113, 95, 85, 86, 68, + 0, 107, 79, 83, 78, 100, 117, 118, 77, 135, + 72, 126, 71, 73, 125, 99, 115, 121, 96, 93, + 70, 119, 94, 92, 87, 81, 0, 0, 0, 112, + 123, 136, 0, 101, 130, 131, 132, 133, 98, 74, + 84, 110, 80, 0, 0, 0, 0, 88, 0, 90, + 0, 0, 111, 97, 0, 0, 0, 67, 0, 89, + 134, 106, 82, 124, 0, 0, 0, 0, 0, 0, + 0, 197, 0, 0, 0, 0, 0, 0, 0, 0, + 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 146, - 0, 0, 0, 0, 101, 0, 0, 0, 73, 0, - 106, 99, 0, 0, 100, 105, 88, 113, 102, 119, - 125, 126, 111, 124, 66, 117, 110, 92, 82, 83, - 65, 0, 104, 76, 80, 75, 97, 114, 115, 74, - 132, 69, 123, 68, 70, 122, 96, 112, 118, 93, - 90, 67, 116, 91, 89, 84, 78, 0, 0, 0, - 109, 120, 133, 0, 98, 127, 128, 129, 130, 95, - 
71, 81, 107, 77, 0, 0, 0, 0, 85, 0, - 87, 0, 0, 108, 94, 0, 0, 0, 64, 0, - 86, 131, 103, 79, 121, 0, 0, 0, 0, 0, - 0, 0, 144, 0, 0, 0, 0, 0, 0, 0, - 0, 72, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 149, 0, 0, 0, 0, 104, + 0, 0, 0, 76, 0, 109, 102, 0, 0, 103, + 108, 91, 116, 105, 122, 128, 129, 114, 127, 69, + 120, 113, 95, 85, 86, 68, 0, 107, 79, 83, + 78, 100, 117, 118, 77, 135, 72, 126, 71, 73, + 125, 99, 115, 121, 96, 93, 70, 119, 94, 92, + 87, 81, 0, 0, 0, 112, 123, 136, 0, 101, + 130, 131, 132, 133, 98, 74, 84, 110, 80, 0, + 0, 0, 0, 88, 0, 90, 0, 0, 111, 97, + 0, 0, 0, 67, 0, 89, 134, 106, 82, 124, + 0, 0, 0, 0, 0, 0, 0, 147, 0, 0, + 0, 0, 0, 0, 0, 0, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 146, 0, 0, 0, 0, - 101, 0, 0, 0, 73, 0, 106, 99, 0, 0, - 100, 105, 88, 113, 102, 119, 125, 126, 111, 124, - 66, 117, 110, 92, 82, 83, 65, 0, 104, 76, - 80, 75, 97, 114, 115, 74, 132, 69, 123, 68, - 70, 122, 96, 112, 118, 93, 90, 67, 116, 91, - 89, 84, 78, 0, 0, 0, 109, 120, 133, 0, - 98, 127, 128, 129, 130, 95, 71, 81, 107, 77, - 0, 0, 0, 0, 85, 0, 87, 0, 0, 108, - 94, 0, 0, 0, 64, 0, 86, 131, 103, 79, - 121, 0, 0, 0, 0, 0, 0, 0, 210, 0, - 0, 0, 0, 0, 0, 0, 0, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 149, 0, 0, 0, 0, 104, 0, 0, 0, 76, + 0, 109, 102, 0, 0, 103, 108, 91, 116, 105, + 122, 128, 129, 114, 127, 69, 120, 113, 95, 85, + 86, 68, 0, 107, 79, 83, 78, 100, 117, 118, + 77, 135, 72, 126, 71, 73, 125, 99, 115, 121, + 96, 93, 70, 119, 94, 92, 87, 81, 0, 0, + 0, 112, 123, 136, 0, 101, 130, 131, 132, 133, + 98, 74, 84, 110, 80, 0, 0, 0, 0, 88, + 0, 90, 0, 0, 111, 97, 0, 0, 0, 67, + 0, 89, 134, 106, 82, 124, 0, 0, 0, 0, + 0, 0, 0, 197, 0, 0, 0, 0, 0, 0, + 0, 0, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 146, 0, 0, 0, 0, 101, 0, 0, 0, - 73, 0, 106, 99, 0, 0, 100, 
105, 88, 113, - 102, 119, 125, 126, 111, 124, 66, 117, 110, 92, - 82, 83, 65, 0, 104, 76, 80, 75, 97, 114, - 115, 74, 132, 69, 123, 68, 205, 122, 96, 112, - 118, 93, 90, 67, 116, 91, 89, 84, 78, 0, - 0, 0, 109, 120, 133, 0, 0, 127, 128, 129, - 130, 206, 204, 203, 202, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 149, 0, 0, 0, + 0, 104, 0, 0, 0, 76, 0, 109, 102, 0, + 0, 103, 108, 91, 116, 105, 122, 128, 129, 114, + 127, 69, 120, 113, 95, 85, 86, 68, 0, 107, + 79, 83, 78, 100, 117, 118, 77, 135, 72, 126, + 71, 268, 125, 99, 115, 121, 96, 93, 70, 119, + 94, 92, 87, 81, 0, 0, 0, 112, 123, 136, + 0, 0, 130, 131, 132, 133, 269, 267, 266, 265, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 64, 0, 86, 131, 103, 79, 121, + 0, 0, 0, 0, 0, 67, 0, 89, 134, 106, + 82, 124, } var yyPact = [...]int{ - 1358, -1000, -157, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 713, 732, -1000, - -1000, -1000, -1000, -1000, 543, 5674, -3, 35, 18, 7629, - 34, 185, 8127, -1000, -1000, -1000, -1000, -1000, 517, -1000, - -1000, -1000, -1000, -1000, 698, 711, 542, 686, 612, -1000, - -4, 6776, 7463, 8293, -1000, 382, 27, 8127, -129, -9, + 255, -1000, -154, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 725, 759, + -1000, -1000, -1000, -1000, -1000, -1000, 552, 5458, -7, 32, + 16, 7413, 31, 1035, 7911, -1000, -1000, -1000, -1000, -1000, + 482, -1000, -1000, -1000, -1000, -1000, 720, 723, 523, 707, + 625, -1000, 4322, -17, 6560, 7247, 8077, -1000, 319, 28, + 7911, -127, -19, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 
-1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 30, 7911, -1000, + 7911, -20, 317, -20, 7911, -1000, 80, -1000, -1000, -1000, + 7911, 283, 660, 24, 2471, 2471, 2471, 2471, -48, 2471, + 2471, 575, -1000, -1000, -1000, -1000, 2471, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 303, 692, 4719, 4719, 725, + -1000, 482, -1000, -1000, -1000, 657, -1000, -1000, 195, 746, + -1000, 5292, 79, -1000, 4719, 1489, 465, -1000, -1000, 465, + -1000, -1000, 40, -1000, -1000, 5095, 5095, 5095, 5095, 5095, + 5095, 5095, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 465, -1000, 4522, 465, + 465, 465, 465, 465, 465, 4719, 465, 465, 465, 465, + 465, 465, 465, 465, 465, 465, 465, 465, 465, 7081, + 438, 774, -1000, -1000, -1000, 704, 6031, 6394, 7911, 499, + -1000, 358, 7745, 3101, -1000, -1000, -1000, -1000, 658, -1000, + 124, -1000, 75, 432, -1000, 1326, 278, 2471, 17, 7911, + 150, 7911, 2471, -5, 7911, 701, 569, 7911, -1000, 3731, + -1000, 2471, 2471, 2471, 2471, 2471, 2471, 2471, 2471, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 31, 8127, -1000, 8127, -14, 380, - -14, 8127, -1000, 76, -1000, -1000, -1000, 8127, 378, 662, - 30, 2715, 2715, 2715, 2715, -46, 2715, 2715, 558, -1000, - -1000, -1000, -1000, 2715, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, 307, 657, 4747, 4747, 713, -1000, 517, -1000, - -1000, -1000, 656, -1000, -1000, 184, 7297, 497, 617, -1000, - -1000, -1000, 683, 6247, 6610, 8127, 498, -1000, 390, 7961, - 3133, -1000, -1000, -1000, -1000, 648, -1000, 111, -1000, 75, - -1000, -1000, 452, -1000, 1640, 333, 2715, 19, 8127, 140, - 8127, 2715, 13, 8127, 681, 553, 8127, -1000, 3760, -1000, - 2715, 2715, 2715, 2715, 2715, 2715, 2715, 2715, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, @@ -1450,226 +1438,220 @@ var yyPact = [...]int{ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 2471, 2471, -1000, -1000, 7911, + -1000, -1000, -1000, -1000, 753, 103, 609, 57, 458, -1000, + 393, 720, 303, 625, 6197, 597, -1000, -1000, 7911, -1000, + 4719, 4719, 253, -1000, 6892, -1000, -1000, 2891, 111, 5095, + 224, 179, 5095, 5095, 5095, 5095, 5095, 5095, 5095, 5095, + 5095, 5095, 5095, 5095, 5095, 5095, 5095, 223, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 276, -1000, 482, 655, + 655, 76, 76, 76, 76, 76, 76, 1707, 3928, 303, + 427, 125, 4522, 4322, 4322, 4719, 4719, 4322, 709, 140, + 125, 7579, -1000, 303, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 4322, 4322, 4322, 4322, -35, 7911, -1000, 7745, 6560, + 6560, 6560, 6560, 6560, -1000, 621, 614, -1000, 611, 610, + 617, 7911, -1000, 400, 6031, 82, 465, -1000, 6726, -1000, + -1000, -35, 6560, 7911, -1000, -1000, 7745, 358, -1000, -1000, + -1000, -1000, 4719, 3521, 2261, 200, 188, -102, -1000, -1000, + 473, -1000, 473, 473, 473, 473, -79, -79, -79, -79, + -1000, -1000, -1000, -1000, -1000, 502, -1000, 473, 473, 473, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 490, 490, + 490, 474, 474, 469, -1000, 7911, -1000, 695, 72, -1000, + 7911, -1000, -1000, 7911, 2471, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 2715, 2715, -1000, -1000, 8127, -1000, - -1000, -1000, -1000, 727, 100, 360, -1000, 4747, 1364, 476, - 476, -1000, -1000, 51, -1000, -1000, 5123, 5123, 5123, 5123, - 5123, 5123, 5123, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 476, 73, -1000, - 4550, 476, 476, 476, 476, 476, 476, 4747, 
476, 476, - 476, 476, 476, 476, 476, 476, 476, 476, 476, 476, - 476, 472, -1000, 293, 698, 307, 612, 6413, 578, -1000, - -1000, -29, 8127, -1000, 7961, 6776, 6776, 6776, 6776, 6776, - -1000, 601, 590, -1000, 600, 571, 586, 8127, -1000, 419, - 307, 6247, 57, 476, -1000, 7108, -1000, -1000, -29, 6776, - 8127, -1000, -1000, 7961, 390, -1000, -1000, -1000, -1000, 4747, - 3551, 2297, 87, 180, -94, -1000, -1000, 487, -1000, 487, - 487, 487, 487, -72, -72, -72, -72, -1000, -1000, -1000, - -1000, -1000, 541, -1000, 487, 487, 487, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 521, 521, 521, 491, 491, - 505, -1000, 8127, -1000, 680, 85, -1000, 8127, -1000, -1000, - 8127, 2715, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 618, 4747, 4747, - 217, 4747, 4747, 79, 5123, 227, 169, 5123, 5123, 5123, - 5123, 5123, 5123, 5123, 5123, 5123, 5123, 5123, 5123, 5123, - 5123, 5123, 225, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 325, -1000, 517, 506, 506, 82, 82, 82, 82, - 82, 82, 5311, 3957, 3551, 370, 195, 4550, 4351, 4351, - 4747, 4747, 4351, 692, 130, 195, 7795, -1000, 307, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 4351, 4351, 4351, 4351, - 4747, -1000, -1000, -1000, 657, -1000, 692, 705, -1000, 635, - 634, 4351, -1000, 591, 476, -1000, 504, 617, 525, 552, - 638, -1000, -1000, -1000, -1000, 587, -1000, 572, -1000, -1000, - -1000, -1000, -1000, 307, -1000, 26, 24, 22, 7795, -1000, - 720, 440, -1000, -1000, -1000, 195, -1000, 72, -1000, 465, - 2088, -1000, -1000, -1000, -1000, -1000, -1000, 518, 671, 132, - 292, -1000, -1000, 664, -1000, 155, -102, -1000, -1000, 233, - -72, -72, -1000, -1000, 41, 640, 41, 41, 41, 246, - -1000, -1000, -1000, -1000, 228, -1000, -1000, -1000, 210, -1000, - 551, 7795, 2715, -1000, -1000, 135, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -30, -1000, - 2715, -1000, 610, 79, 138, -1000, -1000, 261, -1000, -1000, - 195, 195, 
1144, -1000, -1000, -1000, -1000, 227, 5123, 5123, - 5123, 852, 1144, 1446, 245, 663, 82, 74, 74, 81, - 81, 81, 81, 81, 211, 211, -1000, -1000, -1000, 307, - -1000, -1000, -1000, 307, 4351, 463, -1000, -1000, 5508, 68, - 476, 4747, -1000, 311, 311, 375, 345, 311, 4351, 168, - -1000, 4747, 307, -1000, 311, 307, 311, 311, -1000, -1000, - 8127, -1000, -1000, -1000, -1000, 500, 550, 7961, 476, -1000, - 6059, 7795, 713, 4747, -1000, -1000, 4747, 503, -1000, 4747, - -1000, -1000, -1000, -1000, 476, 476, 476, 303, -1000, 713, - -1000, 3342, 2297, -1000, 2297, 7795, -1000, 279, -1000, -1000, - 548, 23, -1000, -1000, -1000, 387, 41, 41, -1000, 259, - 83, -1000, -1000, -1000, 351, -1000, 458, 349, 8127, -1000, - -1000, -1000, 8127, -1000, -1000, -1000, -1000, -1000, 7795, -1000, - -1000, -1000, -1000, -1000, -1000, 852, 1144, 1404, -1000, 5123, - 5123, -1000, -1000, 311, 4351, -1000, -1000, 6942, -1000, -1000, - 2924, 4351, 195, -1000, -1000, 139, 225, 139, -138, 435, - 125, -1000, 4747, 144, -1000, -1000, -1000, -1000, -1000, -1000, - 720, 6776, -1000, 673, 459, 409, -1000, -1000, 4154, 307, - 306, 61, 303, 698, 195, 195, 7795, 195, 7795, 7795, - 7795, 5871, 7795, 698, -1000, 2088, -1000, 301, -1000, 487, - -1000, -87, 726, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 237, 201, -1000, 192, 2715, - -1000, -1000, 675, -1000, 5123, 1144, 1144, -1000, -1000, -1000, - -1000, 58, 307, 307, 487, 487, -1000, 487, 491, -1000, - 487, -54, 487, -55, 307, 307, 476, -135, -1000, 195, - 4747, 718, 445, 670, -1000, 476, -1000, -1000, 468, 7795, - 7795, -1000, -1000, 278, -1000, 270, 270, 270, 57, -1000, - -1000, 505, 7795, -1000, 104, -1000, -119, -1000, 385, 371, - -1000, 476, 1144, 2506, -1000, -1000, -1000, 37, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, 5123, 307, 235, 195, - 716, 709, 725, -1000, 476, -1000, 517, 55, -1000, 7795, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 161, 668, -1000, - 667, -1000, -1000, -1000, -32, 
-1000, -1000, -1000, 29, -1000, - -1000, -1000, 4747, 4747, 7961, 409, 307, 7795, -1000, -1000, - 196, -1000, -1000, 267, -1000, 7795, 307, 21, -149, 195, - 394, 390, -1000, -1000, -1000, -1000, -32, 624, -1000, 607, - -144, -152, -1000, -37, -1000, 605, -1000, -39, -147, 476, - -150, 4935, -153, 1119, 307, -1000, -1000, -1000, + 638, 4719, 4719, 3521, 4719, -1000, -1000, -1000, 692, -1000, + 709, 714, -1000, 650, 648, 4322, -1000, -1000, 111, 134, + -1000, -1000, 286, -1000, -1000, -1000, -1000, 56, 465, -1000, + 1504, -1000, -1000, -1000, -1000, 224, 5095, 5095, 5095, 25, + 1504, 1613, 309, 1136, 76, 346, 346, 89, 89, 89, + 89, 89, 605, 605, -1000, -1000, -1000, 303, -1000, -1000, + -1000, 303, 4322, 443, -1000, -1000, 4719, -1000, 303, 398, + 398, 369, 306, 398, 4322, 141, -1000, 4719, 303, -1000, + 398, 303, 398, 398, 483, 465, -1000, 430, 774, 495, + 568, 703, -1000, -1000, -1000, -1000, 612, -1000, 599, -1000, + -1000, -1000, -1000, -1000, 27, 22, 20, 7579, -1000, 740, + 408, -1000, -1000, -1000, 125, -1000, 55, 406, 2051, -1000, + -1000, -1000, -1000, -1000, -1000, 486, 670, 83, 263, -1000, + -1000, 662, -1000, 156, -104, -1000, -1000, 231, -79, -79, + -1000, -1000, 37, 654, 37, 37, 37, 245, -1000, -1000, + -1000, -1000, 229, -1000, -1000, -1000, 225, -1000, 567, 7579, + 2471, -1000, -1000, 254, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -36, -1000, 2471, -1000, + 635, 125, 125, -1000, -1000, 7911, -1000, -1000, -1000, -1000, + 468, -1000, -1000, -1000, 2681, 4322, -1000, 25, 1504, 1322, + -1000, 5095, 5095, -1000, -1000, 398, 4322, 125, -1000, -1000, + -1000, 310, 223, 310, -137, 418, 129, -1000, 4719, 256, + -1000, -1000, -1000, -1000, -1000, 556, 7745, 465, -1000, 5843, + 7579, 725, 4719, -1000, -1000, 4719, 484, -1000, 4719, -1000, + -1000, -1000, 465, 465, 465, 334, -1000, 725, -1000, 3311, + 2261, -1000, 2261, 7579, -1000, 251, -1000, -1000, 555, 33, + -1000, -1000, -1000, 345, 37, 37, -1000, 249, 146, -1000, 
+ -1000, -1000, 372, -1000, 404, 344, 7911, -1000, -1000, -1000, + 7911, -1000, -1000, -1000, -1000, -1000, 7579, -1000, -1000, -1000, + 740, 6560, -1000, -1000, 303, -1000, 5095, 1504, 1504, -1000, + -1000, 303, 473, 473, -1000, 473, 474, -1000, 473, -61, + 473, -63, 303, 303, 465, -133, -1000, 125, 4719, -1000, + 681, 460, 370, -1000, -1000, 4125, 303, 336, 50, 334, + 720, 125, 125, 7579, 125, 7579, 7579, 7579, 5655, 7579, + 720, 2051, -1000, 329, -1000, 473, -1000, -97, 750, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 243, 214, -1000, 213, 2471, -1000, -1000, 693, 736, + 383, -1000, 1504, -1000, -1000, 26, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 5095, 303, 242, 125, 669, -1000, + 465, -1000, -1000, 616, 7579, 7579, -1000, -1000, 324, -1000, + 302, 302, 302, 82, -1000, -1000, 469, 7579, -1000, 101, + -1000, -117, -1000, 339, 337, -1000, 465, 729, 722, -1000, + -1000, 78, -1000, -1000, 749, -1000, 465, -1000, 482, 43, + -1000, 7579, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 193, + 666, -1000, 659, -1000, -1000, -1000, -37, -1000, 4719, 4719, + 303, 42, -144, 7745, 370, 303, 7579, -1000, -1000, 233, + -1000, -1000, 287, -1000, 7579, 125, 368, -1000, 630, -140, + -150, 358, -1000, -1000, -1000, -1000, -37, 643, -1000, 628, + -1000, -1000, -47, -142, -41, -145, 465, -151, 4907, -1000, + 1218, 303, -1000, -1000, } var yyPgo = [...]int{ - 0, 923, 28, 618, 922, 919, 917, 913, 912, 910, - 909, 908, 907, 905, 904, 898, 897, 896, 880, 879, - 87, 876, 875, 873, 41, 871, 56, 870, 869, 26, - 44, 22, 30, 102, 868, 19, 88, 64, 867, 37, - 866, 865, 862, 861, 53, 858, 857, 1111, 856, 855, - 8, 21, 853, 847, 845, 844, 51, 49, 843, 842, - 840, 839, 836, 835, 35, 2, 5, 4, 13, 833, - 696, 12, 831, 45, 828, 826, 825, 822, 24, 821, - 46, 820, 17, 43, 818, 15, 58, 33, 18, 6, - 817, 48, 816, 558, 814, 100, 813, 812, 811, 810, - 808, 34, 300, 852, 27, 25, 806, 804, 11, 1174, - 32, 55, 16, 800, 40, 38, 20, 799, 794, 23, - 
792, 791, 788, 787, 786, 785, 84, 782, 780, 778, - 10, 42, 777, 776, 54, 9, 775, 774, 773, 772, - 50, 771, 36, 766, 765, 750, 31, 14, 749, 7, - 747, 746, 3, 745, 744, 743, 0, 83, 742, 738, - 74, + 0, 938, 10, 628, 936, 935, 934, 933, 930, 929, + 928, 927, 926, 925, 924, 922, 921, 918, 917, 916, + 914, 83, 913, 910, 909, 40, 908, 56, 907, 906, + 26, 66, 27, 31, 864, 905, 19, 45, 113, 904, + 28, 903, 901, 900, 898, 39, 895, 894, 1114, 893, + 891, 14, 21, 890, 889, 888, 887, 48, 153, 886, + 885, 884, 882, 881, 880, 36, 4, 3, 5, 12, + 879, 144, 7, 878, 33, 877, 876, 871, 869, 35, + 868, 37, 866, 16, 34, 863, 13, 43, 22, 18, + 8, 862, 77, 861, 528, 860, 110, 857, 856, 854, + 852, 849, 50, 274, 338, 46, 52, 848, 847, 9, + 1148, 47, 44, 15, 846, 20, 53, 23, 842, 831, + 25, 829, 827, 826, 825, 813, 812, 65, 811, 810, + 808, 29, 54, 807, 805, 42, 30, 804, 803, 802, + 796, 38, 795, 32, 794, 788, 787, 24, 17, 785, + 6, 784, 782, 2, 781, 773, 772, 0, 461, 770, + 767, 72, } var yyR1 = [...]int{ - 0, 154, 155, 155, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, - 2, 3, 4, 4, 5, 5, 6, 6, 23, 23, - 7, 8, 8, 158, 158, 42, 42, 86, 86, 9, - 9, 9, 90, 90, 90, 107, 107, 117, 117, 10, - 10, 10, 10, 15, 143, 144, 144, 144, 140, 120, - 120, 120, 123, 123, 121, 121, 121, 121, 121, 121, - 121, 122, 122, 122, 122, 122, 124, 124, 124, 124, - 124, 125, 125, 125, 125, 125, 125, 125, 125, 125, - 125, 125, 125, 125, 125, 139, 139, 126, 126, 134, - 134, 135, 135, 135, 132, 132, 133, 133, 136, 136, - 136, 127, 127, 127, 127, 127, 127, 129, 129, 137, - 137, 130, 130, 130, 131, 131, 138, 138, 138, 138, - 138, 128, 128, 141, 148, 148, 148, 148, 142, 142, - 150, 150, 149, 145, 145, 145, 146, 146, 146, 147, - 147, 147, 11, 11, 11, 11, 11, 153, 151, 151, - 152, 152, 12, 13, 13, 13, 14, 14, 16, 17, - 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, - 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, - 17, 17, 118, 118, 118, 18, 18, 19, 19, 19, - 19, 19, 159, 20, 21, 21, 22, 22, 22, 26, - 26, 26, 24, 24, 25, 25, 31, 31, 
30, 30, - 32, 32, 32, 32, 106, 106, 106, 105, 105, 34, - 34, 35, 35, 36, 36, 37, 37, 37, 49, 49, - 85, 85, 87, 87, 38, 38, 38, 38, 39, 39, - 40, 40, 41, 41, 113, 113, 112, 112, 112, 111, - 111, 43, 43, 43, 45, 44, 44, 44, 44, 46, - 46, 48, 48, 47, 47, 50, 50, 50, 50, 51, - 51, 33, 33, 33, 33, 33, 33, 33, 94, 94, - 53, 53, 52, 52, 52, 52, 52, 52, 52, 52, - 52, 52, 63, 63, 63, 63, 63, 63, 54, 54, - 54, 54, 54, 54, 54, 29, 29, 64, 64, 64, - 70, 65, 65, 57, 57, 57, 57, 57, 57, 57, - 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, - 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, - 57, 57, 57, 57, 61, 61, 61, 59, 59, 59, - 59, 59, 59, 59, 59, 59, 60, 60, 60, 60, - 60, 60, 60, 60, 160, 160, 62, 62, 62, 62, - 27, 27, 27, 27, 27, 116, 116, 119, 119, 119, - 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, - 74, 74, 28, 28, 72, 72, 73, 75, 75, 71, - 71, 71, 56, 56, 56, 56, 56, 56, 56, 56, - 58, 58, 58, 76, 76, 77, 77, 78, 78, 79, - 79, 80, 81, 81, 81, 82, 82, 82, 82, 83, - 83, 83, 55, 55, 55, 55, 55, 55, 84, 84, - 84, 84, 88, 88, 66, 66, 68, 68, 67, 69, - 89, 89, 91, 92, 92, 95, 95, 96, 96, 93, - 93, 97, 97, 97, 97, 97, 97, 97, 97, 97, - 97, 97, 98, 98, 98, 99, 99, 100, 100, 100, - 108, 108, 103, 103, 104, 104, 109, 109, 110, 110, - 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, - 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, - 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, - 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, - 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, - 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, - 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, - 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, - 101, 101, 101, 101, 101, 101, 101, 101, 102, 102, + 0, 155, 156, 156, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, + 2, 2, 6, 3, 4, 4, 5, 5, 7, 7, + 24, 24, 8, 9, 9, 159, 159, 43, 43, 87, + 87, 10, 10, 10, 91, 91, 91, 108, 108, 118, + 118, 11, 11, 11, 11, 16, 144, 145, 145, 145, + 141, 121, 121, 121, 124, 124, 122, 122, 122, 122, + 122, 122, 122, 
123, 123, 123, 123, 123, 125, 125, + 125, 125, 125, 126, 126, 126, 126, 126, 126, 126, + 126, 126, 126, 126, 126, 126, 126, 140, 140, 127, + 127, 135, 135, 136, 136, 136, 133, 133, 134, 134, + 137, 137, 137, 128, 128, 128, 128, 128, 128, 130, + 130, 138, 138, 131, 131, 131, 132, 132, 139, 139, + 139, 139, 139, 129, 129, 142, 149, 149, 149, 149, + 143, 143, 151, 151, 150, 146, 146, 146, 147, 147, + 147, 148, 148, 148, 12, 12, 12, 12, 12, 154, + 152, 152, 153, 153, 13, 14, 14, 14, 15, 15, + 17, 18, 18, 18, 18, 18, 18, 18, 18, 18, + 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, + 18, 18, 18, 18, 119, 119, 119, 19, 19, 20, + 20, 20, 20, 20, 160, 21, 22, 22, 23, 23, + 23, 27, 27, 27, 25, 25, 26, 26, 32, 32, + 31, 31, 33, 33, 33, 33, 107, 107, 107, 106, + 106, 35, 35, 36, 36, 37, 37, 38, 38, 38, + 50, 50, 86, 86, 88, 88, 39, 39, 39, 39, + 40, 40, 41, 41, 42, 42, 114, 114, 113, 113, + 113, 112, 112, 44, 44, 44, 46, 45, 45, 45, + 45, 47, 47, 49, 49, 48, 48, 51, 51, 51, + 51, 52, 52, 34, 34, 34, 34, 34, 34, 34, + 95, 95, 54, 54, 53, 53, 53, 53, 53, 53, + 53, 53, 53, 53, 64, 64, 64, 64, 64, 64, + 55, 55, 55, 55, 55, 55, 55, 30, 30, 65, + 65, 65, 71, 66, 66, 58, 58, 58, 58, 58, + 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, + 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, + 58, 58, 58, 58, 58, 58, 62, 62, 62, 60, + 60, 60, 60, 60, 60, 60, 60, 60, 61, 61, + 61, 61, 61, 61, 61, 61, 161, 161, 63, 63, + 63, 63, 28, 28, 28, 28, 28, 117, 117, 120, + 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, + 120, 120, 75, 75, 29, 29, 73, 73, 74, 76, + 76, 72, 72, 72, 57, 57, 57, 57, 57, 57, + 57, 57, 59, 59, 59, 77, 77, 78, 78, 79, + 79, 80, 80, 81, 82, 82, 82, 83, 83, 83, + 83, 84, 84, 84, 56, 56, 56, 56, 56, 56, + 85, 85, 85, 85, 89, 89, 67, 67, 69, 69, + 68, 70, 90, 90, 92, 93, 93, 96, 96, 97, + 97, 94, 94, 98, 98, 98, 98, 98, 98, 98, + 98, 98, 98, 98, 99, 99, 99, 100, 100, 101, + 101, 101, 109, 109, 104, 104, 105, 105, 110, 110, + 111, 111, 102, 102, 102, 102, 102, 102, 102, 102, + 102, 102, 102, 102, 
102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, - 102, 102, 102, 102, 102, 102, 102, 102, 102, 156, - 157, 114, 115, 115, 115, + 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, + 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, + 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, + 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, + 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, + 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, + 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, + 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, + 103, 157, 158, 115, 116, 116, 116, } var yyR2 = [...]int{ 0, 2, 0, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 4, 6, - 7, 10, 1, 3, 1, 3, 7, 8, 1, 1, - 8, 8, 6, 1, 1, 1, 3, 0, 4, 3, - 4, 5, 1, 2, 1, 1, 1, 1, 1, 2, - 8, 4, 6, 4, 4, 1, 3, 3, 8, 3, - 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, - 1, 4, 4, 2, 2, 3, 3, 3, 3, 1, - 1, 1, 1, 1, 4, 1, 3, 0, 3, 0, - 5, 0, 3, 5, 0, 1, 0, 1, 0, 1, - 2, 0, 2, 2, 2, 2, 2, 0, 3, 0, - 1, 0, 3, 3, 0, 2, 0, 2, 1, 2, - 1, 0, 2, 5, 2, 3, 2, 2, 1, 1, - 1, 3, 2, 0, 1, 3, 1, 2, 3, 1, - 1, 1, 6, 7, 7, 4, 5, 7, 1, 3, - 8, 8, 5, 4, 6, 5, 3, 2, 3, 4, - 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, - 3, 4, 3, 3, 4, 2, 4, 2, 2, 2, - 2, 3, 0, 1, 1, 2, 1, 2, 2, 2, - 2, 2, 0, 2, 0, 2, 1, 2, 2, 0, - 1, 1, 0, 1, 0, 1, 0, 1, 1, 3, - 1, 2, 3, 5, 0, 1, 2, 1, 1, 0, - 2, 1, 3, 1, 1, 1, 3, 3, 3, 7, - 1, 3, 1, 3, 4, 4, 4, 3, 2, 4, - 0, 1, 0, 2, 0, 1, 0, 1, 2, 1, - 1, 1, 2, 2, 1, 2, 3, 2, 3, 2, - 2, 2, 1, 1, 3, 0, 5, 5, 5, 0, - 2, 1, 3, 3, 2, 3, 1, 2, 0, 3, - 1, 1, 3, 3, 4, 4, 5, 3, 4, 5, - 6, 2, 1, 2, 1, 2, 1, 2, 1, 1, - 1, 1, 1, 1, 1, 0, 2, 1, 1, 1, - 3, 1, 3, 1, 1, 1, 1, 1, 3, 3, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, + 6, 7, 5, 10, 1, 3, 1, 3, 
7, 8, + 1, 1, 8, 8, 6, 1, 1, 1, 3, 0, + 4, 3, 4, 5, 1, 2, 1, 1, 1, 1, + 1, 2, 8, 4, 6, 4, 4, 1, 3, 3, + 8, 3, 1, 1, 2, 1, 1, 1, 1, 1, + 1, 1, 1, 2, 2, 2, 2, 2, 1, 2, + 2, 2, 1, 4, 4, 2, 2, 3, 3, 3, + 3, 1, 1, 1, 1, 1, 4, 1, 3, 0, + 3, 0, 5, 0, 3, 5, 0, 1, 0, 1, + 0, 1, 2, 0, 2, 2, 2, 2, 2, 0, + 3, 0, 1, 0, 3, 3, 0, 2, 0, 2, + 1, 2, 1, 0, 2, 5, 2, 3, 2, 2, + 1, 1, 1, 3, 2, 0, 1, 3, 1, 2, + 3, 1, 1, 1, 6, 7, 7, 4, 5, 7, + 1, 3, 8, 8, 5, 4, 6, 5, 3, 2, + 3, 4, 4, 4, 4, 4, 4, 4, 4, 3, + 3, 3, 3, 4, 3, 3, 4, 2, 4, 2, + 2, 2, 2, 3, 0, 1, 1, 2, 1, 2, + 2, 2, 2, 2, 0, 2, 0, 2, 1, 2, + 2, 0, 1, 1, 0, 1, 0, 1, 0, 1, + 1, 3, 1, 2, 3, 5, 0, 1, 2, 1, + 1, 0, 2, 1, 3, 1, 1, 1, 3, 3, + 3, 7, 1, 3, 1, 3, 4, 4, 4, 3, + 2, 4, 0, 1, 0, 2, 0, 1, 0, 1, + 2, 1, 1, 1, 2, 2, 1, 2, 3, 2, + 3, 2, 2, 2, 1, 1, 3, 0, 5, 5, + 5, 0, 2, 1, 3, 3, 2, 3, 1, 2, + 0, 3, 1, 1, 3, 3, 4, 4, 5, 3, + 4, 5, 6, 2, 1, 2, 1, 2, 1, 2, + 1, 1, 1, 1, 1, 1, 1, 0, 2, 1, + 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, - 1, 1, 1, 1, 4, 5, 6, 4, 4, 6, - 6, 6, 9, 7, 5, 4, 2, 2, 2, 2, - 2, 2, 2, 2, 0, 2, 4, 4, 4, 4, - 0, 3, 4, 7, 3, 1, 1, 2, 3, 3, - 1, 2, 2, 1, 2, 1, 2, 2, 1, 2, - 0, 1, 0, 2, 1, 2, 4, 0, 2, 1, - 3, 5, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 2, 2, 0, 3, 0, 2, 0, 3, 1, - 3, 2, 0, 1, 1, 0, 2, 4, 4, 0, - 2, 4, 2, 1, 3, 5, 4, 6, 1, 3, - 3, 5, 0, 5, 1, 3, 1, 2, 3, 1, - 1, 3, 3, 1, 1, 0, 2, 0, 3, 0, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, - 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, + 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, + 2, 3, 1, 1, 1, 1, 4, 5, 6, 4, + 4, 6, 6, 6, 9, 7, 5, 4, 2, 2, + 2, 2, 2, 2, 2, 2, 0, 2, 4, 4, + 4, 4, 0, 3, 4, 7, 3, 1, 1, 2, + 3, 3, 1, 2, 2, 1, 2, 1, 2, 2, + 1, 2, 0, 1, 0, 2, 1, 2, 4, 0, + 2, 1, 3, 5, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 2, 2, 0, 3, 0, 2, 0, + 3, 1, 3, 2, 0, 1, 1, 0, 2, 4, + 4, 0, 2, 4, 2, 1, 3, 5, 4, 6, + 1, 3, 3, 5, 0, 5, 1, 3, 1, 2, + 3, 1, 1, 3, 3, 1, 1, 0, 2, 0, + 3, 0, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, + 1, 1, 0, 
2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, @@ -1686,247 +1668,249 @@ var yyR2 = [...]int{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 0, 0, 1, 1, + 1, 1, 1, 0, 0, 1, 1, } var yyChk = [...]int{ - -1000, -154, -1, -2, -6, -7, -8, -9, -10, -11, - -12, -13, -14, -16, -17, -18, -19, -3, -4, 6, - -23, 8, 9, 29, -15, 111, 112, 114, 113, 131, - 115, 124, 48, 24, 125, 126, 129, 130, -156, 7, - 201, 52, -155, 214, -78, 14, -22, 5, -20, -159, - -20, -20, -20, -20, -143, 52, -100, 118, 69, 116, - 122, -103, 55, -102, 207, 149, 143, 170, 162, 160, - 163, 189, 64, 127, 158, 154, 152, 26, 175, 212, - 153, 190, 147, 148, 174, 31, 209, 33, 135, 173, - 169, 172, 146, 168, 37, 188, 165, 155, 17, 130, - 133, 123, 137, 211, 151, 134, 129, 191, 36, 179, - 145, 141, 166, 136, 156, 157, 171, 144, 167, 138, - 180, 213, 164, 161, 142, 139, 140, 184, 185, 186, - 187, 210, 159, 181, -93, 118, 120, 116, 116, 117, - 118, 116, -47, -109, 55, -102, 118, 116, 105, 163, - 111, 182, 117, 31, 137, -118, 116, 183, 140, 184, - 185, 186, 187, 55, 191, 190, -109, -114, -114, -114, - -114, -114, -2, -82, 16, 15, -5, -3, -156, 6, - 19, 20, -26, 38, 39, -21, -93, -35, -36, -37, - -38, -49, -70, -156, -47, 10, -42, -47, -89, -117, - -90, -91, 191, 190, 189, 163, 188, -71, -103, -109, - 55, -102, -144, -140, 55, 117, -47, 201, -96, 121, - 116, -47, -47, -95, 121, 55, -95, -47, 108, -47, - 55, 29, 193, 55, 137, 116, 138, 118, -115, -156, - -104, -103, -101, 70, 21, 23, 177, 73, 105, 15, - 74, 104, 202, 111, 46, 194, 195, 192, 193, 182, - 28, 9, 24, 125, 20, 98, 113, 77, 78, 128, - 22, 126, 68, 18, 49, 10, 12, 13, 121, 120, - 89, 117, 44, 7, 107, 25, 86, 40, 27, 42, - 87, 16, 196, 197, 30, 206, 132, 100, 47, 34, - 71, 66, 50, 69, 14, 45, 88, 114, 201, 43, - 6, 205, 29, 124, 41, 116, 183, 76, 119, 67, - 5, 122, 8, 48, 51, 198, 199, 200, 32, 75, - 11, -115, -115, -115, 141, 142, -115, -115, 50, 
-115, - -157, 54, -83, 18, 30, -33, -52, 71, -57, 28, - 22, -56, -53, -71, -69, -70, 105, 106, 94, 95, - 102, 72, 107, -61, -59, -60, -62, 57, 56, 65, - 58, 59, 60, 61, 66, 67, 68, -103, -109, -67, - -156, 42, 43, 202, 203, 206, 204, 74, 32, 192, - 200, 199, 198, 196, 197, 194, 195, 121, 193, 100, - 201, -79, -80, -33, -78, -2, -20, 34, -24, 20, - 63, -48, 25, -47, 29, 53, -43, -45, -44, -46, - 40, 44, 46, 41, 42, 43, 47, -113, 21, -35, - -2, -156, -112, 133, -111, 21, -109, 57, -47, -158, - 53, 10, 51, 53, -89, -107, -104, 57, 29, 79, - 108, 54, 53, -120, -123, -125, -124, -121, -122, 160, - 161, 105, 164, 166, 167, 168, 169, 170, 171, 172, - 173, 174, 175, 127, 156, 157, 158, 159, 143, 144, - 145, 146, 147, 148, 149, 151, 152, 153, 154, 155, - 55, -115, 118, -47, 71, -47, -115, 119, -47, 22, - 50, -47, -110, -109, -101, -115, -115, -115, -115, -115, - -115, -115, -115, -115, -115, -47, 8, 89, 70, 69, - 86, 53, 17, -33, -54, 89, 71, 87, 88, 73, - 91, 90, 101, 94, 95, 96, 97, 98, 99, 100, - 92, 93, 104, 79, 80, 81, 82, 83, 84, 85, - -94, -156, -70, -156, 109, 110, -57, -57, -57, -57, - -57, -57, -57, -156, 108, -65, -33, -156, -156, -156, - -156, -156, -156, -156, -74, -33, -156, -160, -156, -160, - -160, -160, -160, -160, -160, -160, -156, -156, -156, -156, - 53, -81, 23, 24, -82, -157, -26, -58, -103, 58, - 61, -25, 41, -86, 133, -47, -89, -36, -37, -37, - -36, -37, 40, 40, 40, 45, 40, 45, 40, -44, - -109, -157, -157, -2, -50, 48, 120, 49, -156, -111, - -86, -35, -47, -91, -114, -33, -104, -110, -101, -145, - -146, -147, -104, 57, 58, -140, -141, -148, 123, 122, - -142, 117, 27, -136, 66, 71, -132, 180, -126, 52, - -126, -126, -126, -126, -130, 163, -130, -130, -130, 52, - -126, -126, -126, -134, 52, -134, -134, -135, 52, -135, - -108, 51, -47, 22, -97, 114, -153, 112, 177, 163, - 64, 28, 113, 14, 202, 133, 213, 55, 134, -47, - -47, -115, 36, -33, -33, -63, 66, 71, 67, 68, - -33, -33, -57, -64, -67, -70, 62, 89, 87, 88, - 73, -57, -57, -57, -57, -57, -57, -57, 
-57, -57, - -57, -57, -57, -57, -57, -57, -116, 55, 57, 55, - -56, -56, -103, -31, 20, -30, -32, 96, -33, -109, - -104, 53, -157, -30, -30, -33, -33, -30, -24, -72, - -73, 75, -103, -157, -30, -31, -30, -30, -80, -83, - -92, 18, 10, 32, 32, -30, -55, 29, 32, -2, - -156, -156, -51, 11, -40, -39, 50, 51, -41, 50, - -39, 40, 40, -157, 117, 117, 117, -87, -103, -51, - -51, 108, 53, -147, 79, 52, 27, -142, 55, 55, - -127, 28, 66, -133, 181, 58, -130, -130, -131, 104, - 29, -131, -131, -131, -139, 57, 58, 58, 50, -103, - -115, -114, -98, -99, 119, 21, 117, 27, 133, -115, - 37, 66, 67, 68, -64, -57, -57, -57, -29, 128, - 70, -157, -157, -30, 53, -106, -105, 21, -103, 57, - 108, -156, -33, -157, -157, 53, 51, 21, -157, -30, - -75, -73, 77, -33, -157, -157, -157, -157, -157, -47, - -34, 10, -88, 50, -89, -66, -68, -67, -156, -2, - -84, -103, -87, -78, -33, -33, 52, -33, -156, -156, - -156, -157, 53, -78, -104, -146, -147, -150, -149, -103, - 55, -129, 50, 57, 58, 59, 66, 192, 54, -131, - -131, 55, 55, 105, 54, 53, 53, 54, 53, -47, - -47, -114, -103, -29, 70, -57, -57, -157, -32, -105, - 96, -110, -31, -119, 105, 160, 127, 158, 154, 174, - 165, 179, 156, 180, -116, -119, 207, -78, 78, -33, - 76, -51, -35, 26, -88, 53, -157, -157, -157, 53, - 108, -157, -82, -85, -103, -85, -85, -85, -112, -103, - -82, 54, 53, -126, -137, 177, 8, 57, 58, 58, - -115, 25, -57, 108, -157, -157, -126, -126, -126, -135, - -126, 148, -126, 148, -157, -157, -156, -28, 205, -33, - -76, 12, 27, -68, 32, -2, -156, -103, -103, 53, - 54, -157, -157, -157, -50, -108, -149, -138, 123, 27, - 122, 192, 54, 54, -156, 96, -130, 55, -57, -157, - 57, -77, 13, 15, 8, -66, -2, 108, -103, -128, - 64, 27, 27, -151, -152, 133, -27, 89, 210, -33, - -65, -89, -157, -103, 57, -157, 53, -103, -157, 208, - 47, 211, -152, 32, 37, 209, 212, 135, 37, 136, - 210, -156, 211, -57, 132, 212, -157, -157, + -1000, -155, -1, -2, -6, -7, -8, -9, -10, -11, + -12, -13, -14, -15, -17, -18, -19, -20, -3, -4, + 6, 7, -24, 9, 10, 30, -16, 
112, 113, 115, + 114, 132, 116, 125, 49, 25, 126, 127, 130, 131, + -157, 8, 202, 53, -156, 215, -79, 15, -23, 5, + -21, -160, -21, -21, -21, -21, -21, -144, 53, -101, + 119, 70, 117, 123, -104, 56, -103, 208, 150, 144, + 171, 163, 161, 164, 190, 65, 128, 159, 155, 153, + 27, 176, 213, 154, 191, 148, 149, 175, 32, 210, + 34, 136, 174, 170, 173, 147, 169, 38, 189, 166, + 156, 18, 131, 134, 124, 138, 212, 152, 135, 130, + 192, 37, 180, 146, 142, 167, 137, 157, 158, 172, + 145, 168, 139, 181, 214, 165, 162, 143, 140, 141, + 185, 186, 187, 188, 211, 160, 182, -94, 119, 121, + 117, 117, 118, 119, 117, -48, -110, 56, -103, 119, + 117, 106, 164, 112, 183, 118, 32, 138, -119, 117, + 184, 141, 185, 186, 187, 188, 56, 192, 191, -110, + -115, -115, -115, -115, -115, -2, -83, 17, 16, -5, + -3, -157, 6, 20, 21, -27, 39, 40, -22, -33, + 97, -34, -110, -53, 72, -58, 29, 56, -103, 23, + -57, -54, -72, -70, -71, 106, 107, 95, 96, 103, + 73, 108, -62, -60, -61, -63, 58, 57, 66, 59, + 60, 61, 62, 67, 68, 69, -104, -68, -157, 43, + 44, 203, 204, 207, 205, 75, 33, 193, 201, 200, + 199, 197, 198, 195, 196, 122, 194, 101, 202, -94, + -36, -37, -38, -39, -50, -71, -157, -48, 11, -43, + -48, -90, -118, -91, -92, 192, 191, 190, 164, 189, + -72, -104, -110, -145, -141, 56, 118, -48, 202, -97, + 122, 117, -48, -48, -96, 122, 56, -96, -48, 109, + -48, 56, 30, 194, 56, 138, 117, 139, 119, -116, + -157, -105, -104, -102, 71, 22, 24, 178, 74, 106, + 16, 75, 105, 203, 112, 47, 195, 196, 193, 194, + 183, 29, 10, 25, 126, 21, 99, 114, 78, 79, + 129, 23, 127, 69, 19, 50, 11, 13, 14, 122, + 121, 90, 118, 45, 8, 108, 26, 87, 41, 28, + 43, 88, 17, 197, 198, 31, 207, 133, 101, 48, + 35, 72, 67, 51, 70, 15, 46, 89, 115, 202, + 44, 6, 206, 30, 125, 42, 117, 184, 77, 120, + 68, 5, 123, 9, 49, 52, 199, 200, 201, 33, + 76, 12, -116, -116, -116, 142, 143, -116, -116, 51, + -116, -158, 55, -84, 19, 31, -34, -110, -80, -81, + -34, -79, -2, -21, 35, -25, 21, 64, 11, -107, + 71, 70, 87, -106, 22, -104, 58, 109, 
-34, -55, + 90, 72, 88, 89, 74, 92, 91, 102, 95, 96, + 97, 98, 99, 100, 101, 93, 94, 105, 80, 81, + 82, 83, 84, 85, 86, -95, -157, -71, -157, 110, + 111, -58, -58, -58, -58, -58, -58, -58, -157, -2, + -66, -34, -157, -157, -157, -157, -157, -157, -157, -75, + -34, -157, -161, -157, -161, -161, -161, -161, -161, -161, + -161, -157, -157, -157, -157, -49, 26, -48, 30, 54, + -44, -46, -45, -47, 41, 45, 47, 42, 43, 44, + 48, -114, 22, -36, -157, -113, 134, -112, 22, -110, + 58, -48, -159, 54, 11, 52, 54, -90, -108, -105, + 58, 30, 80, 109, 55, 54, -121, -124, -126, -125, + -122, -123, 161, 162, 106, 165, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 128, 157, 158, 159, + 160, 144, 145, 146, 147, 148, 149, 150, 152, 153, + 154, 155, 156, 56, -116, 119, -48, 72, -48, -116, + 120, -48, 23, 51, -48, -111, -110, -102, -116, -116, + -116, -116, -116, -116, -116, -116, -116, -116, -48, 9, + 90, 54, 18, 109, 54, -82, 24, 25, -83, -158, + -27, -59, -104, 59, 62, -26, 42, -48, -34, -34, + -64, 67, 72, 68, 69, -106, 97, -111, -105, -102, + -58, -65, -68, -71, 63, 90, 88, 89, 74, -58, + -58, -58, -58, -58, -58, -58, -58, -58, -58, -58, + -58, -58, -58, -58, -117, 56, 58, 56, -57, -57, + -104, -32, 21, -31, -33, -158, 54, -158, -2, -31, + -31, -34, -34, -31, -25, -73, -74, 76, -104, -158, + -31, -32, -31, -31, -87, 134, -48, -90, -37, -38, + -38, -37, -38, 41, 41, 41, 46, 41, 46, 41, + -45, -110, -158, -51, 49, 121, 50, -157, -112, -87, + -36, -48, -92, -115, -34, -105, -111, -146, -147, -148, + -105, 58, 59, -141, -142, -149, 124, 123, -143, 118, + 28, -137, 67, 72, -133, 181, -127, 53, -127, -127, + -127, -127, -131, 164, -131, -131, -131, 53, -127, -127, + -127, -135, 53, -135, -135, -136, 53, -136, -109, 52, + -48, 23, -98, 115, -154, 113, 178, 164, 65, 29, + 114, 15, 203, 134, 214, 56, 135, -48, -48, -116, + 37, -34, -34, -81, -84, -93, 19, 11, 33, 33, + -31, 67, 68, 69, 109, -157, -65, -58, -58, -58, + -30, 129, 71, -158, -158, -31, 54, -34, -158, -158, + -158, 54, 52, 
22, -158, -31, -76, -74, 78, -34, + -158, -158, -158, -158, -158, -56, 30, 33, -2, -157, + -157, -52, 12, -41, -40, 51, 52, -42, 51, -40, + 41, 41, 118, 118, 118, -88, -104, -52, -52, 109, + 54, -148, 80, 53, 28, -143, 56, 56, -128, 29, + 67, -134, 182, 59, -131, -131, -132, 105, 30, -132, + -132, -132, -140, 58, 59, 59, 51, -104, -116, -115, + -99, -100, 120, 22, 118, 28, 134, -116, 38, -48, + -35, 11, 97, -105, -32, -30, 71, -58, -58, -158, + -33, -120, 106, 161, 128, 159, 155, 175, 166, 180, + 157, 181, -117, -120, 208, -79, 79, -34, 77, -89, + 51, -90, -67, -69, -68, -157, -2, -85, -104, -88, + -79, -34, -34, 53, -34, -157, -157, -157, -158, 54, + -79, -147, -148, -151, -150, -104, 56, -130, 51, 58, + 59, 60, 67, 193, 55, -132, -132, 56, 56, 106, + 55, 54, 54, 55, 54, -48, -48, -115, -104, -52, + -36, -158, -58, -158, -127, -127, -127, -136, -127, 149, + -127, 149, -158, -158, -157, -29, 206, -34, 27, -89, + 54, -158, -158, -158, 54, 109, -158, -83, -86, -104, + -86, -86, -86, -113, -104, -83, 55, 54, -127, -138, + 178, 9, 58, 59, 59, -116, 26, -77, 13, -131, + 56, -58, -158, 58, 28, -69, 33, -2, -157, -104, + -104, 54, 55, -158, -158, -158, -51, -109, -150, -139, + 124, 28, 123, 193, 55, 55, -157, -78, 14, 16, + -28, 90, 211, 9, -67, -2, 109, -104, -129, 65, + 28, 28, -152, -153, 134, -34, -66, -158, 209, 48, + 212, -90, -158, -104, 58, -158, 54, -104, 38, 210, + 213, -153, 33, 38, 136, 211, 137, 212, -157, 213, + -58, 133, -158, -158, } var yyDef = [...]int{ 0, -2, 2, -2, 5, 6, 7, 8, 9, 10, - 11, 12, 13, 14, 15, 16, 17, 427, 0, 202, - 202, 202, 202, 202, 0, 487, 469, 0, 0, 0, - 0, 192, 196, 661, 661, 661, 661, 661, 0, 28, - 29, 659, 1, 3, 435, 0, 0, 206, 209, 204, - 469, 0, 0, 0, 49, 0, 0, 649, 0, 467, - 488, 489, 492, 493, 588, 589, 590, 591, 592, 593, - 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, - 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, - 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, - 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, - 
634, 635, 636, 637, 638, 639, 640, 641, 642, 643, - 644, 645, 646, 647, 648, 650, 651, 652, 653, 654, - 655, 656, 657, 658, 0, 0, 470, 0, 465, 0, - 465, 0, 167, 273, 496, 497, 649, 0, 0, 0, - 0, 662, 662, 662, 662, 0, 662, 662, 185, 187, - 188, 189, 190, 662, 193, 194, 195, 197, 198, 199, - 200, 201, 22, 439, 0, 0, 427, 24, 0, 202, - 207, 208, 212, 210, 211, 203, 0, 0, 231, 233, - 234, 235, 254, 0, 256, 0, 0, 35, 39, 0, - 0, 460, -2, -2, -2, 594, -2, 0, 409, 0, - -2, -2, 0, 55, 0, 0, 662, 0, 0, 0, - 0, 662, 0, 0, 0, 0, 0, 166, 0, 168, - 662, 662, 662, 662, 662, 662, 662, 662, 177, 663, - 664, 494, 495, 500, 501, 502, 503, 504, 505, 506, - 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, - 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, - 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, - 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, - 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, - 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, - 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, - 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, - 587, 178, 179, 180, 662, 662, 182, 183, 0, 191, - 23, 660, 18, 0, 0, 436, 281, 0, 286, 288, - 0, 323, 324, 325, 326, 327, 0, 0, 0, 0, - 0, 0, 0, 350, 351, 352, 353, 412, 413, 414, - 415, 416, 417, 418, 419, 290, 291, 409, 0, 459, - 0, 0, 0, 0, 0, 0, 0, 400, 0, 374, - 374, 374, 374, 374, 374, 374, 374, 0, 0, 0, - 0, 428, 429, 432, 435, 22, 209, 0, 214, 213, - 205, 37, 0, 272, 0, 0, 0, 0, 0, 0, - 261, 0, 0, 264, 0, 0, 0, 0, 255, 0, - 22, 0, 275, 624, 257, 0, 259, 260, 37, 0, - 0, 33, 34, 0, 40, 661, 45, 46, 43, 0, - 0, 143, 0, 108, 104, 60, 61, 97, 63, 97, - 97, 97, 97, 121, 121, 121, 121, 89, 90, 91, - 92, 93, 0, 76, 97, 97, 97, 80, 64, 65, - 66, 67, 68, 69, 70, 99, 99, 99, 101, 101, - 490, 51, 0, 53, 0, 0, 155, 0, 163, 466, - 0, 662, 274, 498, 499, 169, 170, 171, 172, 173, - 174, 175, 176, 181, 184, 186, 440, 0, 0, 0, - 0, 0, 0, 284, 0, 0, 0, 0, 0, 0, + 11, 12, 13, 14, 15, 16, 17, 18, 429, 0, + 204, 204, 204, 204, 204, 
204, 0, 489, 471, 0, + 0, 0, 0, 194, 198, 663, 663, 663, 663, 663, + 0, 30, 31, 661, 1, 3, 437, 0, 0, 208, + 211, 206, 0, 471, 0, 0, 0, 51, 0, 0, + 651, 0, 469, 490, 491, 494, 495, 590, 591, 592, + 593, 594, 595, 596, 597, 598, 599, 600, 601, 602, + 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, + 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, + 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, + 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, + 643, 644, 645, 646, 647, 648, 649, 650, 652, 653, + 654, 655, 656, 657, 658, 659, 660, 0, 0, 472, + 0, 467, 0, 467, 0, 169, 275, 498, 499, 651, + 0, 0, 0, 0, 664, 664, 664, 664, 0, 664, + 664, 187, 189, 190, 191, 192, 664, 195, 196, 197, + 199, 200, 201, 202, 203, 24, 441, 0, 0, 429, + 26, 0, 204, 209, 210, 214, 212, 213, 205, 0, + 222, 226, 0, 283, 0, 288, 290, -2, -2, 0, + 325, 326, 327, 328, 329, 0, 0, 0, 0, 0, + 0, 0, 352, 353, 354, 355, 414, 415, 416, 417, + 418, 419, 420, 421, 292, 293, 411, 461, 0, 0, + 0, 0, 0, 0, 0, 402, 0, 376, 376, 376, + 376, 376, 376, 376, 376, 0, 0, 0, 0, 0, + 0, 233, 235, 236, 237, 256, 0, 258, 0, 0, + 37, 41, 0, 0, 462, -2, -2, -2, 596, -2, + 0, 411, 0, 0, 57, 0, 0, 664, 0, 0, + 0, 0, 664, 0, 0, 0, 0, 0, 168, 0, + 170, 664, 664, 664, 664, 664, 664, 664, 664, 179, + 665, 666, 496, 497, 502, 503, 504, 505, 506, 507, + 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, + 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, + 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, + 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, + 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, + 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, + 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, + 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, + 588, 589, 180, 181, 182, 664, 664, 184, 185, 0, + 193, 25, 662, 19, 0, 0, 438, 0, 430, 431, + 434, 437, 24, 211, 0, 216, 215, 207, 0, 223, + 0, 0, 0, 227, 0, 229, 230, 0, 286, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 308, 309, 310, 311, 312, 313, 314, - 287, 0, 
301, 0, 0, 0, 343, 344, 345, 346, - 347, 348, 0, 216, 0, 0, 321, 0, 0, 0, - 0, 0, 0, 212, 0, 401, 0, 366, 0, 367, - 368, 369, 370, 371, 372, 373, 0, 216, 0, 0, - 0, 431, 433, 434, 439, 25, 212, 0, 420, 0, - 0, 0, 215, 0, 0, 271, 279, 232, 250, 252, - 0, 247, 262, 263, 265, 0, 267, 0, 269, 270, - 236, 237, 320, 22, 238, 0, 0, 0, 0, 258, - 279, 279, 36, 461, 41, 462, 410, 0, -2, 54, - 144, 146, 149, 150, 151, 56, 57, 0, 0, 0, - 0, 138, 139, 111, 109, 0, 106, 105, 62, 0, - 121, 121, 83, 84, 124, 0, 124, 124, 124, 0, - 77, 78, 79, 71, 0, 72, 73, 74, 0, 75, - 0, 0, 662, 468, 661, 482, 156, 471, 472, 473, - 474, 475, 476, 477, 478, 479, 480, 481, 0, 162, - 662, 165, 0, 282, 283, 285, 302, 0, 304, 306, - 437, 438, 292, 293, 317, 318, 319, 0, 0, 0, - 0, 315, 297, 0, 328, 329, 330, 331, 332, 333, - 334, 335, 336, 337, 338, 339, 342, 385, 386, 0, - 340, 341, 349, 0, 0, 217, 218, 220, 224, 0, - 410, 0, 458, 0, 0, 0, 0, 0, 0, 407, - 404, 0, 0, 375, 0, 0, 0, 0, 430, 19, - 0, 463, 464, 421, 422, 229, 452, 0, 0, -2, - 0, 0, 427, 0, 244, 251, 0, 0, 245, 0, - 246, 266, 268, -2, 0, 0, 0, 0, 242, 427, - 32, 0, 0, 147, 0, 0, 134, 0, 136, 137, - 117, 0, 110, 59, 107, 0, 124, 124, 85, 0, - 0, 86, 87, 88, 0, 95, 0, 0, 0, 491, - 52, 152, 0, 661, 483, 484, 485, 486, 0, 164, - 441, 303, 305, 307, 294, 315, 298, 0, 295, 0, - 0, 289, 354, 0, 0, 221, 225, 0, 227, 228, - 0, 216, 322, 357, 358, 0, 0, 0, 0, 427, - 0, 405, 0, 0, 365, 376, 377, 378, 379, 20, - 279, 0, 26, 0, 452, 442, 454, 456, 0, 22, - 0, 448, 0, 435, 280, 248, 0, 253, 0, 0, - 0, 256, 0, 435, 411, 145, 148, 0, 140, 97, - 135, 119, 0, 112, 113, 114, 115, 116, 98, 81, - 82, 125, 122, 123, 94, 0, 0, 102, 0, 662, - 153, 154, 0, 296, 0, 316, 299, 355, 219, 226, - 222, 0, 0, 0, 97, 97, 390, 97, 101, 393, - 97, 395, 97, 398, 0, 0, 0, 402, 364, 408, - 0, 423, 230, 0, 27, 0, 457, -2, 0, 0, - 0, 38, 30, 0, 240, 0, 0, 0, 275, 243, - 31, 490, 0, 142, 126, 120, 0, 96, 0, 0, - 50, 0, 300, 0, 356, 359, 387, 121, 391, 392, - 394, 396, 397, 399, 
361, 360, 0, 0, 0, 406, - 425, 0, 0, 455, 0, -2, 0, 450, 449, 0, - 249, 276, 277, 278, 239, 133, 141, 131, 0, 128, - 130, 118, 100, 103, 0, 223, 388, 389, 380, 363, - 403, 21, 0, 0, 0, 445, 22, 0, 241, 58, - 0, 127, 129, 0, 158, 0, 0, 0, 0, 426, - 424, 453, -2, 451, 132, 157, 0, 0, 362, 0, - 0, 0, 159, 0, 381, 0, 384, 0, 382, 0, - 0, 0, 0, 0, 0, 383, 160, 161, + 0, 0, 0, 0, 0, 0, 0, 0, 310, 311, + 312, 313, 314, 315, 316, 289, 0, 303, 0, 0, + 0, 345, 346, 347, 348, 349, 350, 0, 218, 24, + 0, 323, 0, 0, 0, 0, 0, 0, 214, 0, + 403, 0, 368, 0, 369, 370, 371, 372, 373, 374, + 375, 0, 218, 0, 0, 39, 0, 274, 0, 0, + 0, 0, 0, 0, 263, 0, 0, 266, 0, 0, + 0, 0, 257, 0, 0, 277, 626, 259, 0, 261, + 262, 39, 0, 0, 35, 36, 0, 42, 663, 47, + 48, 45, 0, 0, 145, 0, 110, 106, 62, 63, + 99, 65, 99, 99, 99, 99, 123, 123, 123, 123, + 91, 92, 93, 94, 95, 0, 78, 99, 99, 99, + 82, 66, 67, 68, 69, 70, 71, 72, 101, 101, + 101, 103, 103, 492, 53, 0, 55, 0, 0, 157, + 0, 165, 468, 0, 664, 276, 500, 501, 171, 172, + 173, 174, 175, 176, 177, 178, 183, 186, 188, 442, + 0, 0, 0, 0, 0, 433, 435, 436, 441, 27, + 214, 0, 422, 0, 0, 0, 217, 22, 284, 285, + 287, 304, 0, 306, 308, 228, 224, 0, 412, -2, + 294, 295, 319, 320, 321, 0, 0, 0, 0, 317, + 299, 0, 330, 331, 332, 333, 334, 335, 336, 337, + 338, 339, 340, 341, 344, 387, 388, 0, 342, 343, + 351, 0, 0, 219, 220, 322, 0, 460, 24, 0, + 0, 0, 0, 0, 0, 409, 406, 0, 0, 377, + 0, 0, 0, 0, 0, 0, 273, 281, 234, 252, + 254, 0, 249, 264, 265, 267, 0, 269, 0, 271, + 272, 238, 239, 240, 0, 0, 0, 0, 260, 281, + 281, 38, 463, 43, 464, 412, 0, 56, 146, 148, + 151, 152, 153, 58, 59, 0, 0, 0, 0, 140, + 141, 113, 111, 0, 108, 107, 64, 0, 123, 123, + 85, 86, 126, 0, 126, 126, 126, 0, 79, 80, + 81, 73, 0, 74, 75, 76, 0, 77, 0, 0, + 664, 470, 663, 484, 158, 473, 474, 475, 476, 477, + 478, 479, 480, 481, 482, 483, 0, 164, 664, 167, + 0, 439, 440, 432, 20, 0, 465, 466, 423, 424, + 231, 305, 307, 309, 0, 218, 296, 317, 300, 0, + 297, 0, 0, 291, 356, 0, 0, 324, -2, 359, + 
360, 0, 0, 0, 0, 429, 0, 407, 0, 0, + 367, 378, 379, 380, 381, 454, 0, 0, -2, 0, + 0, 429, 0, 246, 253, 0, 0, 247, 0, 248, + 268, 270, 0, 0, 0, 0, 244, 429, 34, 0, + 0, 149, 0, 0, 136, 0, 138, 139, 119, 0, + 112, 61, 109, 0, 126, 126, 87, 0, 0, 88, + 89, 90, 0, 97, 0, 0, 0, 493, 54, 154, + 0, 663, 485, 486, 487, 488, 0, 166, 443, 21, + 281, 0, 225, 413, 0, 298, 0, 318, 301, 357, + 221, 0, 99, 99, 392, 99, 103, 395, 99, 397, + 99, 400, 0, 0, 0, 404, 366, 410, 0, 28, + 0, 454, 444, 456, 458, 0, 24, 0, 450, 0, + 437, 282, 250, 0, 255, 0, 0, 0, 258, 0, + 437, 147, 150, 0, 142, 99, 137, 121, 0, 114, + 115, 116, 117, 118, 100, 83, 84, 127, 124, 125, + 96, 0, 0, 104, 0, 664, 155, 156, 0, 425, + 232, 358, 302, 361, 389, 123, 393, 394, 396, 398, + 399, 401, 363, 362, 0, 0, 0, 408, 0, 29, + 0, 459, -2, 0, 0, 0, 40, 32, 0, 242, + 0, 0, 0, 277, 245, 33, 492, 0, 144, 128, + 122, 0, 98, 0, 0, 52, 0, 427, 0, 390, + 391, 382, 365, 405, 0, 457, 0, -2, 0, 452, + 451, 0, 251, 278, 279, 280, 241, 135, 143, 133, + 0, 130, 132, 120, 102, 105, 0, 23, 0, 0, + 0, 0, 0, 0, 447, 24, 0, 243, 60, 0, + 129, 131, 0, 160, 0, 428, 426, 364, 0, 0, + 0, 455, -2, 453, 134, 159, 0, 0, 383, 0, + 386, 161, 0, 384, 0, 0, 0, 0, 0, 385, + 0, 0, 162, 163, } var yyTok1 = [...]int{ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 72, 3, 3, 3, 99, 91, 3, - 52, 54, 96, 94, 53, 95, 108, 97, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 214, - 80, 79, 81, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 73, 3, 3, 3, 100, 92, 3, + 53, 55, 97, 95, 54, 96, 109, 98, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 215, + 81, 80, 82, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 101, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 102, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 90, 3, 102, + 3, 3, 3, 3, 91, 3, 103, } var yyTok2 = [...]int{ @@ -1935,11 +1919,11 @@ var yyTok2 = [...]int{ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 
34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, - 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, - 65, 66, 67, 68, 69, 70, 71, 73, 74, 75, - 76, 77, 78, 82, 83, 84, 85, 86, 87, 88, - 89, 92, 93, 98, 100, 103, 104, 105, 106, 107, - 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, + 52, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 74, 75, + 76, 77, 78, 79, 83, 84, 85, 86, 87, 88, + 89, 90, 93, 94, 99, 101, 104, 105, 106, 107, + 108, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, @@ -1949,7 +1933,7 @@ var yyTok2 = [...]int{ 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, - 209, 210, 211, 212, 213, + 209, 210, 211, 212, 213, 214, } var yyTok3 = [...]int{ 0, @@ -2314,9 +2298,9 @@ yydefault: { yyVAL.statement = yyDollar[1].selStmt } - case 18: + case 19: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:306 + //line sql.y:307 { sel := yyDollar[1].selStmt.(*Select) sel.OrderBy = yyDollar[2].orderBy @@ -2324,51 +2308,57 @@ yydefault: sel.Lock = yyDollar[4].str yyVAL.selStmt = sel } - case 19: + case 20: yyDollar = yyS[yypt-6 : yypt+1] - //line sql.y:314 + //line sql.y:315 { yyVAL.selStmt = &Union{Type: yyDollar[2].str, Left: yyDollar[1].selStmt, Right: yyDollar[3].selStmt, OrderBy: yyDollar[4].orderBy, Limit: yyDollar[5].limit, Lock: yyDollar[6].str} } - case 20: + case 21: yyDollar = yyS[yypt-7 : yypt+1] - //line sql.y:318 + //line sql.y:319 { yyVAL.selStmt = &Select{Comments: Comments(yyDollar[2].bytes2), Cache: yyDollar[3].str, SelectExprs: SelectExprs{Nextval{Expr: yyDollar[5].expr}}, From: TableExprs{&AliasedTableExpr{Expr: yyDollar[7].tableName}}} } - case 21: - yyDollar = yyS[yypt-10 : yypt+1] + case 22: + yyDollar = yyS[yypt-5 : yypt+1] //line sql.y:325 + { + yyVAL.statement = 
&Stream{Comments: Comments(yyDollar[2].bytes2), SelectExpr: yyDollar[3].selectExpr, Table: yyDollar[5].tableName} + } + case 23: + yyDollar = yyS[yypt-10 : yypt+1] + //line sql.y:332 { yyVAL.selStmt = &Select{Comments: Comments(yyDollar[2].bytes2), Cache: yyDollar[3].str, Distinct: yyDollar[4].str, Hints: yyDollar[5].str, SelectExprs: yyDollar[6].selectExprs, From: yyDollar[7].tableExprs, Where: NewWhere(WhereStr, yyDollar[8].expr), GroupBy: GroupBy(yyDollar[9].exprs), Having: NewWhere(HavingStr, yyDollar[10].expr)} } - case 22: + case 24: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:331 + //line sql.y:338 { yyVAL.selStmt = yyDollar[1].selStmt } - case 23: + case 25: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:335 + //line sql.y:342 { yyVAL.selStmt = &ParenSelect{Select: yyDollar[2].selStmt} } - case 24: + case 26: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:341 + //line sql.y:348 { yyVAL.selStmt = yyDollar[1].selStmt } - case 25: + case 27: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:345 + //line sql.y:352 { yyVAL.selStmt = &ParenSelect{Select: yyDollar[2].selStmt} } - case 26: + case 28: yyDollar = yyS[yypt-7 : yypt+1] - //line sql.y:352 + //line sql.y:359 { // insert_data returns a *Insert pre-filled with Columns & Values ins := yyDollar[6].ins @@ -2380,9 +2370,9 @@ yydefault: ins.OnDup = OnDup(yyDollar[7].updateExprs) yyVAL.statement = ins } - case 27: + case 29: yyDollar = yyS[yypt-8 : yypt+1] - //line sql.y:364 + //line sql.y:371 { cols := make(Columns, 0, len(yyDollar[7].updateExprs)) vals := make(ValTuple, 0, len(yyDollar[8].updateExprs)) @@ -2392,174 +2382,174 @@ yydefault: } yyVAL.statement = &Insert{Action: yyDollar[1].str, Comments: Comments(yyDollar[2].bytes2), Ignore: yyDollar[3].str, Table: yyDollar[4].tableName, Partitions: yyDollar[5].partitions, Columns: cols, Rows: Values{vals}, OnDup: OnDup(yyDollar[8].updateExprs)} } - case 28: + case 30: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:376 + //line sql.y:383 { yyVAL.str = InsertStr } 
- case 29: + case 31: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:380 + //line sql.y:387 { yyVAL.str = ReplaceStr } - case 30: + case 32: yyDollar = yyS[yypt-8 : yypt+1] - //line sql.y:386 + //line sql.y:393 { yyVAL.statement = &Update{Comments: Comments(yyDollar[2].bytes2), TableExprs: yyDollar[3].tableExprs, Exprs: yyDollar[5].updateExprs, Where: NewWhere(WhereStr, yyDollar[6].expr), OrderBy: yyDollar[7].orderBy, Limit: yyDollar[8].limit} } - case 31: + case 33: yyDollar = yyS[yypt-8 : yypt+1] - //line sql.y:392 + //line sql.y:399 { yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), TableExprs: TableExprs{&AliasedTableExpr{Expr: yyDollar[4].tableName}}, Partitions: yyDollar[5].partitions, Where: NewWhere(WhereStr, yyDollar[6].expr), OrderBy: yyDollar[7].orderBy, Limit: yyDollar[8].limit} } - case 32: + case 34: yyDollar = yyS[yypt-6 : yypt+1] - //line sql.y:396 + //line sql.y:403 { yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), Targets: yyDollar[3].tableNames, TableExprs: yyDollar[5].tableExprs, Where: NewWhere(WhereStr, yyDollar[6].expr)} } - case 33: + case 35: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:401 + //line sql.y:408 { } - case 34: + case 36: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:402 + //line sql.y:409 { } - case 35: + case 37: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:406 + //line sql.y:413 { yyVAL.tableNames = TableNames{yyDollar[1].tableName} } - case 36: + case 38: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:410 + //line sql.y:417 { yyVAL.tableNames = append(yyVAL.tableNames, yyDollar[3].tableName) } - case 37: + case 39: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:415 + //line sql.y:422 { yyVAL.partitions = nil } - case 38: + case 40: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:419 + //line sql.y:426 { yyVAL.partitions = yyDollar[3].partitions } - case 39: + case 41: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:425 + //line sql.y:432 { yyVAL.statement = &Set{Comments: 
Comments(yyDollar[2].bytes2), Exprs: yyDollar[3].updateExprs} } - case 40: + case 42: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:429 + //line sql.y:436 { yyVAL.statement = &Set{Comments: Comments(yyDollar[2].bytes2), Scope: yyDollar[3].str, Exprs: yyDollar[4].updateExprs} } - case 41: + case 43: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:433 + //line sql.y:440 { yyVAL.statement = &Set{Comments: Comments(yyDollar[2].bytes2), Charset: yyDollar[4].colIdent} } - case 45: + case 47: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:444 + //line sql.y:451 { yyVAL.colIdent = yyDollar[1].colIdent } - case 46: + case 48: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:448 + //line sql.y:455 { yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) } - case 47: + case 49: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:454 + //line sql.y:461 { yyVAL.str = SessionStr } - case 48: + case 50: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:458 + //line sql.y:465 { yyVAL.str = GlobalStr } - case 49: + case 51: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:464 + //line sql.y:471 { yyDollar[1].ddl.TableSpec = yyDollar[2].TableSpec yyVAL.statement = yyDollar[1].ddl } - case 50: + case 52: yyDollar = yyS[yypt-8 : yypt+1] - //line sql.y:469 + //line sql.y:476 { // Change this to an alter statement yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[7].tableName, NewName: yyDollar[7].tableName} } - case 51: + case 53: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:474 + //line sql.y:481 { yyVAL.statement = &DDL{Action: CreateStr, NewName: yyDollar[3].tableName.ToViewName()} } - case 52: + case 54: yyDollar = yyS[yypt-6 : yypt+1] - //line sql.y:478 + //line sql.y:485 { yyVAL.statement = &DDL{Action: CreateStr, NewName: yyDollar[5].tableName.ToViewName()} } - case 53: + case 55: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:484 + //line sql.y:491 { yyVAL.ddl = &DDL{Action: CreateStr, NewName: yyDollar[4].tableName} setDDL(yylex, yyVAL.ddl) } - case 54: + case 56: yyDollar = 
yyS[yypt-4 : yypt+1] - //line sql.y:491 + //line sql.y:498 { yyVAL.TableSpec = yyDollar[2].TableSpec yyVAL.TableSpec.Options = yyDollar[4].str } - case 55: + case 57: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:498 + //line sql.y:505 { yyVAL.TableSpec = &TableSpec{} yyVAL.TableSpec.AddColumn(yyDollar[1].columnDefinition) } - case 56: + case 58: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:503 + //line sql.y:510 { yyVAL.TableSpec.AddColumn(yyDollar[3].columnDefinition) } - case 57: + case 59: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:507 + //line sql.y:514 { yyVAL.TableSpec.AddIndex(yyDollar[3].indexDefinition) } - case 58: + case 60: yyDollar = yyS[yypt-8 : yypt+1] - //line sql.y:513 + //line sql.y:520 { yyDollar[2].columnType.NotNull = yyDollar[3].boolVal yyDollar[2].columnType.Default = yyDollar[4].optVal @@ -2569,645 +2559,645 @@ yydefault: yyDollar[2].columnType.Comment = yyDollar[8].optVal yyVAL.columnDefinition = &ColumnDefinition{Name: NewColIdent(string(yyDollar[1].bytes)), Type: yyDollar[2].columnType} } - case 59: + case 61: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:524 + //line sql.y:531 { yyVAL.columnType = yyDollar[1].columnType yyVAL.columnType.Unsigned = yyDollar[2].boolVal yyVAL.columnType.Zerofill = yyDollar[3].boolVal } - case 62: + case 64: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:534 + //line sql.y:541 { yyVAL.columnType = yyDollar[1].columnType yyVAL.columnType.Length = yyDollar[2].optVal } - case 63: + case 65: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:539 + //line sql.y:546 { yyVAL.columnType = yyDollar[1].columnType } - case 64: + case 66: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:545 + //line sql.y:552 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } - case 65: + case 67: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:549 + //line sql.y:556 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } - case 66: + case 68: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:553 + //line 
sql.y:560 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } - case 67: + case 69: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:557 + //line sql.y:564 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } - case 68: + case 70: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:561 + //line sql.y:568 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } - case 69: + case 71: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:565 + //line sql.y:572 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } - case 70: + case 72: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:569 + //line sql.y:576 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } - case 71: + case 73: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:575 + //line sql.y:582 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } - case 72: + case 74: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:581 + //line sql.y:588 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } - case 73: + case 75: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:587 + //line sql.y:594 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } - case 74: + case 76: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:593 + //line sql.y:600 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } - case 75: + case 77: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:599 + //line sql.y:606 { yyVAL.columnType = ColumnType{Type: 
string(yyDollar[1].bytes)} yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale } - case 76: + case 78: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:607 + //line sql.y:614 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } - case 77: + case 79: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:611 + //line sql.y:618 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} } - case 78: + case 80: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:615 + //line sql.y:622 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} } - case 79: + case 81: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:619 + //line sql.y:626 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} } - case 80: + case 82: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:623 + //line sql.y:630 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } - case 81: + case 83: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:629 + //line sql.y:636 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal, Charset: yyDollar[3].str, Collate: yyDollar[4].str} } - case 82: + case 84: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:633 + //line sql.y:640 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal, Charset: yyDollar[3].str, Collate: yyDollar[4].str} } - case 83: + case 85: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:637 + //line sql.y:644 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} } - case 84: + case 86: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:641 + //line sql.y:648 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} } - case 85: + case 87: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:645 + //line sql.y:652 { yyVAL.columnType = 
ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} } - case 86: + case 88: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:649 + //line sql.y:656 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} } - case 87: + case 89: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:653 + //line sql.y:660 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} } - case 88: + case 90: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:657 + //line sql.y:664 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} } - case 89: + case 91: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:661 + //line sql.y:668 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } - case 90: + case 92: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:665 + //line sql.y:672 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } - case 91: + case 93: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:669 + //line sql.y:676 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } - case 92: + case 94: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:673 + //line sql.y:680 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } - case 93: + case 95: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:677 + //line sql.y:684 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} } - case 94: + case 96: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:681 + //line sql.y:688 { yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), EnumValues: yyDollar[3].strs} } - case 95: + case 97: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:687 + //line sql.y:694 { yyVAL.strs = make([]string, 0, 4) yyVAL.strs = append(yyVAL.strs, "'"+string(yyDollar[1].bytes)+"'") } - case 96: + case 98: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:692 + //line 
sql.y:699 { yyVAL.strs = append(yyDollar[1].strs, "'"+string(yyDollar[3].bytes)+"'") } - case 97: + case 99: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:697 + //line sql.y:704 { yyVAL.optVal = nil } - case 98: + case 100: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:701 + //line sql.y:708 { yyVAL.optVal = NewIntVal(yyDollar[2].bytes) } - case 99: + case 101: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:706 + //line sql.y:713 { yyVAL.LengthScaleOption = LengthScaleOption{} } - case 100: + case 102: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:710 + //line sql.y:717 { yyVAL.LengthScaleOption = LengthScaleOption{ Length: NewIntVal(yyDollar[2].bytes), Scale: NewIntVal(yyDollar[4].bytes), } } - case 101: + case 103: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:718 + //line sql.y:725 { yyVAL.LengthScaleOption = LengthScaleOption{} } - case 102: + case 104: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:722 + //line sql.y:729 { yyVAL.LengthScaleOption = LengthScaleOption{ Length: NewIntVal(yyDollar[2].bytes), } } - case 103: + case 105: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:728 + //line sql.y:735 { yyVAL.LengthScaleOption = LengthScaleOption{ Length: NewIntVal(yyDollar[2].bytes), Scale: NewIntVal(yyDollar[4].bytes), } } - case 104: + case 106: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:736 + //line sql.y:743 { yyVAL.boolVal = BoolVal(false) } - case 105: + case 107: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:740 + //line sql.y:747 { yyVAL.boolVal = BoolVal(true) } - case 106: + case 108: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:745 + //line sql.y:752 { yyVAL.boolVal = BoolVal(false) } - case 107: + case 109: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:749 + //line sql.y:756 { yyVAL.boolVal = BoolVal(true) } - case 108: + case 110: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:755 + //line sql.y:762 { yyVAL.boolVal = BoolVal(false) } - case 109: + case 111: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:759 + //line sql.y:766 { 
yyVAL.boolVal = BoolVal(false) } - case 110: + case 112: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:763 + //line sql.y:770 { yyVAL.boolVal = BoolVal(true) } - case 111: + case 113: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:768 + //line sql.y:775 { yyVAL.optVal = nil } - case 112: + case 114: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:772 + //line sql.y:779 { yyVAL.optVal = NewStrVal(yyDollar[2].bytes) } - case 113: + case 115: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:776 + //line sql.y:783 { yyVAL.optVal = NewIntVal(yyDollar[2].bytes) } - case 114: + case 116: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:780 + //line sql.y:787 { yyVAL.optVal = NewFloatVal(yyDollar[2].bytes) } - case 115: + case 117: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:784 + //line sql.y:791 { yyVAL.optVal = NewValArg(yyDollar[2].bytes) } - case 116: + case 118: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:788 + //line sql.y:795 { yyVAL.optVal = NewValArg(yyDollar[2].bytes) } - case 117: + case 119: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:793 + //line sql.y:800 { yyVAL.optVal = nil } - case 118: + case 120: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:797 + //line sql.y:804 { yyVAL.optVal = NewValArg(yyDollar[3].bytes) } - case 119: + case 121: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:802 + //line sql.y:809 { yyVAL.boolVal = BoolVal(false) } - case 120: + case 122: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:806 + //line sql.y:813 { yyVAL.boolVal = BoolVal(true) } - case 121: + case 123: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:811 + //line sql.y:818 { yyVAL.str = "" } - case 122: + case 124: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:815 + //line sql.y:822 { yyVAL.str = string(yyDollar[3].bytes) } - case 123: + case 125: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:819 + //line sql.y:826 { yyVAL.str = string(yyDollar[3].bytes) } - case 124: + case 126: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:824 + //line sql.y:831 { yyVAL.str = 
"" } - case 125: + case 127: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:828 + //line sql.y:835 { yyVAL.str = string(yyDollar[2].bytes) } - case 126: + case 128: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:833 + //line sql.y:840 { yyVAL.colKeyOpt = colKeyNone } - case 127: + case 129: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:837 + //line sql.y:844 { yyVAL.colKeyOpt = colKeyPrimary } - case 128: + case 130: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:841 + //line sql.y:848 { yyVAL.colKeyOpt = colKey } - case 129: + case 131: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:845 + //line sql.y:852 { yyVAL.colKeyOpt = colKeyUniqueKey } - case 130: + case 132: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:849 + //line sql.y:856 { yyVAL.colKeyOpt = colKeyUnique } - case 131: + case 133: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:854 + //line sql.y:861 { yyVAL.optVal = nil } - case 132: + case 134: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:858 + //line sql.y:865 { yyVAL.optVal = NewStrVal(yyDollar[2].bytes) } - case 133: + case 135: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:864 + //line sql.y:871 { yyVAL.indexDefinition = &IndexDefinition{Info: yyDollar[1].indexInfo, Columns: yyDollar[3].indexColumns, Using: yyDollar[5].colIdent} } - case 134: + case 136: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:870 + //line sql.y:877 { yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].bytes), Name: NewColIdent("PRIMARY"), Primary: true, Unique: true} } - case 135: + case 137: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:874 + //line sql.y:881 { yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].str), Name: NewColIdent(string(yyDollar[3].bytes)), Unique: true} } - case 136: + case 138: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:878 + //line sql.y:885 { yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes), Name: NewColIdent(string(yyDollar[2].bytes)), Unique: true} } - 
case 137: + case 139: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:882 + //line sql.y:889 { yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].str), Name: NewColIdent(string(yyDollar[2].bytes)), Unique: false} } - case 138: + case 140: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:888 + //line sql.y:895 { yyVAL.str = string(yyDollar[1].bytes) } - case 139: + case 141: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:892 + //line sql.y:899 { yyVAL.str = string(yyDollar[1].bytes) } - case 140: + case 142: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:898 + //line sql.y:905 { yyVAL.indexColumns = []*IndexColumn{yyDollar[1].indexColumn} } - case 141: + case 143: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:902 + //line sql.y:909 { yyVAL.indexColumns = append(yyVAL.indexColumns, yyDollar[3].indexColumn) } - case 142: + case 144: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:908 + //line sql.y:915 { yyVAL.indexColumn = &IndexColumn{Column: yyDollar[1].colIdent, Length: yyDollar[2].optVal} } - case 143: + case 145: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:913 + //line sql.y:920 { yyVAL.str = "" } - case 144: + case 146: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:917 + //line sql.y:924 { yyVAL.str = " " + string(yyDollar[1].str) } - case 145: + case 147: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:921 + //line sql.y:928 { yyVAL.str = string(yyDollar[1].str) + ", " + string(yyDollar[3].str) } - case 146: + case 148: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:929 + //line sql.y:936 { yyVAL.str = yyDollar[1].str } - case 147: + case 149: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:933 + //line sql.y:940 { yyVAL.str = yyDollar[1].str + " " + yyDollar[2].str } - case 148: + case 150: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:937 + //line sql.y:944 { yyVAL.str = yyDollar[1].str + "=" + yyDollar[3].str } - case 149: + case 151: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:943 + //line sql.y:950 { yyVAL.str = yyDollar[1].colIdent.String() } - 
case 150: + case 152: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:947 + //line sql.y:954 { yyVAL.str = "'" + string(yyDollar[1].bytes) + "'" } - case 151: + case 153: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:951 + //line sql.y:958 { yyVAL.str = string(yyDollar[1].bytes) } - case 152: + case 154: yyDollar = yyS[yypt-6 : yypt+1] - //line sql.y:957 + //line sql.y:964 { yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName, NewName: yyDollar[4].tableName} } - case 153: + case 155: yyDollar = yyS[yypt-7 : yypt+1] - //line sql.y:961 + //line sql.y:968 { // Change this to a rename statement yyVAL.statement = &DDL{Action: RenameStr, Table: yyDollar[4].tableName, NewName: yyDollar[7].tableName} } - case 154: + case 156: yyDollar = yyS[yypt-7 : yypt+1] - //line sql.y:966 + //line sql.y:973 { // Rename an index can just be an alter yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName, NewName: yyDollar[4].tableName} } - case 155: + case 157: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:971 + //line sql.y:978 { yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[3].tableName.ToViewName(), NewName: yyDollar[3].tableName.ToViewName()} } - case 156: + case 158: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:975 + //line sql.y:982 { yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName, PartitionSpec: yyDollar[5].partSpec} } - case 157: + case 159: yyDollar = yyS[yypt-7 : yypt+1] - //line sql.y:981 + //line sql.y:988 { yyVAL.partSpec = &PartitionSpec{Action: ReorganizeStr, Name: yyDollar[3].colIdent, Definitions: yyDollar[6].partDefs} } - case 158: + case 160: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:987 + //line sql.y:994 { yyVAL.partDefs = []*PartitionDefinition{yyDollar[1].partDef} } - case 159: + case 161: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:991 + //line sql.y:998 { yyVAL.partDefs = append(yyDollar[1].partDefs, yyDollar[3].partDef) } - case 160: + case 162: yyDollar = yyS[yypt-8 : yypt+1] - 
//line sql.y:997 + //line sql.y:1004 { yyVAL.partDef = &PartitionDefinition{Name: yyDollar[2].colIdent, Limit: yyDollar[7].expr} } - case 161: + case 163: yyDollar = yyS[yypt-8 : yypt+1] - //line sql.y:1001 + //line sql.y:1008 { yyVAL.partDef = &PartitionDefinition{Name: yyDollar[2].colIdent, Maxvalue: true} } - case 162: + case 164: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:1007 + //line sql.y:1014 { yyVAL.statement = &DDL{Action: RenameStr, Table: yyDollar[3].tableName, NewName: yyDollar[5].tableName} } - case 163: + case 165: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1013 + //line sql.y:1020 { var exists bool if yyDollar[3].byt != 0 { @@ -3215,16 +3205,16 @@ yydefault: } yyVAL.statement = &DDL{Action: DropStr, Table: yyDollar[4].tableName, IfExists: exists} } - case 164: + case 166: yyDollar = yyS[yypt-6 : yypt+1] - //line sql.y:1021 + //line sql.y:1028 { // Change this to an alter statement yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[5].tableName, NewName: yyDollar[5].tableName} } - case 165: + case 167: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:1026 + //line sql.y:1033 { var exists bool if yyDollar[3].byt != 0 { @@ -3232,608 +3222,608 @@ yydefault: } yyVAL.statement = &DDL{Action: DropStr, Table: yyDollar[4].tableName.ToViewName(), IfExists: exists} } - case 166: + case 168: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1036 + //line sql.y:1043 { yyVAL.statement = &DDL{Action: TruncateStr, Table: yyDollar[3].tableName} } - case 167: + case 169: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1040 + //line sql.y:1047 { yyVAL.statement = &DDL{Action: TruncateStr, Table: yyDollar[2].tableName} } - case 168: + case 170: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1045 + //line sql.y:1052 { yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[3].tableName, NewName: yyDollar[3].tableName} } - case 169: + case 171: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1051 + //line sql.y:1058 { yyVAL.statement = &Show{Type: 
string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } - case 170: + case 172: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1055 + //line sql.y:1062 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } - case 171: + case 173: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1059 + //line sql.y:1066 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } - case 172: + case 174: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1064 + //line sql.y:1071 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } - case 173: + case 175: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1068 + //line sql.y:1075 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } - case 174: + case 176: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1072 + //line sql.y:1079 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } - case 175: + case 177: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1076 + //line sql.y:1083 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } - case 176: + case 178: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1080 + //line sql.y:1087 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} } - case 177: + case 179: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1084 + //line sql.y:1091 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 178: + case 180: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1088 + //line sql.y:1095 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 179: + case 181: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1092 + //line sql.y:1099 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 180: + case 182: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1096 + //line sql.y:1103 { yyVAL.statement = &Show{Type: 
string(yyDollar[2].bytes)} } - case 181: + case 183: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1100 + //line sql.y:1107 { yyVAL.statement = &Show{Scope: yyDollar[2].str, Type: string(yyDollar[3].bytes)} } - case 182: + case 184: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1104 + //line sql.y:1111 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 183: + case 185: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1108 + //line sql.y:1115 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 184: + case 186: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1112 + //line sql.y:1119 { yyVAL.statement = &Show{Scope: yyDollar[2].str, Type: string(yyDollar[3].bytes)} } - case 185: + case 187: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1116 + //line sql.y:1123 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 186: + case 188: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1120 + //line sql.y:1127 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes), OnTable: yyDollar[4].tableName} } - case 187: + case 189: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1124 + //line sql.y:1131 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 188: + case 190: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1128 + //line sql.y:1135 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 189: + case 191: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1132 + //line sql.y:1139 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 190: + case 192: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1136 + //line sql.y:1143 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 191: + case 193: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1146 + //line sql.y:1153 { yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} } - case 192: + case 194: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1152 + //line sql.y:1159 { yyVAL.str = "" } - case 193: + case 195: yyDollar = yyS[yypt-1 : 
yypt+1] - //line sql.y:1156 + //line sql.y:1163 { yyVAL.str = SessionStr } - case 194: + case 196: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1160 + //line sql.y:1167 { yyVAL.str = GlobalStr } - case 195: + case 197: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1166 + //line sql.y:1173 { yyVAL.statement = &Use{DBName: yyDollar[2].tableIdent} } - case 196: + case 198: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1170 + //line sql.y:1177 { yyVAL.statement = &Use{DBName: TableIdent{v: ""}} } - case 197: + case 199: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1176 + //line sql.y:1183 { yyVAL.statement = &OtherRead{} } - case 198: + case 200: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1180 + //line sql.y:1187 { yyVAL.statement = &OtherRead{} } - case 199: + case 201: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1184 + //line sql.y:1191 { yyVAL.statement = &OtherRead{} } - case 200: + case 202: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1188 + //line sql.y:1195 { yyVAL.statement = &OtherAdmin{} } - case 201: + case 203: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1192 + //line sql.y:1199 { yyVAL.statement = &OtherAdmin{} } - case 202: + case 204: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1197 + //line sql.y:1204 { setAllowComments(yylex, true) } - case 203: + case 205: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1201 + //line sql.y:1208 { yyVAL.bytes2 = yyDollar[2].bytes2 setAllowComments(yylex, false) } - case 204: + case 206: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1207 + //line sql.y:1214 { yyVAL.bytes2 = nil } - case 205: + case 207: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1211 + //line sql.y:1218 { yyVAL.bytes2 = append(yyDollar[1].bytes2, yyDollar[2].bytes) } - case 206: + case 208: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1217 + //line sql.y:1224 { yyVAL.str = UnionStr } - case 207: + case 209: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1221 + //line sql.y:1228 { yyVAL.str = UnionAllStr } - case 208: + case 
210: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1225 + //line sql.y:1232 { yyVAL.str = UnionDistinctStr } - case 209: + case 211: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1230 + //line sql.y:1237 { yyVAL.str = "" } - case 210: + case 212: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1234 + //line sql.y:1241 { yyVAL.str = SQLNoCacheStr } - case 211: + case 213: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1238 + //line sql.y:1245 { yyVAL.str = SQLCacheStr } - case 212: + case 214: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1243 + //line sql.y:1250 { yyVAL.str = "" } - case 213: + case 215: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1247 + //line sql.y:1254 { yyVAL.str = DistinctStr } - case 214: + case 216: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1252 + //line sql.y:1259 { yyVAL.str = "" } - case 215: + case 217: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1256 + //line sql.y:1263 { yyVAL.str = StraightJoinHint } - case 216: + case 218: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1261 + //line sql.y:1268 { yyVAL.selectExprs = nil } - case 217: + case 219: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1265 + //line sql.y:1272 { yyVAL.selectExprs = yyDollar[1].selectExprs } - case 218: + case 220: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1271 + //line sql.y:1278 { yyVAL.selectExprs = SelectExprs{yyDollar[1].selectExpr} } - case 219: + case 221: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1275 + //line sql.y:1282 { yyVAL.selectExprs = append(yyVAL.selectExprs, yyDollar[3].selectExpr) } - case 220: + case 222: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1281 + //line sql.y:1288 { yyVAL.selectExpr = &StarExpr{} } - case 221: + case 223: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1285 + //line sql.y:1292 { yyVAL.selectExpr = &AliasedExpr{Expr: yyDollar[1].expr, As: yyDollar[2].colIdent} } - case 222: + case 224: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1289 + //line sql.y:1296 { yyVAL.selectExpr = 
&StarExpr{TableName: TableName{Name: yyDollar[1].tableIdent}} } - case 223: + case 225: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:1293 + //line sql.y:1300 { yyVAL.selectExpr = &StarExpr{TableName: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}} } - case 224: + case 226: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1298 + //line sql.y:1305 { yyVAL.colIdent = ColIdent{} } - case 225: + case 227: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1302 + //line sql.y:1309 { yyVAL.colIdent = yyDollar[1].colIdent } - case 226: + case 228: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1306 + //line sql.y:1313 { yyVAL.colIdent = yyDollar[2].colIdent } - case 228: + case 230: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1313 + //line sql.y:1320 { yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) } - case 229: + case 231: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1318 + //line sql.y:1325 { yyVAL.tableExprs = TableExprs{&AliasedTableExpr{Expr: TableName{Name: NewTableIdent("dual")}}} } - case 230: + case 232: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1322 + //line sql.y:1329 { yyVAL.tableExprs = yyDollar[2].tableExprs } - case 231: + case 233: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1328 + //line sql.y:1335 { yyVAL.tableExprs = TableExprs{yyDollar[1].tableExpr} } - case 232: + case 234: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1332 + //line sql.y:1339 { yyVAL.tableExprs = append(yyVAL.tableExprs, yyDollar[3].tableExpr) } - case 235: + case 237: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1342 + //line sql.y:1349 { yyVAL.tableExpr = yyDollar[1].aliasedTableName } - case 236: + case 238: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1346 + //line sql.y:1353 { yyVAL.tableExpr = &AliasedTableExpr{Expr: yyDollar[1].subquery, As: yyDollar[3].tableIdent} } - case 237: + case 239: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1350 + //line sql.y:1357 { yyVAL.tableExpr = &ParenTableExpr{Exprs: yyDollar[2].tableExprs} 
} - case 238: + case 240: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1356 + //line sql.y:1363 { yyVAL.aliasedTableName = &AliasedTableExpr{Expr: yyDollar[1].tableName, As: yyDollar[2].tableIdent, Hints: yyDollar[3].indexHints} } - case 239: + case 241: yyDollar = yyS[yypt-7 : yypt+1] - //line sql.y:1360 + //line sql.y:1367 { yyVAL.aliasedTableName = &AliasedTableExpr{Expr: yyDollar[1].tableName, Partitions: yyDollar[4].partitions, As: yyDollar[6].tableIdent, Hints: yyDollar[7].indexHints} } - case 240: + case 242: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1366 + //line sql.y:1373 { yyVAL.columns = Columns{yyDollar[1].colIdent} } - case 241: + case 243: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1370 + //line sql.y:1377 { yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent) } - case 242: + case 244: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1376 + //line sql.y:1383 { yyVAL.partitions = Partitions{yyDollar[1].colIdent} } - case 243: + case 245: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1380 + //line sql.y:1387 { yyVAL.partitions = append(yyVAL.partitions, yyDollar[3].colIdent) } - case 244: + case 246: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1393 + //line sql.y:1400 { yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} } - case 245: + case 247: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1397 + //line sql.y:1404 { yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} } - case 246: + case 248: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1401 + //line sql.y:1408 { yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} } - case 247: + case 249: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1405 + //line 
sql.y:1412 { yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr} } - case 248: + case 250: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1411 + //line sql.y:1418 { yyVAL.joinCondition = JoinCondition{On: yyDollar[2].expr} } - case 249: + case 251: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1413 + //line sql.y:1420 { yyVAL.joinCondition = JoinCondition{Using: yyDollar[3].columns} } - case 250: + case 252: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1417 + //line sql.y:1424 { } - case 251: + case 253: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1419 + //line sql.y:1426 { yyVAL.joinCondition = yyDollar[1].joinCondition } - case 252: + case 254: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1423 + //line sql.y:1430 { } - case 253: + case 255: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1425 + //line sql.y:1432 { yyVAL.joinCondition = JoinCondition{On: yyDollar[2].expr} } - case 254: + case 256: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1428 + //line sql.y:1435 { yyVAL.empty = struct{}{} } - case 255: + case 257: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1430 + //line sql.y:1437 { yyVAL.empty = struct{}{} } - case 256: + case 258: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1433 + //line sql.y:1440 { yyVAL.tableIdent = NewTableIdent("") } - case 257: + case 259: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1437 + //line sql.y:1444 { yyVAL.tableIdent = yyDollar[1].tableIdent } - case 258: + case 260: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1441 + //line sql.y:1448 { yyVAL.tableIdent = yyDollar[2].tableIdent } - case 260: + case 262: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1448 + //line sql.y:1455 { yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) } - case 261: + case 263: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1454 + //line sql.y:1461 { yyVAL.str = JoinStr } - case 262: + case 264: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1458 + 
//line sql.y:1465 { yyVAL.str = JoinStr } - case 263: + case 265: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1462 + //line sql.y:1469 { yyVAL.str = JoinStr } - case 264: + case 266: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1468 + //line sql.y:1475 { yyVAL.str = StraightJoinStr } - case 265: + case 267: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1474 + //line sql.y:1481 { yyVAL.str = LeftJoinStr } - case 266: + case 268: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1478 + //line sql.y:1485 { yyVAL.str = LeftJoinStr } - case 267: + case 269: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1482 + //line sql.y:1489 { yyVAL.str = RightJoinStr } - case 268: + case 270: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1486 + //line sql.y:1493 { yyVAL.str = RightJoinStr } - case 269: + case 271: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1492 + //line sql.y:1499 { yyVAL.str = NaturalJoinStr } - case 270: + case 272: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1496 + //line sql.y:1503 { if yyDollar[2].str == LeftJoinStr { yyVAL.str = NaturalLeftJoinStr @@ -3841,453 +3831,453 @@ yydefault: yyVAL.str = NaturalRightJoinStr } } - case 271: + case 273: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1506 + //line sql.y:1513 { yyVAL.tableName = yyDollar[2].tableName } - case 272: + case 274: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1510 + //line sql.y:1517 { yyVAL.tableName = yyDollar[1].tableName } - case 273: + case 275: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1516 + //line sql.y:1523 { yyVAL.tableName = TableName{Name: yyDollar[1].tableIdent} } - case 274: + case 276: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1520 + //line sql.y:1527 { yyVAL.tableName = TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent} } - case 275: + case 277: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1525 + //line sql.y:1532 { yyVAL.indexHints = nil } - case 276: + case 278: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:1529 + //line 
sql.y:1536 { yyVAL.indexHints = &IndexHints{Type: UseStr, Indexes: yyDollar[4].columns} } - case 277: + case 279: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:1533 + //line sql.y:1540 { yyVAL.indexHints = &IndexHints{Type: IgnoreStr, Indexes: yyDollar[4].columns} } - case 278: + case 280: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:1537 + //line sql.y:1544 { yyVAL.indexHints = &IndexHints{Type: ForceStr, Indexes: yyDollar[4].columns} } - case 279: + case 281: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1542 + //line sql.y:1549 { yyVAL.expr = nil } - case 280: + case 282: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1546 + //line sql.y:1553 { yyVAL.expr = yyDollar[2].expr } - case 281: + case 283: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1552 + //line sql.y:1559 { yyVAL.expr = yyDollar[1].expr } - case 282: + case 284: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1556 + //line sql.y:1563 { yyVAL.expr = &AndExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} } - case 283: + case 285: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1560 + //line sql.y:1567 { yyVAL.expr = &OrExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} } - case 284: + case 286: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1564 + //line sql.y:1571 { yyVAL.expr = &NotExpr{Expr: yyDollar[2].expr} } - case 285: + case 287: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1568 + //line sql.y:1575 { yyVAL.expr = &IsExpr{Operator: yyDollar[3].str, Expr: yyDollar[1].expr} } - case 286: + case 288: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1572 + //line sql.y:1579 { yyVAL.expr = yyDollar[1].expr } - case 287: + case 289: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1576 + //line sql.y:1583 { yyVAL.expr = &Default{ColName: yyDollar[2].str} } - case 288: + case 290: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1582 + //line sql.y:1589 { yyVAL.str = "" } - case 289: + case 291: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1586 + //line sql.y:1593 { yyVAL.str = 
string(yyDollar[2].bytes) } - case 290: + case 292: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1592 + //line sql.y:1599 { yyVAL.boolVal = BoolVal(true) } - case 291: + case 293: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1596 + //line sql.y:1603 { yyVAL.boolVal = BoolVal(false) } - case 292: + case 294: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1602 + //line sql.y:1609 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: yyDollar[2].str, Right: yyDollar[3].expr} } - case 293: + case 295: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1606 + //line sql.y:1613 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: InStr, Right: yyDollar[3].colTuple} } - case 294: + case 296: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1610 + //line sql.y:1617 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotInStr, Right: yyDollar[4].colTuple} } - case 295: + case 297: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1614 + //line sql.y:1621 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: LikeStr, Right: yyDollar[3].expr, Escape: yyDollar[4].expr} } - case 296: + case 298: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:1618 + //line sql.y:1625 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotLikeStr, Right: yyDollar[4].expr, Escape: yyDollar[5].expr} } - case 297: + case 299: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1622 + //line sql.y:1629 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: RegexpStr, Right: yyDollar[3].expr} } - case 298: + case 300: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1626 + //line sql.y:1633 { yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotRegexpStr, Right: yyDollar[4].expr} } - case 299: + case 301: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:1630 + //line sql.y:1637 { yyVAL.expr = &RangeCond{Left: yyDollar[1].expr, Operator: BetweenStr, From: yyDollar[3].expr, To: yyDollar[5].expr} } - case 300: + case 302: yyDollar = 
yyS[yypt-6 : yypt+1] - //line sql.y:1634 + //line sql.y:1641 { yyVAL.expr = &RangeCond{Left: yyDollar[1].expr, Operator: NotBetweenStr, From: yyDollar[4].expr, To: yyDollar[6].expr} } - case 301: + case 303: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1638 + //line sql.y:1645 { yyVAL.expr = &ExistsExpr{Subquery: yyDollar[2].subquery} } - case 302: + case 304: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1644 + //line sql.y:1651 { yyVAL.str = IsNullStr } - case 303: + case 305: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1648 + //line sql.y:1655 { yyVAL.str = IsNotNullStr } - case 304: + case 306: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1652 + //line sql.y:1659 { yyVAL.str = IsTrueStr } - case 305: + case 307: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1656 + //line sql.y:1663 { yyVAL.str = IsNotTrueStr } - case 306: + case 308: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1660 + //line sql.y:1667 { yyVAL.str = IsFalseStr } - case 307: + case 309: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1664 + //line sql.y:1671 { yyVAL.str = IsNotFalseStr } - case 308: + case 310: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1670 + //line sql.y:1677 { yyVAL.str = EqualStr } - case 309: + case 311: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1674 + //line sql.y:1681 { yyVAL.str = LessThanStr } - case 310: + case 312: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1678 + //line sql.y:1685 { yyVAL.str = GreaterThanStr } - case 311: + case 313: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1682 + //line sql.y:1689 { yyVAL.str = LessEqualStr } - case 312: + case 314: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1686 + //line sql.y:1693 { yyVAL.str = GreaterEqualStr } - case 313: + case 315: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1690 + //line sql.y:1697 { yyVAL.str = NotEqualStr } - case 314: + case 316: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1694 + //line sql.y:1701 { yyVAL.str = NullSafeEqualStr } - case 315: + case 317: yyDollar = 
yyS[yypt-0 : yypt+1] - //line sql.y:1699 + //line sql.y:1706 { yyVAL.expr = nil } - case 316: + case 318: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1703 + //line sql.y:1710 { yyVAL.expr = yyDollar[2].expr } - case 317: + case 319: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1709 + //line sql.y:1716 { yyVAL.colTuple = yyDollar[1].valTuple } - case 318: + case 320: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1713 + //line sql.y:1720 { yyVAL.colTuple = yyDollar[1].subquery } - case 319: + case 321: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1717 + //line sql.y:1724 { yyVAL.colTuple = ListArg(yyDollar[1].bytes) } - case 320: + case 322: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1723 + //line sql.y:1730 { yyVAL.subquery = &Subquery{yyDollar[2].selStmt} } - case 321: + case 323: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1729 + //line sql.y:1736 { yyVAL.exprs = Exprs{yyDollar[1].expr} } - case 322: + case 324: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1733 + //line sql.y:1740 { yyVAL.exprs = append(yyDollar[1].exprs, yyDollar[3].expr) } - case 323: + case 325: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1739 + //line sql.y:1746 { yyVAL.expr = yyDollar[1].expr } - case 324: + case 326: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1743 + //line sql.y:1750 { yyVAL.expr = yyDollar[1].boolVal } - case 325: + case 327: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1747 + //line sql.y:1754 { yyVAL.expr = yyDollar[1].colName } - case 326: + case 328: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1751 + //line sql.y:1758 { yyVAL.expr = yyDollar[1].expr } - case 327: + case 329: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:1755 + //line sql.y:1762 { yyVAL.expr = yyDollar[1].subquery } - case 328: + case 330: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1759 + //line sql.y:1766 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitAndStr, Right: yyDollar[3].expr} } - case 329: + case 331: yyDollar = yyS[yypt-3 : yypt+1] - //line 
sql.y:1763 + //line sql.y:1770 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitOrStr, Right: yyDollar[3].expr} } - case 330: + case 332: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1767 + //line sql.y:1774 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitXorStr, Right: yyDollar[3].expr} } - case 331: + case 333: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1771 + //line sql.y:1778 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: PlusStr, Right: yyDollar[3].expr} } - case 332: + case 334: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1775 + //line sql.y:1782 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: MinusStr, Right: yyDollar[3].expr} } - case 333: + case 335: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1779 + //line sql.y:1786 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: MultStr, Right: yyDollar[3].expr} } - case 334: + case 336: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1783 + //line sql.y:1790 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: DivStr, Right: yyDollar[3].expr} } - case 335: + case 337: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1787 + //line sql.y:1794 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: IntDivStr, Right: yyDollar[3].expr} } - case 336: + case 338: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1791 + //line sql.y:1798 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ModStr, Right: yyDollar[3].expr} } - case 337: + case 339: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1795 + //line sql.y:1802 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ModStr, Right: yyDollar[3].expr} } - case 338: + case 340: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1799 + //line sql.y:1806 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ShiftLeftStr, Right: yyDollar[3].expr} } - case 339: + case 341: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1803 + //line sql.y:1810 { yyVAL.expr = 
&BinaryExpr{Left: yyDollar[1].expr, Operator: ShiftRightStr, Right: yyDollar[3].expr} } - case 340: + case 342: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1807 + //line sql.y:1814 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].colName, Operator: JSONExtractOp, Right: yyDollar[3].expr} } - case 341: + case 343: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1811 + //line sql.y:1818 { yyVAL.expr = &BinaryExpr{Left: yyDollar[1].colName, Operator: JSONUnquoteExtractOp, Right: yyDollar[3].expr} } - case 342: + case 344: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:1815 + //line sql.y:1822 { yyVAL.expr = &CollateExpr{Expr: yyDollar[1].expr, Charset: yyDollar[3].str} } - case 343: + case 345: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1819 + //line sql.y:1826 { yyVAL.expr = &UnaryExpr{Operator: BinaryStr, Expr: yyDollar[2].expr} } - case 344: + case 346: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1823 + //line sql.y:1830 { yyVAL.expr = &UnaryExpr{Operator: UBinaryStr, Expr: yyDollar[2].expr} } - case 345: + case 347: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1827 + //line sql.y:1834 { if num, ok := yyDollar[2].expr.(*SQLVal); ok && num.Type == IntVal { yyVAL.expr = num @@ -4295,9 +4285,9 @@ yydefault: yyVAL.expr = &UnaryExpr{Operator: UPlusStr, Expr: yyDollar[2].expr} } } - case 346: + case 348: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1835 + //line sql.y:1842 { if num, ok := yyDollar[2].expr.(*SQLVal); ok && num.Type == IntVal { // Handle double negative @@ -4311,21 +4301,21 @@ yydefault: yyVAL.expr = &UnaryExpr{Operator: UMinusStr, Expr: yyDollar[2].expr} } } - case 347: + case 349: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1849 + //line sql.y:1856 { yyVAL.expr = &UnaryExpr{Operator: TildaStr, Expr: yyDollar[2].expr} } - case 348: + case 350: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1853 + //line sql.y:1860 { yyVAL.expr = &UnaryExpr{Operator: BangStr, Expr: yyDollar[2].expr} } - case 349: + case 351: yyDollar = yyS[yypt-3 : yypt+1] - 
//line sql.y:1857 + //line sql.y:1864 { // This rule prevents the usage of INTERVAL // as a function. If support is needed for that, @@ -4333,395 +4323,395 @@ yydefault: // will be non-trivial because of grammar conflicts. yyVAL.expr = &IntervalExpr{Expr: yyDollar[2].expr, Unit: yyDollar[3].colIdent.String()} } - case 354: + case 356: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1875 + //line sql.y:1882 { yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Exprs: yyDollar[3].selectExprs} } - case 355: + case 357: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:1879 + //line sql.y:1886 { yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Distinct: true, Exprs: yyDollar[4].selectExprs} } - case 356: + case 358: yyDollar = yyS[yypt-6 : yypt+1] - //line sql.y:1883 + //line sql.y:1890 { yyVAL.expr = &FuncExpr{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].colIdent, Exprs: yyDollar[5].selectExprs} } - case 357: + case 359: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1893 + //line sql.y:1900 { yyVAL.expr = &FuncExpr{Name: NewColIdent("left"), Exprs: yyDollar[3].selectExprs} } - case 358: + case 360: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1897 + //line sql.y:1904 { yyVAL.expr = &FuncExpr{Name: NewColIdent("right"), Exprs: yyDollar[3].selectExprs} } - case 359: + case 361: yyDollar = yyS[yypt-6 : yypt+1] - //line sql.y:1901 + //line sql.y:1908 { yyVAL.expr = &ConvertExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].convertType} } - case 360: + case 362: yyDollar = yyS[yypt-6 : yypt+1] - //line sql.y:1905 + //line sql.y:1912 { yyVAL.expr = &ConvertExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].convertType} } - case 361: + case 363: yyDollar = yyS[yypt-6 : yypt+1] - //line sql.y:1909 + //line sql.y:1916 { yyVAL.expr = &ConvertUsingExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].str} } - case 362: + case 364: yyDollar = yyS[yypt-9 : yypt+1] - //line sql.y:1913 + //line sql.y:1920 { yyVAL.expr = &MatchExpr{Columns: yyDollar[3].selectExprs, Expr: yyDollar[7].expr, 
Option: yyDollar[8].str} } - case 363: + case 365: yyDollar = yyS[yypt-7 : yypt+1] - //line sql.y:1917 + //line sql.y:1924 { yyVAL.expr = &GroupConcatExpr{Distinct: yyDollar[3].str, Exprs: yyDollar[4].selectExprs, OrderBy: yyDollar[5].orderBy, Separator: yyDollar[6].str} } - case 364: + case 366: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:1921 + //line sql.y:1928 { yyVAL.expr = &CaseExpr{Expr: yyDollar[2].expr, Whens: yyDollar[3].whens, Else: yyDollar[4].expr} } - case 365: + case 367: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1925 + //line sql.y:1932 { yyVAL.expr = &ValuesFuncExpr{Name: yyDollar[3].colIdent} } - case 366: + case 368: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1935 + //line sql.y:1942 { yyVAL.expr = &FuncExpr{Name: NewColIdent("current_timestamp")} } - case 367: + case 369: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1939 + //line sql.y:1946 { yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_timestamp")} } - case 368: + case 370: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1943 + //line sql.y:1950 { yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_time")} } - case 369: + case 371: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1947 + //line sql.y:1954 { yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_date")} } - case 370: + case 372: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1952 + //line sql.y:1959 { yyVAL.expr = &FuncExpr{Name: NewColIdent("localtime")} } - case 371: + case 373: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1957 + //line sql.y:1964 { yyVAL.expr = &FuncExpr{Name: NewColIdent("localtimestamp")} } - case 372: + case 374: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1962 + //line sql.y:1969 { yyVAL.expr = &FuncExpr{Name: NewColIdent("current_date")} } - case 373: + case 375: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:1967 + //line sql.y:1974 { yyVAL.expr = &FuncExpr{Name: NewColIdent("current_time")} } - case 376: + case 378: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1981 + //line sql.y:1988 { yyVAL.expr = 
&FuncExpr{Name: NewColIdent("if"), Exprs: yyDollar[3].selectExprs} } - case 377: + case 379: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1985 + //line sql.y:1992 { yyVAL.expr = &FuncExpr{Name: NewColIdent("database"), Exprs: yyDollar[3].selectExprs} } - case 378: + case 380: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1989 + //line sql.y:1996 { yyVAL.expr = &FuncExpr{Name: NewColIdent("mod"), Exprs: yyDollar[3].selectExprs} } - case 379: + case 381: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:1993 + //line sql.y:2000 { yyVAL.expr = &FuncExpr{Name: NewColIdent("replace"), Exprs: yyDollar[3].selectExprs} } - case 380: + case 382: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:1999 + //line sql.y:2006 { yyVAL.str = "" } - case 381: + case 383: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2003 + //line sql.y:2010 { yyVAL.str = BooleanModeStr } - case 382: + case 384: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2007 + //line sql.y:2014 { yyVAL.str = NaturalLanguageModeStr } - case 383: + case 385: yyDollar = yyS[yypt-7 : yypt+1] - //line sql.y:2011 + //line sql.y:2018 { yyVAL.str = NaturalLanguageModeWithQueryExpansionStr } - case 384: + case 386: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2015 + //line sql.y:2022 { yyVAL.str = QueryExpansionStr } - case 385: + case 387: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2021 + //line sql.y:2028 { yyVAL.str = string(yyDollar[1].bytes) } - case 386: + case 388: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2025 + //line sql.y:2032 { yyVAL.str = string(yyDollar[1].bytes) } - case 387: + case 389: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2031 + //line sql.y:2038 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} } - case 388: + case 390: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2035 + //line sql.y:2042 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal, Charset: yyDollar[3].str, Operator: CharacterSetStr} } - 
case 389: + case 391: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2039 + //line sql.y:2046 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal, Charset: string(yyDollar[3].bytes)} } - case 390: + case 392: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2043 + //line sql.y:2050 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } - case 391: + case 393: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2047 + //line sql.y:2054 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} } - case 392: + case 394: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2051 + //line sql.y:2058 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} yyVAL.convertType.Length = yyDollar[2].LengthScaleOption.Length yyVAL.convertType.Scale = yyDollar[2].LengthScaleOption.Scale } - case 393: + case 395: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2057 + //line sql.y:2064 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } - case 394: + case 396: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2061 + //line sql.y:2068 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} } - case 395: + case 397: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2065 + //line sql.y:2072 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } - case 396: + case 398: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2069 + //line sql.y:2076 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } - case 397: + case 399: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2073 + //line sql.y:2080 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} } - case 398: + case 400: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2077 + //line sql.y:2084 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } - case 399: + case 401: yyDollar = yyS[yypt-2 : yypt+1] - //line 
sql.y:2081 + //line sql.y:2088 { yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} } - case 400: + case 402: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2086 + //line sql.y:2093 { yyVAL.expr = nil } - case 401: + case 403: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2090 + //line sql.y:2097 { yyVAL.expr = yyDollar[1].expr } - case 402: + case 404: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2095 + //line sql.y:2102 { yyVAL.str = string("") } - case 403: + case 405: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2099 + //line sql.y:2106 { yyVAL.str = " separator '" + string(yyDollar[2].bytes) + "'" } - case 404: + case 406: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2105 + //line sql.y:2112 { yyVAL.whens = []*When{yyDollar[1].when} } - case 405: + case 407: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2109 + //line sql.y:2116 { yyVAL.whens = append(yyDollar[1].whens, yyDollar[2].when) } - case 406: + case 408: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2115 + //line sql.y:2122 { yyVAL.when = &When{Cond: yyDollar[2].expr, Val: yyDollar[4].expr} } - case 407: + case 409: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2120 + //line sql.y:2127 { yyVAL.expr = nil } - case 408: + case 410: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2124 + //line sql.y:2131 { yyVAL.expr = yyDollar[2].expr } - case 409: + case 411: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2130 + //line sql.y:2137 { yyVAL.colName = &ColName{Name: yyDollar[1].colIdent} } - case 410: + case 412: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2134 + //line sql.y:2141 { yyVAL.colName = &ColName{Qualifier: TableName{Name: yyDollar[1].tableIdent}, Name: yyDollar[3].colIdent} } - case 411: + case 413: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:2138 + //line sql.y:2145 { yyVAL.colName = &ColName{Qualifier: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}, Name: yyDollar[5].colIdent} } - case 412: + case 414: yyDollar = yyS[yypt-1 : yypt+1] - 
//line sql.y:2144 + //line sql.y:2151 { yyVAL.expr = NewStrVal(yyDollar[1].bytes) } - case 413: + case 415: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2148 + //line sql.y:2155 { yyVAL.expr = NewHexVal(yyDollar[1].bytes) } - case 414: + case 416: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2152 + //line sql.y:2159 { yyVAL.expr = NewBitVal(yyDollar[1].bytes) } - case 415: + case 417: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2156 + //line sql.y:2163 { yyVAL.expr = NewIntVal(yyDollar[1].bytes) } - case 416: + case 418: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2160 + //line sql.y:2167 { yyVAL.expr = NewFloatVal(yyDollar[1].bytes) } - case 417: + case 419: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2164 + //line sql.y:2171 { yyVAL.expr = NewHexNum(yyDollar[1].bytes) } - case 418: + case 420: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2168 + //line sql.y:2175 { yyVAL.expr = NewValArg(yyDollar[1].bytes) } - case 419: + case 421: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2172 + //line sql.y:2179 { yyVAL.expr = &NullVal{} } - case 420: + case 422: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2178 + //line sql.y:2185 { // TODO(sougou): Deprecate this construct. 
if yyDollar[1].colIdent.Lowered() != "value" { @@ -4730,239 +4720,239 @@ yydefault: } yyVAL.expr = NewIntVal([]byte("1")) } - case 421: + case 423: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2187 + //line sql.y:2194 { yyVAL.expr = NewIntVal(yyDollar[1].bytes) } - case 422: + case 424: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2191 + //line sql.y:2198 { yyVAL.expr = NewValArg(yyDollar[1].bytes) } - case 423: + case 425: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2196 + //line sql.y:2203 { yyVAL.exprs = nil } - case 424: + case 426: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2200 + //line sql.y:2207 { yyVAL.exprs = yyDollar[3].exprs } - case 425: + case 427: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2205 + //line sql.y:2212 { yyVAL.expr = nil } - case 426: + case 428: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2209 + //line sql.y:2216 { yyVAL.expr = yyDollar[2].expr } - case 427: + case 429: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2214 + //line sql.y:2221 { yyVAL.orderBy = nil } - case 428: + case 430: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2218 + //line sql.y:2225 { yyVAL.orderBy = yyDollar[3].orderBy } - case 429: + case 431: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2224 + //line sql.y:2231 { yyVAL.orderBy = OrderBy{yyDollar[1].order} } - case 430: + case 432: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2228 + //line sql.y:2235 { yyVAL.orderBy = append(yyDollar[1].orderBy, yyDollar[3].order) } - case 431: + case 433: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2234 + //line sql.y:2241 { yyVAL.order = &Order{Expr: yyDollar[1].expr, Direction: yyDollar[2].str} } - case 432: + case 434: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2239 + //line sql.y:2246 { yyVAL.str = AscScr } - case 433: + case 435: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2243 + //line sql.y:2250 { yyVAL.str = AscScr } - case 434: + case 436: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2247 + //line sql.y:2254 { yyVAL.str = DescScr } - 
case 435: + case 437: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2252 + //line sql.y:2259 { yyVAL.limit = nil } - case 436: + case 438: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2256 + //line sql.y:2263 { yyVAL.limit = &Limit{Rowcount: yyDollar[2].expr} } - case 437: + case 439: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2260 + //line sql.y:2267 { yyVAL.limit = &Limit{Offset: yyDollar[2].expr, Rowcount: yyDollar[4].expr} } - case 438: + case 440: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2264 + //line sql.y:2271 { yyVAL.limit = &Limit{Offset: yyDollar[4].expr, Rowcount: yyDollar[2].expr} } - case 439: + case 441: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2269 + //line sql.y:2276 { yyVAL.str = "" } - case 440: + case 442: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2273 + //line sql.y:2280 { yyVAL.str = ForUpdateStr } - case 441: + case 443: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2277 + //line sql.y:2284 { yyVAL.str = ShareModeStr } - case 442: + case 444: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2290 + //line sql.y:2297 { yyVAL.ins = &Insert{Rows: yyDollar[2].values} } - case 443: + case 445: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2294 + //line sql.y:2301 { yyVAL.ins = &Insert{Rows: yyDollar[1].selStmt} } - case 444: + case 446: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2298 + //line sql.y:2305 { // Drop the redundant parenthesis. yyVAL.ins = &Insert{Rows: yyDollar[2].selStmt} } - case 445: + case 447: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:2303 + //line sql.y:2310 { yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[5].values} } - case 446: + case 448: yyDollar = yyS[yypt-4 : yypt+1] - //line sql.y:2307 + //line sql.y:2314 { yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[4].selStmt} } - case 447: + case 449: yyDollar = yyS[yypt-6 : yypt+1] - //line sql.y:2311 + //line sql.y:2318 { // Drop the redundant parenthesis. 
yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[5].selStmt} } - case 448: + case 450: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2318 + //line sql.y:2325 { yyVAL.columns = Columns{yyDollar[1].colIdent} } - case 449: + case 451: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2322 + //line sql.y:2329 { yyVAL.columns = Columns{yyDollar[3].colIdent} } - case 450: + case 452: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2326 + //line sql.y:2333 { yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent) } - case 451: + case 453: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:2330 + //line sql.y:2337 { yyVAL.columns = append(yyVAL.columns, yyDollar[5].colIdent) } - case 452: + case 454: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2335 + //line sql.y:2342 { yyVAL.updateExprs = nil } - case 453: + case 455: yyDollar = yyS[yypt-5 : yypt+1] - //line sql.y:2339 + //line sql.y:2346 { yyVAL.updateExprs = yyDollar[5].updateExprs } - case 454: + case 456: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2345 + //line sql.y:2352 { yyVAL.values = Values{yyDollar[1].valTuple} } - case 455: + case 457: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2349 + //line sql.y:2356 { yyVAL.values = append(yyDollar[1].values, yyDollar[3].valTuple) } - case 456: + case 458: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2355 + //line sql.y:2362 { yyVAL.valTuple = yyDollar[1].valTuple } - case 457: + case 459: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2359 + //line sql.y:2366 { yyVAL.valTuple = ValTuple{} } - case 458: + case 460: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2365 + //line sql.y:2372 { yyVAL.valTuple = ValTuple(yyDollar[2].exprs) } - case 459: + case 461: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2371 + //line sql.y:2378 { if len(yyDollar[1].valTuple) == 1 { yyVAL.expr = &ParenExpr{yyDollar[1].valTuple[0]} @@ -4970,195 +4960,189 @@ yydefault: yyVAL.expr = yyDollar[1].valTuple } } - case 460: + case 462: yyDollar = yyS[yypt-1 : yypt+1] - 
//line sql.y:2381 + //line sql.y:2388 { yyVAL.updateExprs = UpdateExprs{yyDollar[1].updateExpr} } - case 461: + case 463: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2385 + //line sql.y:2392 { yyVAL.updateExprs = append(yyDollar[1].updateExprs, yyDollar[3].updateExpr) } - case 462: + case 464: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2391 + //line sql.y:2398 { yyVAL.updateExpr = &UpdateExpr{Name: yyDollar[1].colName, Expr: yyDollar[3].expr} } - case 465: + case 467: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2400 + //line sql.y:2407 { yyVAL.byt = 0 } - case 466: + case 468: yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2402 + //line sql.y:2409 { yyVAL.byt = 1 } - case 467: + case 469: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2405 + //line sql.y:2412 { yyVAL.empty = struct{}{} } - case 468: + case 470: yyDollar = yyS[yypt-3 : yypt+1] - //line sql.y:2407 + //line sql.y:2414 { yyVAL.empty = struct{}{} } - case 469: + case 471: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2410 + //line sql.y:2417 { yyVAL.str = "" } - case 470: - yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2412 - { - yyVAL.str = IgnoreStr - } - case 471: - yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2416 - { - yyVAL.empty = struct{}{} - } case 472: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2418 + //line sql.y:2419 { - yyVAL.empty = struct{}{} + yyVAL.str = IgnoreStr } case 473: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2420 + //line sql.y:2423 { yyVAL.empty = struct{}{} } case 474: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2422 + //line sql.y:2425 { yyVAL.empty = struct{}{} } case 475: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2424 + //line sql.y:2427 { yyVAL.empty = struct{}{} } case 476: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2426 + //line sql.y:2429 { yyVAL.empty = struct{}{} } case 477: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2428 + //line sql.y:2431 { yyVAL.empty = struct{}{} } case 478: yyDollar = yyS[yypt-1 : yypt+1] - //line 
sql.y:2430 + //line sql.y:2433 { yyVAL.empty = struct{}{} } case 479: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2432 + //line sql.y:2435 { yyVAL.empty = struct{}{} } case 480: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2434 + //line sql.y:2437 { yyVAL.empty = struct{}{} } case 481: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2436 + //line sql.y:2439 { yyVAL.empty = struct{}{} } case 482: - yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2439 + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2441 { yyVAL.empty = struct{}{} } case 483: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2441 + //line sql.y:2443 { yyVAL.empty = struct{}{} } case 484: - yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2443 + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2446 { yyVAL.empty = struct{}{} } case 485: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2447 + //line sql.y:2448 { yyVAL.empty = struct{}{} } case 486: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2449 + //line sql.y:2450 { yyVAL.empty = struct{}{} } case 487: - yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2452 + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2454 { yyVAL.empty = struct{}{} } case 488: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2454 + //line sql.y:2456 { yyVAL.empty = struct{}{} } case 489: - yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2456 + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2459 { yyVAL.empty = struct{}{} } case 490: - yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2459 + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2461 { - yyVAL.colIdent = ColIdent{} + yyVAL.empty = struct{}{} } case 491: - yyDollar = yyS[yypt-2 : yypt+1] - //line sql.y:2461 + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2463 { - yyVAL.colIdent = yyDollar[2].colIdent + yyVAL.empty = struct{}{} } case 492: - yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2465 + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2466 { - yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) + yyVAL.colIdent = 
ColIdent{} } case 493: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2468 + { + yyVAL.colIdent = yyDollar[2].colIdent + } + case 494: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2469 + //line sql.y:2472 { yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) } @@ -5168,15 +5152,15 @@ yydefault: { yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) } - case 496: + case 497: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2482 + //line sql.y:2483 { - yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) + yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) } - case 497: + case 498: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2486 + //line sql.y:2489 { yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) } @@ -5186,42 +5170,48 @@ yydefault: { yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) } - case 659: + case 501: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2500 + { + yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) + } + case 661: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2678 + //line sql.y:2685 { if incNesting(yylex) { yylex.Error("max nesting level reached") return 1 } } - case 660: + case 662: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2687 + //line sql.y:2694 { decNesting(yylex) } - case 661: + case 663: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2692 + //line sql.y:2699 { forceEOF(yylex) } - case 662: + case 664: yyDollar = yyS[yypt-0 : yypt+1] - //line sql.y:2697 + //line sql.y:2704 { forceEOF(yylex) } - case 663: + case 665: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2701 + //line sql.y:2708 { forceEOF(yylex) } - case 664: + case 666: yyDollar = yyS[yypt-1 : yypt+1] - //line sql.y:2705 + //line sql.y:2712 { forceEOF(yylex) } diff --git a/go/vt/sqlparser/sql.y b/go/vt/sqlparser/sql.y index bf224694911..495836765fa 100644 --- a/go/vt/sqlparser/sql.y +++ b/go/vt/sqlparser/sql.y @@ -107,7 +107,7 @@ func forceEOF(yylex interface{}) { %token LEX_ERROR %left UNION -%token SELECT INSERT 
UPDATE DELETE FROM WHERE GROUP HAVING ORDER BY LIMIT OFFSET FOR +%token SELECT STREAM INSERT UPDATE DELETE FROM WHERE GROUP HAVING ORDER BY LIMIT OFFSET FOR %token ALL DISTINCT AS EXISTS ASC DESC INTO DUPLICATE KEY DEFAULT SET LOCK KEYS %token VALUES LAST_INSERT_ID %token NEXT VALUE SHARE MODE @@ -185,7 +185,7 @@ func forceEOF(yylex interface{}) { %type command %type select_statement base_select union_lhs union_rhs -%type insert_statement update_statement delete_statement set_statement +%type stream_statement insert_statement update_statement delete_statement set_statement %type create_statement alter_statement rename_statement drop_statement truncate_statement %type create_table_prefix %type analyze_statement show_statement use_statement other_statement @@ -287,6 +287,7 @@ command: { $$ = $1 } +| stream_statement | insert_statement | update_statement | delete_statement @@ -319,6 +320,12 @@ select_statement: $$ = &Select{Comments: Comments($2), Cache: $3, SelectExprs: SelectExprs{Nextval{Expr: $5}}, From: TableExprs{&AliasedTableExpr{Expr: $7}}} } +stream_statement: + STREAM comment_opt select_expression FROM table_name + { + $$ = &Stream{Comments: Comments($2), SelectExpr: $3, Table: $5} + } + // base_select is an unparenthesized SELECT with no order by clause or beyond. 
base_select: SELECT comment_opt cache_opt distinct_opt straight_join_opt select_expression_list from_opt where_expression_opt group_by_opt having_opt diff --git a/go/vt/sqlparser/token.go b/go/vt/sqlparser/token.go index cd256a169a6..5db4187c6e6 100644 --- a/go/vt/sqlparser/token.go +++ b/go/vt/sqlparser/token.go @@ -313,6 +313,7 @@ var keywords = map[string]int{ "status": STATUS, "stored": UNUSED, "straight_join": STRAIGHT_JOIN, + "stream": STREAM, "table": TABLE, "tables": TABLES, "terminated": UNUSED, diff --git a/go/vt/srvtopo/discover.go b/go/vt/srvtopo/discover.go new file mode 100644 index 00000000000..bc222c659a1 --- /dev/null +++ b/go/vt/srvtopo/discover.go @@ -0,0 +1,99 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package srvtopo + +import ( + "sync" + + log "github.com/golang/glog" + "golang.org/x/net/context" + + "github.com/youtube/vitess/go/vt/concurrency" + "github.com/youtube/vitess/go/vt/topo" + + querypb "github.com/youtube/vitess/go/vt/proto/query" + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" +) + +// FindAllTargets goes through all serving shards in the topology +// for the provided tablet types. It returns one Target object per +// keyspace / shard / matching TabletType. 
+func FindAllTargets(ctx context.Context, ts Server, cell string, tabletTypes []topodatapb.TabletType) ([]*querypb.Target, error) { + ksNames, err := ts.GetSrvKeyspaceNames(ctx, cell) + if err != nil { + return nil, err + } + + var targets []*querypb.Target + var wg sync.WaitGroup + var mu sync.Mutex + var errRecorder concurrency.AllErrorRecorder + for _, ksName := range ksNames { + wg.Add(1) + go func(keyspace string) { + defer wg.Done() + + // Get SrvKeyspace for cell/keyspace. + ks, err := ts.GetSrvKeyspace(ctx, cell, keyspace) + if err != nil { + if err == topo.ErrNoNode { + // Possibly a race condition, or leftover + // crud in the topology service. Just log it. + log.Warningf("GetSrvKeyspace(%v, %v) returned ErrNoNode, skipping that SrvKeyspace", cell, keyspace) + } else { + // More serious error, abort. + errRecorder.RecordError(err) + } + return + } + + // Get all shard names that are used for serving. + for _, ksPartition := range ks.Partitions { + // Check we're waiting for tablets of that type. + waitForIt := false + for _, tt := range tabletTypes { + if tt == ksPartition.ServedType { + waitForIt = true + } + } + if !waitForIt { + continue + } + + // Add all the shards. Note we can't have + // duplicates, as there is only one entry per + // TabletType in the Partitions list. + mu.Lock() + for _, shard := range ksPartition.ShardReferences { + targets = append(targets, &querypb.Target{ + Cell: cell, + Keyspace: keyspace, + Shard: shard.Name, + TabletType: ksPartition.ServedType, + }) + } + mu.Unlock() + } + }(ksName) + } + wg.Wait() + if errRecorder.HasErrors() { + return nil, errRecorder.Error() + } + + return targets, nil +} diff --git a/go/vt/srvtopo/discover_test.go b/go/vt/srvtopo/discover_test.go new file mode 100644 index 00000000000..53c66091082 --- /dev/null +++ b/go/vt/srvtopo/discover_test.go @@ -0,0 +1,167 @@ +/* +Copyright 2017 Google Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package srvtopo + +import ( + "flag" + "reflect" + "sort" + "testing" + + "golang.org/x/net/context" + + "github.com/youtube/vitess/go/vt/topo/memorytopo" + + querypb "github.com/youtube/vitess/go/vt/proto/query" + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" +) + +// To sort []*querypb.Target for comparison. +type TargetArray []*querypb.Target + +func (a TargetArray) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a TargetArray) Len() int { return len(a) } +func (a TargetArray) Less(i, j int) bool { + if a[i].Cell != a[j].Cell { + return a[i].Cell < a[j].Cell + } + if a[i].Keyspace != a[j].Keyspace { + return a[i].Keyspace < a[j].Keyspace + } + if a[i].Shard != a[j].Shard { + return a[i].Shard < a[j].Shard + } + return a[i].TabletType < a[j].TabletType +} + +func TestFindAllTargets(t *testing.T) { + ctx := context.Background() + ts := memorytopo.NewServer("cell1", "cell2") + flag.Set("srv_topo_cache_refresh", "0s") // No caching values + flag.Set("srv_topo_cache_ttl", "0s") // No caching values + rs := NewResilientServer(ts, "TestFindAllKeyspaceShards") + + // No keyspace / shards. + ks, err := FindAllTargets(ctx, rs, "cell1", []topodatapb.TabletType{topodatapb.TabletType_MASTER}) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if len(ks) > 0 { + t.Errorf("why did I get anything? %v", ks) + } + + // Add one. 
+ if err := ts.UpdateSrvKeyspace(ctx, "cell1", "test_keyspace", &topodatapb.SrvKeyspace{ + Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ + { + ServedType: topodatapb.TabletType_MASTER, + ShardReferences: []*topodatapb.ShardReference{ + { + Name: "test_shard0", + }, + }, + }, + }, + }); err != nil { + t.Fatalf("can't add srvKeyspace: %v", err) + } + + // Get it. + ks, err = FindAllTargets(ctx, rs, "cell1", []topodatapb.TabletType{topodatapb.TabletType_MASTER}) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(ks, []*querypb.Target{ + { + Cell: "cell1", + Keyspace: "test_keyspace", + Shard: "test_shard0", + TabletType: topodatapb.TabletType_MASTER, + }, + }) { + t.Errorf("got wrong value: %v", ks) + } + + // Add another one. + if err := ts.UpdateSrvKeyspace(ctx, "cell1", "test_keyspace2", &topodatapb.SrvKeyspace{ + Partitions: []*topodatapb.SrvKeyspace_KeyspacePartition{ + { + ServedType: topodatapb.TabletType_MASTER, + ShardReferences: []*topodatapb.ShardReference{ + { + Name: "test_shard1", + }, + }, + }, + { + ServedType: topodatapb.TabletType_REPLICA, + ShardReferences: []*topodatapb.ShardReference{ + { + Name: "test_shard2", + }, + }, + }, + }, + }); err != nil { + t.Fatalf("can't add srvKeyspace: %v", err) + } + + // Get it for all types. 
+ ks, err = FindAllTargets(ctx, rs, "cell1", []topodatapb.TabletType{topodatapb.TabletType_MASTER, topodatapb.TabletType_REPLICA}) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + sort.Sort(TargetArray(ks)) + if !reflect.DeepEqual(ks, []*querypb.Target{ + { + Cell: "cell1", + Keyspace: "test_keyspace", + Shard: "test_shard0", + TabletType: topodatapb.TabletType_MASTER, + }, + { + Cell: "cell1", + Keyspace: "test_keyspace2", + Shard: "test_shard1", + TabletType: topodatapb.TabletType_MASTER, + }, + { + Cell: "cell1", + Keyspace: "test_keyspace2", + Shard: "test_shard2", + TabletType: topodatapb.TabletType_REPLICA, + }, + }) { + t.Errorf("got wrong value: %v", ks) + } + + // Only get the REPLICA targets. + ks, err = FindAllTargets(ctx, rs, "cell1", []topodatapb.TabletType{topodatapb.TabletType_REPLICA}) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(ks, []*querypb.Target{ + { + Cell: "cell1", + Keyspace: "test_keyspace2", + Shard: "test_shard2", + TabletType: topodatapb.TabletType_REPLICA, + }, + }) { + t.Errorf("got wrong value: %v", ks) + } +} diff --git a/go/vt/srvtopo/resilient_server.go b/go/vt/srvtopo/resilient_server.go index 9f60f328615..6034b77a409 100644 --- a/go/vt/srvtopo/resilient_server.go +++ b/go/vt/srvtopo/resilient_server.go @@ -28,6 +28,7 @@ import ( "golang.org/x/net/context" "github.com/youtube/vitess/go/stats" + "github.com/youtube/vitess/go/vt/servenv" "github.com/youtube/vitess/go/vt/topo" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -70,34 +71,42 @@ const ( - + + + {{range $i, $skn := .SrvKeyspaceNames}} - + + + {{end}}
SrvKeyspace Names CacheSrvKeyspace Names Cache
Cell SrvKeyspace NamesTTLError
{{github_com_youtube_vitess_vtctld_srv_cell $skn.Cell}}{{if $skn.LastError}}{{$skn.LastError}}{{else}}{{range $j, $value := $skn.Value}}{{github_com_youtube_vitess_vtctld_srv_keyspace $skn.Cell $value}} {{end}}{{end}}{{range $j, $value := $skn.Value}}{{github_com_youtube_vitess_vtctld_srv_keyspace $skn.Cell $value}} {{end}}{{github_com_youtube_vitess_srvtopo_ttl_time $skn.ExpirationTime}}{{if $skn.LastError}}({{github_com_youtube_vitess_srvtopo_time_since $skn.LastQueryTime}}Ago) {{$skn.LastError}}{{end}}

- + + + {{range $i, $sk := .SrvKeyspaces}} - + + + {{end}}
SrvKeyspace CacheSrvKeyspace Cache
Cell Keyspace SrvKeyspaceTTLError
{{github_com_youtube_vitess_vtctld_srv_cell $sk.Cell}} {{github_com_youtube_vitess_vtctld_srv_keyspace $sk.Cell $sk.Keyspace}}{{if $sk.LastError}}{{$sk.LastError}}{{else}}{{$sk.StatusAsHTML}}{{end}}{{$sk.StatusAsHTML}}{{github_com_youtube_vitess_srvtopo_ttl_time $sk.ExpirationTime}}{{if $sk.LastError}}({{github_com_youtube_vitess_srvtopo_time_since $sk.LastErrorTime}} Ago) {{$sk.LastError}}{{end}}
@@ -128,6 +137,10 @@ type srvKeyspaceNamesEntry struct { // the mutex protects any access to this structure (read or write) mutex sync.Mutex + // refreshingChan is used to synchronize requests and avoid hammering + // the topo server + refreshingChan chan struct{} + insertionTime time.Time lastQueryTime time.Time value []string @@ -135,6 +148,14 @@ type srvKeyspaceNamesEntry struct { lastErrorCtx context.Context } +type watchState int + +const ( + watchStateIdle watchState = iota + watchStateStarting + watchStateRunning +) + type srvKeyspaceEntry struct { // unmutable values cell string @@ -143,19 +164,30 @@ type srvKeyspaceEntry struct { // the mutex protects any access to this structure (read or write) mutex sync.RWMutex - // watchRunning describes if the watch go routine is running. + // watchState describes if the watch go routine is running. // It is easier to have an explicit field instead of guessing // based on value and lastError. // - // if watchrunning is not set, the next time we try to access the - // keyspace, we will start a watch. - // if watchrunning is set, we are guaranteed to have lastError be + // if the state is watchStateIdle, and the time since the last error is + // greater than the refresh time, the next time we try to access the + // keyspace, we will set watchState to watchStarting and kick off the + // watch in a separate goroutine + // + // in watchStateRunning, we are guaranteed to have lastError be // non-nil and an up-to-date value (which may be nil) - watchRunning bool - value *topodatapb.SrvKeyspace - lastError error + watchState watchState - // valueTime is the time when the watch last obtained a non-nil value. 
+ // watchStartingCond is used to serialize callers for the first attempt + // to establish the watch + watchStartingChan chan struct{} + + value *topodatapb.SrvKeyspace + lastError error + + // lastValueTime is the time when the cached value is known to be valid, + // either because the watch last obtained a non-nil value or when a + // running watch first got an error. + // // It is compared to the TTL to determine if we can return the value // when the watch is failing lastValueTime time.Time @@ -168,9 +200,14 @@ type srvKeyspaceEntry struct { // has a bad keyspace or cell name. lastErrorCtx context.Context - // lastErrorTime records the time that the watch failed, so that - // any requests that come in + // lastErrorTime records the time that the watch failed, used for + // the status page lastErrorTime time.Time + + // lastWatchTime records the time that the watch was last started, + // used to ensure we don't restart the watch more often than the + // refresh time + lastWatchTime time.Time } // NewResilientServer creates a new ResilientServer @@ -207,17 +244,19 @@ func (server *ResilientServer) GetSrvKeyspaceNames(ctx context.Context, cell str } server.mutex.Unlock() - // Lock the entry, and do everything holding the lock. This - // means two concurrent requests will only issue one - // underlying query. + // Lock the entry, and do everything holding the lock except + // querying the underlying topo server. + // + // This means that even if the topo server is very slow, two concurrent + // requests will only issue one underlying query. entry.mutex.Lock() defer entry.mutex.Unlock() - // If it is not time to check again, then return either the cached - // value or the cached error cacheValid := entry.value != nil && time.Since(entry.insertionTime) < server.cacheTTL shouldRefresh := time.Since(entry.lastQueryTime) > server.cacheRefresh + // If it is not time to check again, then return either the cached + // value or the cached error but don't ask consul again. 
if !shouldRefresh { if cacheValid { return entry.value, nil @@ -225,35 +264,74 @@ func (server *ResilientServer) GetSrvKeyspaceNames(ctx context.Context, cell str return nil, entry.lastError } - // Not in cache or needs refresh so try to get the real value. - // We use the context that issued the query here. - result, err := server.topoServer.GetSrvKeyspaceNames(ctx, cell) - if err == nil { - // save the value we got and the current time in the cache - entry.insertionTime = time.Now() - entry.value = result - } else { - if entry.insertionTime.IsZero() { - server.counts.Add(errorCategory, 1) - log.Errorf("GetSrvKeyspaceNames(%v, %v) failed: %v (no cached value, caching and returning error)", ctx, cell, err) - - } else if cacheValid { - server.counts.Add(cachedCategory, 1) - log.Warningf("GetSrvKeyspaceNames(%v, %v) failed: %v (returning cached value: %v %v)", ctx, cell, err, entry.value, entry.lastError) - result = entry.value - err = nil - } else { - server.counts.Add(errorCategory, 1) - log.Errorf("GetSrvKeyspaceNames(%v, %v) failed: %v (cached value expired)", ctx, cell, err) - entry.insertionTime = time.Time{} - entry.value = nil - } + // Refresh the state in a background goroutine if no refresh is already + // in progress none is already running. This way queries are not blocked + // while the cache is still valid but past the refresh time, and avoids + // calling out to the topo service while the lock is held. 
+ if entry.refreshingChan == nil { + entry.refreshingChan = make(chan struct{}) + entry.lastQueryTime = time.Now() + go func() { + result, err := server.topoServer.GetSrvKeyspaceNames(ctx, cell) + + entry.mutex.Lock() + defer func() { + close(entry.refreshingChan) + entry.refreshingChan = nil + entry.mutex.Unlock() + }() + + if err == nil { + // save the value we got and the current time in the cache + entry.insertionTime = time.Now() + entry.value = result + } else { + server.counts.Add(errorCategory, 1) + if entry.insertionTime.IsZero() { + log.Errorf("GetSrvKeyspaceNames(%v, %v) failed: %v (no cached value, caching and returning error)", ctx, cell, err) + + } else if entry.value != nil && time.Since(entry.insertionTime) < server.cacheTTL { + server.counts.Add(cachedCategory, 1) + log.Warningf("GetSrvKeyspaceNames(%v, %v) failed: %v (keeping cached value: %v)", ctx, cell, err, entry.value) + } else { + log.Errorf("GetSrvKeyspaceNames(%v, %v) failed: %v (cached value expired)", ctx, cell, err) + entry.insertionTime = time.Time{} + entry.value = nil + } + } + + entry.lastError = err + entry.lastErrorCtx = ctx + }() + } + + // If the cached entry is still valid then use it, otherwise wait + // for the refresh attempt to complete to get a more up to date + // response. + // + // In the event that the topo service is slow or unresponsive either + // on the initial fetch or if the cache TTL expires, then several + // requests could be blocked on refreshingCond waiting for the response + // to come back. 
+ if cacheValid { + return entry.value, nil + } + + refreshingChan := entry.refreshingChan + entry.mutex.Unlock() + select { + case <-refreshingChan: + case <-ctx.Done(): + entry.mutex.Lock() + return nil, fmt.Errorf("timed out waiting for keyspace names") + } + entry.mutex.Lock() + + if entry.value != nil { + return entry.value, nil } - entry.lastError = err - entry.lastQueryTime = time.Now() - entry.lastErrorCtx = ctx - return result, err + return nil, entry.lastError } func (server *ResilientServer) getSrvKeyspaceEntry(cell, keyspace string) *srvKeyspaceEntry { @@ -286,49 +364,79 @@ func (server *ResilientServer) GetSrvKeyspace(ctx context.Context, cell, keyspac // If the watch is already running, return the value entry.mutex.RLock() - if entry.watchRunning { + if entry.watchState == watchStateRunning { v, e := entry.value, entry.lastError entry.mutex.RUnlock() return v, e } entry.mutex.RUnlock() - // Lock the entry, and do everything holding the lock. This - // means two concurrent requests will only issue one - // underlying query. entry.mutex.Lock() defer entry.mutex.Unlock() - // If the watch is already running, return the value - if entry.watchRunning { + // If the watch is already running (now that we have the write lock), + // return the value + if entry.watchState == watchStateRunning { return entry.value, entry.lastError } - // Watch is not running, but check if the last time we got an error was - // more recent than the refresh interval. + // Watch is not running. Start a new one if it is time to use it and if + // there isn't one already one in the process of being started. + shouldRefresh := time.Since(entry.lastErrorTime) > server.cacheRefresh + if shouldRefresh && (entry.watchState == watchStateIdle) { + entry.watchState = watchStateStarting + entry.watchStartingChan = make(chan struct{}) + go server.watchSrvKeyspace(ctx, entry, cell, keyspace) + } + + // If the cached value is still valid, use it. 
Otherwise wait + // for the watch attempt to complete to get a more up to date + // response. // - // If so return either the last cached value or the last error we got. + // In the event that the topo service is slow or unresponsive either + // on the initial fetch or if the cache TTL expires, then several + // requests could be blocked waiting for the response to come back. cacheValid := entry.value != nil && time.Since(entry.lastValueTime) < server.cacheTTL - shouldRefresh := time.Since(entry.lastErrorTime) > server.cacheRefresh + if cacheValid { + server.counts.Add(cachedCategory, 1) + return entry.value, nil + } - if !shouldRefresh { - if cacheValid { - server.counts.Add(cachedCategory, 1) - return entry.value, nil + if entry.watchState == watchStateStarting { + watchStartingChan := entry.watchStartingChan + entry.mutex.Unlock() + select { + case <-watchStartingChan: + case <-ctx.Done(): + entry.mutex.Lock() + return nil, fmt.Errorf("timed out waiting for keyspace") } - return nil, entry.lastError + entry.mutex.Lock() } - // Time to try to start the watch again. + if entry.value != nil { + return entry.value, nil + } + + return nil, entry.lastError +} + +// watchSrvKeyspace is started in a separate goroutine and attempts to establish +// a watch. The caller context is provided to show in the UI in case the watch +// fails due to an error like a mistyped keyspace. +func (server *ResilientServer) watchSrvKeyspace(callerCtx context.Context, entry *srvKeyspaceEntry, cell, keyspace string) { // We use a background context, as starting the watch should keep going // even if the current query context is short-lived. 
newCtx := context.Background() current, changes, cancel := server.topoServer.WatchSrvKeyspace(newCtx, cell, keyspace) + + entry.mutex.Lock() + if current.Err != nil { // lastError and lastErrorCtx will be visible from the UI // until the next try entry.lastError = current.Err - entry.lastErrorCtx = ctx + entry.lastErrorCtx = callerCtx entry.lastErrorTime = time.Now() // if the node disappears, delete the cached value @@ -339,54 +447,68 @@ func (server *ResilientServer) GetSrvKeyspace(ctx context.Context, cell, keyspac server.counts.Add(errorCategory, 1) log.Errorf("Initial WatchSrvKeyspace failed for %v/%v: %v", cell, keyspace, current.Err) - if cacheValid { - return entry.value, nil + if time.Since(entry.lastValueTime) > server.cacheTTL { + log.Errorf("WatchSrvKeyspace clearing cached entry for %v/%v", cell, keyspace) + entry.value = nil } - return nil, current.Err + entry.watchState = watchStateIdle + close(entry.watchStartingChan) + entry.watchStartingChan = nil + entry.mutex.Unlock() + return } // we are now watching, cache the first notification - entry.watchRunning = true + entry.watchState = watchStateRunning + close(entry.watchStartingChan) + entry.watchStartingChan = nil entry.value = current.Value entry.lastValueTime = time.Now() + entry.lastError = nil entry.lastErrorCtx = nil - go func() { - defer cancel() - - for c := range changes { - if c.Err != nil { - // Watch errored out. - // - // Log it and store the error, but do not clear the value - // so it can be used until the ttl elapses unless the node - // was deleted. 
- err := fmt.Errorf("WatchSrvKeyspace failed for %v/%v: %v", cell, keyspace, c.Err) - log.Errorf("%v", err) - server.counts.Add(errorCategory, 1) - entry.mutex.Lock() - if c.Err == topo.ErrNoNode { - entry.value = nil - } - entry.watchRunning = false - entry.lastError = err - entry.lastErrorCtx = nil - entry.mutex.Unlock() - return + entry.lastErrorTime = time.Time{} + + entry.mutex.Unlock() + + defer cancel() + for c := range changes { + if c.Err != nil { + // Watch errored out. + // + // Log it and store the error, but do not clear the value + // so it can be used until the ttl elapses unless the node + // was deleted. + err := fmt.Errorf("WatchSrvKeyspace failed for %v/%v: %v", cell, keyspace, c.Err) + log.Errorf("%v", err) + server.counts.Add(errorCategory, 1) + entry.mutex.Lock() + if c.Err == topo.ErrNoNode { + entry.value = nil } + entry.watchState = watchStateIdle - // We got a new value, save it. - entry.mutex.Lock() - entry.value = c.Value + // Even though we didn't get a new value, update the lastValueTime + // here since the watch was successfully running before and we want + // the value to be cached for the full TTL from here onwards. entry.lastValueTime = time.Now() - entry.lastError = nil + + entry.lastError = err entry.lastErrorCtx = nil + entry.lastErrorTime = time.Now() entry.mutex.Unlock() + return } - }() - return entry.value, entry.lastError + // We got a new value, save it. 
+ entry.mutex.Lock() + entry.value = c.Value + entry.lastError = nil + entry.lastErrorCtx = nil + entry.lastErrorTime = time.Time{} + entry.mutex.Unlock() + } } var watchSrvVSchemaSleepTime = 5 * time.Second @@ -436,10 +558,12 @@ func (server *ResilientServer) WatchSrvVSchema(ctx context.Context, cell string, // SrvKeyspaceNamesCacheStatus is the current value for SrvKeyspaceNames type SrvKeyspaceNamesCacheStatus struct { - Cell string - Value []string - LastError error - LastErrorCtx context.Context + Cell string + Value []string + ExpirationTime time.Time + LastQueryTime time.Time + LastError error + LastErrorCtx context.Context } // SrvKeyspaceNamesCacheStatusList is used for sorting @@ -462,11 +586,13 @@ func (skncsl SrvKeyspaceNamesCacheStatusList) Swap(i, j int) { // SrvKeyspaceCacheStatus is the current value for a SrvKeyspace object type SrvKeyspaceCacheStatus struct { - Cell string - Keyspace string - Value *topodatapb.SrvKeyspace - LastError error - LastErrorCtx context.Context + Cell string + Keyspace string + Value *topodatapb.SrvKeyspace + ExpirationTime time.Time + LastErrorTime time.Time + LastError error + LastErrorCtx context.Context } // StatusAsHTML returns an HTML version of our status. 
@@ -532,23 +658,34 @@ func (server *ResilientServer) CacheStatus() *ResilientServerCacheStatus { for _, entry := range server.srvKeyspaceNamesCache { entry.mutex.Lock() + result.SrvKeyspaceNames = append(result.SrvKeyspaceNames, &SrvKeyspaceNamesCacheStatus{ - Cell: entry.cell, - Value: entry.value, - LastError: entry.lastError, - LastErrorCtx: entry.lastErrorCtx, + Cell: entry.cell, + Value: entry.value, + ExpirationTime: entry.insertionTime.Add(server.cacheTTL), + LastQueryTime: entry.lastQueryTime, + LastError: entry.lastError, + LastErrorCtx: entry.lastErrorCtx, }) entry.mutex.Unlock() } for _, entry := range server.srvKeyspaceCache { entry.mutex.RLock() + + expirationTime := time.Now().Add(server.cacheTTL) + if entry.watchState != watchStateRunning { + expirationTime = entry.lastValueTime.Add(server.cacheTTL) + } + result.SrvKeyspaces = append(result.SrvKeyspaces, &SrvKeyspaceCacheStatus{ - Cell: entry.cell, - Keyspace: entry.keyspace, - Value: entry.value, - LastError: entry.lastError, - LastErrorCtx: entry.lastErrorCtx, + Cell: entry.cell, + Keyspace: entry.keyspace, + Value: entry.value, + ExpirationTime: expirationTime, + LastErrorTime: entry.lastErrorTime, + LastError: entry.lastError, + LastErrorCtx: entry.lastErrorCtx, }) entry.mutex.RUnlock() } @@ -561,3 +698,25 @@ func (server *ResilientServer) CacheStatus() *ResilientServerCacheStatus { return result } + +// Returns the ttl for the cached entry or "Expired" if it is in the past +func ttlTime(expirationTime time.Time) template.HTML { + ttl := time.Until(expirationTime).Round(time.Second) + if ttl < 0 { + return template.HTML("Expired") + } + return template.HTML(ttl.String()) +} + +func timeSince(t time.Time) template.HTML { + return template.HTML(time.Since(t).Round(time.Second).String()) +} + +var statusFuncs = template.FuncMap{ + "github_com_youtube_vitess_srvtopo_ttl_time": ttlTime, + "github_com_youtube_vitess_srvtopo_time_since": timeSince, +} + +func init() { + 
servenv.AddStatusFuncs(statusFuncs) +} diff --git a/go/vt/srvtopo/resilient_server_flaky_test.go b/go/vt/srvtopo/resilient_server_flaky_test.go index ad71e31d548..c244d3febe3 100644 --- a/go/vt/srvtopo/resilient_server_flaky_test.go +++ b/go/vt/srvtopo/resilient_server_flaky_test.go @@ -82,7 +82,14 @@ func TestGetSrvKeyspace(t *testing.T) { } // make sure the HTML template works - templ := template.New("").Funcs(status.StatusFuncs) + funcs := map[string]interface{}{} + for k, v := range status.StatusFuncs { + funcs[k] = v + } + for k, v := range statusFuncs { + funcs[k] = v + } + templ := template.New("").Funcs(funcs) templ, err = templ.Parse(TopoTemplate) if err != nil { t.Fatalf("error parsing template: %v", err) @@ -130,11 +137,12 @@ func TestGetSrvKeyspace(t *testing.T) { // Now simulate a topo service error and see that the last value is // cached for at least half of the expected ttl. + errorTestStart := time.Now() errorReqsBefore, _ := rs.counts.Counts()[errorCategory] forceErr := fmt.Errorf("test topo error") factory.SetError(forceErr) - expiry = updateTime.Add(*srvTopoCacheTTL / 2) + expiry = time.Now().Add(*srvTopoCacheTTL / 2) for { got, err = rs.GetSrvKeyspace(context.Background(), "test_cell", "test_ks") if err != nil || !proto.Equal(want, got) { @@ -176,7 +184,7 @@ func TestGetSrvKeyspace(t *testing.T) { factory.SetError(nil) _, err = rs.GetSrvKeyspace(context.Background(), "test_cell", "test_ks") if err == nil || err != forceErr { - t.Fatalf("expected error to be cached") + t.Errorf("expected error to be cached") } // Now sleep for the rest of the interval and we should get the value again @@ -186,12 +194,50 @@ func TestGetSrvKeyspace(t *testing.T) { t.Errorf("expected value to be restored, got %v", err) } - // Check that there were three errors counted during the interval, - // one for the original watch failing, then three more attempts to - // re-establish the watch + // Now sleep for the full TTL before setting the error again to test + // that 
even when there is no activity on the key, it is still cached + // for the full configured TTL. + time.Sleep(*srvTopoCacheTTL) + forceErr = fmt.Errorf("another test topo error") + factory.SetError(forceErr) + + expiry = time.Now().Add(*srvTopoCacheTTL / 2) + for { + _, err = rs.GetSrvKeyspace(context.Background(), "test_cell", "test_ks") + if err != nil { + t.Fatalf("value should have been cached for the full ttl") + } + if time.Now().After(expiry) { + break + } + time.Sleep(time.Millisecond) + } + + // Wait again until the TTL expires and we get the error + expiry = time.Now().Add(time.Second) + for { + _, err = rs.GetSrvKeyspace(context.Background(), "test_cell", "test_ks") + if err != nil { + if err == forceErr { + break + } + t.Fatalf("expected %v got %v", forceErr, err) + } + + if time.Now().After(expiry) { + t.Fatalf("timed out waiting for error") + } + time.Sleep(time.Millisecond) + } + + factory.SetError(nil) + + // Check that the expected number of errors were counted during the + // interval errorReqs, _ := rs.counts.Counts()[errorCategory] - if errorReqs-errorReqsBefore != 4 { - t.Errorf("expected 4 error requests got %d", errorReqs-errorReqsBefore) + expectedErrors := int64(time.Since(errorTestStart) / *srvTopoCacheRefresh) + if errorReqs-errorReqsBefore > expectedErrors { + t.Errorf("expected <= %v error requests got %d", expectedErrors, errorReqs-errorReqsBefore) } // Check that the watch now works to update the value @@ -211,6 +257,64 @@ func TestGetSrvKeyspace(t *testing.T) { } time.Sleep(time.Millisecond) } + + // Now test with a new error in which the topo service is locked during + // the test which prevents all queries from proceeding. 
+ forceErr = fmt.Errorf("test topo error with factory locked") + factory.SetError(forceErr) + factory.Lock() + go func() { + time.Sleep(*srvTopoCacheRefresh * 2) + factory.Unlock() + }() + + expiry = time.Now().Add(*srvTopoCacheTTL / 2) + for { + got, err = rs.GetSrvKeyspace(context.Background(), "test_cell", "test_ks") + if err != nil || !proto.Equal(want, got) { + // On a slow test machine it is possible that we never end up + // verifying the value is cached because it could take too long to + // even get into this loop... so log this as an informative message + // but don't fail the test + if time.Now().After(expiry) { + t.Logf("test execution was too slow -- caching was not verified") + break + } + + t.Errorf("expected keyspace to be cached for at least %s seconds, got error %v", time.Since(updateTime), err) + } + + if time.Now().After(expiry) { + break + } + + time.Sleep(time.Millisecond) + } + + // Clear the error, wait for things to proceed again + factory.SetError(nil) + time.Sleep(*srvTopoCacheTTL) + + got, err = rs.GetSrvKeyspace(context.Background(), "test_cell", "test_ks") + if err != nil || !proto.Equal(want, got) { + t.Errorf("expected error to clear, got %v", err) + } + + // Force another error and lock the topo. Then wait for the TTL to + // expire and verify that the context timeout unblocks the request. 
+ forceErr = fmt.Errorf("force long test error") + factory.SetError(forceErr) + factory.Lock() + + time.Sleep(*srvTopoCacheTTL) + + timeoutCtx, _ := context.WithTimeout(context.Background(), *srvTopoCacheRefresh*2) + _, err = rs.GetSrvKeyspace(timeoutCtx, "test_cell", "test_ks") + wantErr := "timed out waiting for keyspace" + if err == nil || err.Error() != wantErr { + t.Errorf("expected error '%v', got '%v'", wantErr, err.Error()) + } + factory.Unlock() } // TestSrvKeyspaceCachedError will test we properly re-try to query @@ -407,6 +511,14 @@ func TestGetSrvKeyspaceNames(t *testing.T) { forceErr := fmt.Errorf("force test error") factory.SetError(forceErr) + // Lock the topo for half the duration of the cache TTL to ensure our + // requests aren't blocked + factory.Lock() + go func() { + time.Sleep(*srvTopoCacheTTL / 2) + factory.Unlock() + }() + // Check that we get the cached value until at least the refresh interval // elapses but before the TTL expires start := time.Now() @@ -474,7 +586,23 @@ func TestGetSrvKeyspaceNames(t *testing.T) { } errorReqs, ok := rs.counts.Counts()[errorCategory] - if !ok || errorReqs != 1 { - t.Errorf("expected 1 error request got %v", errorReqs) + if !ok || errorReqs == 0 { + t.Errorf("expected non-zero error requests got %v", errorReqs) + } + + // Force another error and lock the topo. Then wait for the TTL to + // expire and verify that the context timeout unblocks the request. 
+ forceErr = fmt.Errorf("force long test error") + factory.SetError(forceErr) + factory.Lock() + + time.Sleep(*srvTopoCacheTTL) + + timeoutCtx, _ := context.WithTimeout(context.Background(), *srvTopoCacheRefresh*2) + _, err = rs.GetSrvKeyspaceNames(timeoutCtx, "test_cell") + wantErr := "timed out waiting for keyspace names" + if err == nil || err.Error() != wantErr { + t.Errorf("expected error '%v', got '%v'", wantErr, err.Error()) } + factory.Unlock() } diff --git a/go/vt/srvtopo/resolve.go b/go/vt/srvtopo/resolve.go index 08ccd40b992..878b7dbbec8 100644 --- a/go/vt/srvtopo/resolve.go +++ b/go/vt/srvtopo/resolve.go @@ -18,16 +18,13 @@ package srvtopo import ( "encoding/hex" - "sort" "github.com/youtube/vitess/go/vt/key" "github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/vterrors" "golang.org/x/net/context" - querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" - vtgatepb "github.com/youtube/vitess/go/vt/proto/vtgate" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) @@ -66,17 +63,6 @@ func GetAnyShard(ctx context.Context, topoServ Server, cell, keyspace string, ta return keyspace, allShards[0].Name, nil } -// GetAllKeyspaces returns all the known keyspaces in a shard. -func GetAllKeyspaces(ctx context.Context, topoServ Server, cell string) ([]string, error) { - keyspaces, err := topoServ.GetSrvKeyspaceNames(ctx, cell) - if err != nil { - return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "keyspace names fetch error: %v", err) - } - sort.Strings(keyspaces) - - return keyspaces, nil -} - // GetKeyspaceShards return all the shards in a keyspace. It follows // redirection if ServedFrom is set. 
func GetKeyspaceShards(ctx context.Context, topoServ Server, cell, keyspace string, tabletType topodatapb.TabletType) (string, *topodatapb.SrvKeyspace, []*topodatapb.ShardReference, error) { @@ -117,23 +103,6 @@ func GetShardForKeyspaceID(allShards []*topodatapb.ShardReference, keyspaceID [] return "", vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "KeyspaceId %v didn't match any shards %+v", hex.EncodeToString(keyspaceID), allShards) } -// MapEntityIdsToShards returns a map of shards to values to use in that shard. -func MapEntityIdsToShards(ctx context.Context, topoServ Server, cell, keyspace string, entityIds []*vtgatepb.ExecuteEntityIdsRequest_EntityId, tabletType topodatapb.TabletType) (string, map[string][]*querypb.Value, error) { - keyspace, _, allShards, err := GetKeyspaceShards(ctx, topoServ, cell, keyspace, tabletType) - if err != nil { - return "", nil, err - } - var shards = make(map[string][]*querypb.Value) - for _, eid := range entityIds { - shard, err := GetShardForKeyspaceID(allShards, eid.KeyspaceId) - if err != nil { - return "", nil, err - } - shards[shard] = append(shards[shard], &querypb.Value{Type: eid.Type, Value: eid.Value}) - } - return keyspace, shards, nil -} - // MapKeyRangesToShards returns the set of shards that "intersect" // with a collection of key-ranges; that is, a shard is included if // and only if its corresponding key-space ids are in one of the key-ranges. @@ -144,7 +113,7 @@ func MapKeyRangesToShards(ctx context.Context, topoServ Server, cell, keyspace s } uniqueShards := make(map[string]bool) for _, kr := range krs { - ResolveKeyRangeToShards(allShards, uniqueShards, kr) + keyRangeToShardMap(allShards, uniqueShards, kr) } var res = make([]string, 0, len(uniqueShards)) for s := range uniqueShards { @@ -153,8 +122,8 @@ func MapKeyRangesToShards(ctx context.Context, topoServ Server, cell, keyspace s return keyspace, res, nil } -// ResolveKeyRangeToShards maps a list of keyranges to shard names. 
-func ResolveKeyRangeToShards(allShards []*topodatapb.ShardReference, matches map[string]bool, kr *topodatapb.KeyRange) { +// keyRangeToShardMap adds shards to a map based on the input KeyRange. +func keyRangeToShardMap(allShards []*topodatapb.ShardReference, matches map[string]bool, kr *topodatapb.KeyRange) { if !key.KeyRangeIsPartial(kr) { for _, shard := range allShards { matches[shard.Name] = true @@ -168,6 +137,18 @@ func ResolveKeyRangeToShards(allShards []*topodatapb.ShardReference, matches map } } +// GetShardsForKeyRange maps keyranges to shards. +func GetShardsForKeyRange(allShards []*topodatapb.ShardReference, kr *topodatapb.KeyRange) []string { + isPartial := key.KeyRangeIsPartial(kr) + var shards []string + for _, shard := range allShards { + if !isPartial || key.KeyRangesIntersect(kr, shard.KeyRange) { + shards = append(shards, shard.Name) + } + } + return shards +} + // MapExactShards maps a keyrange to shards only if there's a complete // match. If there's any partial match the function returns no match. 
func MapExactShards(ctx context.Context, topoServ Server, cell, keyspace string, tabletType topodatapb.TabletType, kr *topodatapb.KeyRange) (newkeyspace string, shards []string, err error) { diff --git a/go/vt/srvtopo/resolve_test.go b/go/vt/srvtopo/resolve_test.go index 475ebf281d4..bc91ba8d92a 100644 --- a/go/vt/srvtopo/resolve_test.go +++ b/go/vt/srvtopo/resolve_test.go @@ -134,6 +134,80 @@ func TestMapKeyRangesToShards(t *testing.T) { } } +func TestGetShardsForKeyRange(t *testing.T) { + ctx := context.Background() + rs, err := initTopo("TestGetShardsForKeyRange") + if err != nil { + t.Fatal(err) + } + _, _, allShards, err := GetKeyspaceShards(ctx, rs, "cell1", "sks", topodatapb.TabletType_MASTER) + if err != nil { + t.Fatal(err) + } + + testCases := []struct { + input *topodatapb.KeyRange + output []string + }{{ + input: &topodatapb.KeyRange{ + Start: []byte{0x40}, + End: []byte{0x60}, + }, + output: []string{ + "40-60", + }, + }, { + input: &topodatapb.KeyRange{ + Start: []byte{0x40}, + End: []byte{0x80}, + }, + output: []string{ + "40-60", + "60-80", + }, + }, { + input: &topodatapb.KeyRange{ + Start: []byte{0x50}, + End: []byte{0x70}, + }, + output: []string{ + "40-60", + "60-80", + }, + }, { + input: &topodatapb.KeyRange{}, + output: []string{ + "-20", + "20-40", + "40-60", + "60-80", + "80-a0", + "a0-c0", + "c0-e0", + "e0-", + }, + }, { + input: nil, + output: []string{ + "-20", + "20-40", + "40-60", + "60-80", + "80-a0", + "a0-c0", + "c0-e0", + "e0-", + }, + }} + + for _, testCase := range testCases { + shards := GetShardsForKeyRange(allShards, testCase.input) + if !reflect.DeepEqual(shards, testCase.output) { + t.Errorf("GetShardsForKeyRange(%s): %v, want %v", key.KeyRangeString(testCase.input), shards, testCase.output) + } + } +} + func TestMapExactShards(t *testing.T) { ctx := context.Background() rs, err := initTopo("TestMapExactShards") @@ -201,6 +275,6 @@ func BenchmarkResolveKeyRangeToShards(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { 
- ResolveKeyRangeToShards(allShards, uniqueShards, kr) + keyRangeToShardMap(allShards, uniqueShards, kr) } } diff --git a/go/vt/srvtopo/resolver.go b/go/vt/srvtopo/resolver.go new file mode 100644 index 00000000000..5378dd0f6bd --- /dev/null +++ b/go/vt/srvtopo/resolver.go @@ -0,0 +1,470 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package srvtopo + +import ( + "sort" + + "github.com/golang/protobuf/proto" + "github.com/youtube/vitess/go/vt/key" + "github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/vterrors" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "golang.org/x/net/context" + + querypb "github.com/youtube/vitess/go/vt/proto/query" + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" + vtgatepb "github.com/youtube/vitess/go/vt/proto/vtgate" + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" +) + +// A Resolver can resolve keyspace ids and key ranges into ResolvedShard* +// objects. It uses an underlying srvtopo.Server to find the topology, +// and a TargetStats object to find the healthy destinations. +type Resolver struct { + // topoServ is the srvtopo.Server to use for topo queries. + topoServ Server + + // stats provides the health information. + stats TargetStats + + // localCell is the local cell for the queries. + localCell string + + // FIXME(alainjobart) also need a list of remote cells. + // FIXME(alainjobart) and a policy on how to use them. 
+ // But for now we only use the local cell. +} + +// NewResolver creates a new Resolver. +func NewResolver(topoServ Server, stats TargetStats, localCell string) *Resolver { + return &Resolver{ + topoServ: topoServ, + stats: stats, + localCell: localCell, + } +} + +// ResolvedShard contains everything we need to send a query to a shard. +type ResolvedShard struct { + // Target describes the target shard. + Target *querypb.Target + + // QueryService is the actual way to execute the query. + QueryService queryservice.QueryService +} + +// ResolvedShardEqual is an equality check on *ResolvedShard. +func ResolvedShardEqual(rs1, rs2 *ResolvedShard) bool { + return proto.Equal(rs1.Target, rs2.Target) +} + +// ResolvedShardsEqual is an equality check on []*ResolvedShard. +func ResolvedShardsEqual(rss1, rss2 []*ResolvedShard) bool { + if len(rss1) != len(rss2) { + return false + } + for i, rs1 := range rss1 { + if !ResolvedShardEqual(rs1, rss2[i]) { + return false + } + } + return true +} + +// GetKeyspaceShards return all the shards in a keyspace. It follows +// redirection if ServedFrom is set. It is only valid for the local cell. +// Do not use it to further resolve shards, instead use the Resolve* methods. +func (r *Resolver) GetKeyspaceShards(ctx context.Context, keyspace string, tabletType topodatapb.TabletType) (string, *topodatapb.SrvKeyspace, []*topodatapb.ShardReference, error) { + srvKeyspace, err := r.topoServ.GetSrvKeyspace(ctx, r.localCell, keyspace) + if err != nil { + return "", nil, nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "keyspace %v fetch error: %v", keyspace, err) + } + + // check if the keyspace has been redirected for this tabletType. 
+ for _, sf := range srvKeyspace.ServedFrom { + if sf.TabletType == tabletType { + keyspace = sf.Keyspace + srvKeyspace, err = r.topoServ.GetSrvKeyspace(ctx, r.localCell, keyspace) + if err != nil { + return "", nil, nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "keyspace %v fetch error: %v", keyspace, err) + } + } + } + + partition := topoproto.SrvKeyspaceGetPartition(srvKeyspace, tabletType) + if partition == nil { + return "", nil, nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "No partition found for tabletType %v in keyspace %v", topoproto.TabletTypeLString(tabletType), keyspace) + } + return keyspace, srvKeyspace, partition.ShardReferences, nil +} + +// ResolveKeyspaceIds turns a list of KeyspaceIds into a list of ResolvedShard. +// The returned ResolvedShard objects can then be used to execute the queries. +func (r *Resolver) ResolveKeyspaceIds(ctx context.Context, keyspace string, tabletType topodatapb.TabletType, keyspaceIds [][]byte) ([]*ResolvedShard, error) { + keyspace, _, allShards, err := r.GetKeyspaceShards(ctx, keyspace, tabletType) + if err != nil { + return nil, err + } + var res []*ResolvedShard + visited := make(map[string]bool) + for _, ksID := range keyspaceIds { + shard, err := GetShardForKeyspaceID(allShards, ksID) + if err != nil { + return nil, err + } + if !visited[shard] { + // First time we see this shard. + target := &querypb.Target{ + Keyspace: keyspace, + Shard: shard, + TabletType: tabletType, + Cell: r.localCell, + } + _, qs, err := r.stats.GetAggregateStats(target) + if err != nil { + return nil, resolverError(err, target) + } + + // FIXME(alainjobart) we ignore the stats for now. + // Later we can fallback to another cell if needed. + // We would then need to read the SrvKeyspace there too. + target.Cell = "" + res = append(res, &ResolvedShard{ + Target: target, + QueryService: qs, + }) + visited[shard] = true + } + } + return res, nil +} + +// GetAnyShard returns a ResolvedShard object for a random shard in the +// keyspace. 
In practice, the implementation now returns the first one. +func (r *Resolver) GetAnyShard(ctx context.Context, keyspace string, tabletType topodatapb.TabletType) (*ResolvedShard, error) { + keyspace, _, allShards, err := r.GetKeyspaceShards(ctx, keyspace, tabletType) + if err != nil { + return nil, err + } + if len(allShards) == 0 { + return nil, vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "keyspace: %v, tabletType: %v, no shard", keyspace, topoproto.TabletTypeLString(tabletType)) + } + + shard := allShards[0].Name + target := &querypb.Target{ + Keyspace: keyspace, + Shard: shard, + TabletType: tabletType, + Cell: r.localCell, + } + _, qs, err := r.stats.GetAggregateStats(target) + if err != nil { + return nil, resolverError(err, target) + } + target.Cell = "" + return &ResolvedShard{ + Target: target, + QueryService: qs, + }, nil +} + +// GetAllShards returns the list of ResolvedShards associated with all +// the shards in a keyspace. +func (r *Resolver) GetAllShards(ctx context.Context, keyspace string, tabletType topodatapb.TabletType) ([]*ResolvedShard, *topodatapb.SrvKeyspace, error) { + keyspace, srvKeyspace, allShards, err := r.GetKeyspaceShards(ctx, keyspace, tabletType) + if err != nil { + return nil, nil, err + } + + res := make([]*ResolvedShard, len(allShards)) + for i, shard := range allShards { + target := &querypb.Target{ + Keyspace: keyspace, + Shard: shard.Name, + TabletType: tabletType, + Cell: r.localCell, + } + _, qs, err := r.stats.GetAggregateStats(target) + if err != nil { + return nil, nil, resolverError(err, target) + } + + // FIXME(alainjobart) we ignore the stats for now. + // Later we can fallback to another cell if needed. + // We would then need to read the SrvKeyspace there too. + target.Cell = "" + res[i] = &ResolvedShard{ + Target: target, + QueryService: qs, + } + } + return res, srvKeyspace, nil +} + +// GetAllKeyspaces returns all the known keyspaces in the local cell. 
+func (r *Resolver) GetAllKeyspaces(ctx context.Context) ([]string, error) { + keyspaces, err := r.topoServ.GetSrvKeyspaceNames(ctx, r.localCell) + if err != nil { + return nil, vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "keyspace names fetch error: %v", err) + } + // FIXME(alainjobart) this should be unnecessary. The results + // of ListDir are sorted, and that's the underlying topo code. + // But the tests depend on this behavior now. + sort.Strings(keyspaces) + return keyspaces, nil +} + +// ResolveShards returns the list of ResolvedShards associated with the +// Shard list. +// FIXME(alainjobart) this should first resolve keyspace to allow for redirects. +func (r *Resolver) ResolveShards(ctx context.Context, keyspace string, shards []string, tabletType topodatapb.TabletType) ([]*ResolvedShard, error) { + res := make([]*ResolvedShard, 0, len(shards)) + visited := make(map[string]bool) + for _, shard := range shards { + // Make sure we don't duplicate shards. + if visited[shard] { + continue + } + visited[shard] = true + + target := &querypb.Target{ + Keyspace: keyspace, + Shard: shard, + TabletType: tabletType, + Cell: r.localCell, + } + _, qs, err := r.stats.GetAggregateStats(target) + if err != nil { + return nil, resolverError(err, target) + } + + // FIXME(alainjobart) we ignore the stats for now. + // Later we can fallback to another cell if needed. + target.Cell = "" + res = append(res, &ResolvedShard{ + Target: target, + QueryService: qs, + }) + } + return res, nil +} + +// ResolveExactShards resolves a keyrange to shards only if there's a complete +// match. If there's any partial match the function returns no match. 
+func (r *Resolver) ResolveExactShards(ctx context.Context, keyspace string, tabletType topodatapb.TabletType, kr *topodatapb.KeyRange) ([]*ResolvedShard, error) { + keyspace, _, allShards, err := r.GetKeyspaceShards(ctx, keyspace, tabletType) + if err != nil { + return nil, err + } + var res []*ResolvedShard + shardnum := 0 + for shardnum < len(allShards) { + if key.KeyRangeStartEqual(kr, allShards[shardnum].KeyRange) { + break + } + shardnum++ + } + for shardnum < len(allShards) { + if !key.KeyRangesIntersect(kr, allShards[shardnum].KeyRange) { + // If we are over the requested keyrange, we + // can stop now, we won't find more. + break + } + + target := &querypb.Target{ + Keyspace: keyspace, + Shard: allShards[shardnum].Name, + TabletType: tabletType, + Cell: r.localCell, + } + _, qs, err := r.stats.GetAggregateStats(target) + if err != nil { + return nil, resolverError(err, target) + } + + // FIXME(alainjobart) we ignore the stats for now. + // Later we can fallback to another cell if needed. + target.Cell = "" + res = append(res, &ResolvedShard{ + Target: target, + QueryService: qs, + }) + if key.KeyRangeEndEqual(kr, allShards[shardnum].KeyRange) { + return res, nil + } + shardnum++ + } + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "keyrange %v does not exactly match shards", key.KeyRangeString(kr)) +} + +// ResolveKeyRanges returns the set of shards that "intersect" +// with a collection of key-ranges; that is, a shard is included if +// and only if its corresponding key-space ids are in one of the key-ranges. 
+func (r *Resolver) ResolveKeyRanges(ctx context.Context, keyspace string, tabletType topodatapb.TabletType, krs []*topodatapb.KeyRange) ([]*ResolvedShard, error) { + keyspace, _, allShards, err := r.GetKeyspaceShards(ctx, keyspace, tabletType) + if err != nil { + return nil, err + } + var res []*ResolvedShard + visited := make(map[string]bool) + for _, kr := range krs { + for _, shard := range allShards { + if !key.KeyRangesIntersect(kr, shard.KeyRange) { + // We don't need that shard. + continue + } + if visited[shard.Name] { + // We've already added that shard. + continue + } + // We need to add this shard. + visited[shard.Name] = true + target := &querypb.Target{ + Keyspace: keyspace, + Shard: shard.Name, + TabletType: tabletType, + Cell: r.localCell, + } + _, qs, err := r.stats.GetAggregateStats(target) + if err != nil { + return nil, resolverError(err, target) + } + + // FIXME(alainjobart) we ignore the stats for now. + // Later we can fallback to another cell if needed. + // We would then need to read the SrvKeyspace there too. + target.Cell = "" + res = append(res, &ResolvedShard{ + Target: target, + QueryService: qs, + }) + } + } + return res, nil +} + +// ResolveEntityIds returns a list of shards and values to use in that shard. 
+func (r *Resolver) ResolveEntityIds(ctx context.Context, keyspace string, entityIds []*vtgatepb.ExecuteEntityIdsRequest_EntityId, tabletType topodatapb.TabletType) ([]*ResolvedShard, [][]*querypb.Value, error) { + keyspace, _, allShards, err := r.GetKeyspaceShards(ctx, keyspace, tabletType) + if err != nil { + return nil, nil, err + } + var result []*ResolvedShard + var values [][]*querypb.Value + resolved := make(map[string]int) + for _, eid := range entityIds { + shard, err := GetShardForKeyspaceID(allShards, eid.KeyspaceId) + if err != nil { + return nil, nil, err + } + i, ok := resolved[shard] + if !ok { + target := &querypb.Target{ + Keyspace: keyspace, + Shard: shard, + TabletType: tabletType, + Cell: r.localCell, + } + _, qs, err := r.stats.GetAggregateStats(target) + if err != nil { + return nil, nil, resolverError(err, target) + } + + // FIXME(alainjobart) we ignore the stats for now. + // Later we can fallback to another cell if needed. + // We would then need to read the SrvKeyspace there too. + target.Cell = "" + i = len(result) + result = append(result, &ResolvedShard{ + Target: target, + QueryService: qs, + }) + values = append(values, nil) + resolved[shard] = i + } + values[i] = append(values[i], &querypb.Value{Type: eid.Type, Value: eid.Value}) + } + return result, values, nil +} + +// ResolveKeyspaceIdsValues resolves keyspace IDs and values into their +// respective shards. Same logic as ResolveEntityIds. 
+func (r *Resolver) ResolveKeyspaceIdsValues(ctx context.Context, keyspace string, ids []*querypb.Value, ksids [][]byte, tabletType topodatapb.TabletType) ([]*ResolvedShard, [][]*querypb.Value, error) { + keyspace, _, allShards, err := r.GetKeyspaceShards(ctx, keyspace, tabletType) + if err != nil { + return nil, nil, err + } + var result []*ResolvedShard + var values [][]*querypb.Value + resolved := make(map[string]int) + for i, id := range ids { + shard, err := GetShardForKeyspaceID(allShards, ksids[i]) + if err != nil { + return nil, nil, err + } + in, ok := resolved[shard] + if !ok { + target := &querypb.Target{ + Keyspace: keyspace, + Shard: shard, + TabletType: tabletType, + Cell: r.localCell, + } + _, qs, err := r.stats.GetAggregateStats(target) + if err != nil { + return nil, nil, resolverError(err, target) + } + + // FIXME(alainjobart) we ignore the stats for now. + // Later we can fallback to another cell if needed. + // We would then need to read the SrvKeyspace there too. + target.Cell = "" + in = len(result) + result = append(result, &ResolvedShard{ + Target: target, + QueryService: qs, + }) + values = append(values, nil) + resolved[shard] = in + } + values[in] = append(values[in], id) + } + return result, values, nil +} + +// ValuesEqual is a helper method to compare arrays of values. 
+func ValuesEqual(vss1, vss2 [][]*querypb.Value) bool { + if len(vss1) != len(vss2) { + return false + } + for i, vs1 := range vss1 { + if len(vs1) != len(vss2[i]) { + return false + } + for j, v1 := range vs1 { + if !proto.Equal(v1, vss2[i][j]) { + return false + } + } + } + return true +} + +func resolverError(in error, target *querypb.Target) error { + return vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "target: %s.%s.%s, no valid tablet: %v", target.Keyspace, target.Shard, topoproto.TabletTypeLString(target.TabletType), in) +} diff --git a/go/vt/srvtopo/target_stats.go b/go/vt/srvtopo/target_stats.go new file mode 100644 index 00000000000..48806f06e1d --- /dev/null +++ b/go/vt/srvtopo/target_stats.go @@ -0,0 +1,130 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package srvtopo + +import ( + "fmt" + + querypb "github.com/youtube/vitess/go/vt/proto/query" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" +) + +// TargetStats is an interface that the srvtopo module uses to handle +// routing of queries. +// - discovery.TabletStatsCache will implement the discovery part of the +// interface, and discoverygateway will have the QueryService. +// - hybridgateway will also implement this interface: for each l2vtgate pool, +// it will establish a StreamHealth connection, and store the returned +// health stats. +type TargetStats interface { + // GetAggregateStats returns the aggregate stats for the given Target. 
+ // The srvtopo module will use that information to route queries + // to the right cell. Also returns the QueryService to use to + // reach that target. + // Can return topo.ErrNoNode if the target has no stats. + GetAggregateStats(target *querypb.Target) (*querypb.AggregateStats, queryservice.QueryService, error) + + // GetMasterCell returns the master location for a keyspace/shard. + // Since there is only one master for a shard, we only need to + // know its cell to complete the Target. Also returns the QueryService + // to use to reach that target. + GetMasterCell(keyspace, shard string) (cell string, qs queryservice.QueryService, err error) +} + +// TargetStatsListener is an interface used to propagate TargetStats changes. +// - discovery.TabletStatsCache will implement this interface. +// - the StreamHealth method in l2vtgate will use this interface to surface +// the health of its targets. +type TargetStatsListener interface { + // Subscribe will return the current full state of the TargetStats, + // and a channel that will receive subsequent updates. The int returned + // is the channel id, and can be sent to unsubscribe to stop + // notifications. + Subscribe() (int, []TargetStatsEntry, <-chan (*TargetStatsEntry), error) + + // Unsubscribe stops sending updates to the channel returned + // by Subscribe. The channel still needs to be drained to + // avoid deadlocks. + Unsubscribe(int) error +} + +// TargetStatsEntry has the updated information for a Target. +type TargetStatsEntry struct { + // Target is what this entry applies to. + Target *querypb.Target + + // Stats is the aggregate stats for this entry. + Stats *querypb.AggregateStats + + // TabletExternallyReparentedTimestamp is the latest timestamp + // that was reported for this entry. It applies to masters only. + TabletExternallyReparentedTimestamp int64 +} + +// TargetStatsMultiplexer is a helper class to help broadcast stats updates. 
+// It doesn't have any synchronization, as the container class will already
+// have some and this can just use it.
+type TargetStatsMultiplexer struct {
+	// listeners has the map of channels to send updates to.
+	listeners map[int]chan (*TargetStatsEntry)
+
+	// nextIndex has the next map id.
+	nextIndex int
+}
+
+// NewTargetStatsMultiplexer returns an initialized TargetStatsMultiplexer.
+func NewTargetStatsMultiplexer() TargetStatsMultiplexer {
+	return TargetStatsMultiplexer{
+		listeners: make(map[int]chan (*TargetStatsEntry)),
+	}
+}
+
+// Subscribe adds a channel to the list.
+// Will change the list.
+func (tsm *TargetStatsMultiplexer) Subscribe() (int, <-chan (*TargetStatsEntry)) {
+	i := tsm.nextIndex
+	tsm.nextIndex++
+	c := make(chan (*TargetStatsEntry), 100)
+	tsm.listeners[i] = c
+	return i, c
+}
+
+// Unsubscribe removes a channel from the list.
+// Will change the list.
+func (tsm *TargetStatsMultiplexer) Unsubscribe(i int) error {
+	c, ok := tsm.listeners[i]
+	if !ok {
+		return fmt.Errorf("TargetStatsMultiplexer.Unsubscribe(%v): no such channel", i)
+	}
+	delete(tsm.listeners, i)
+	close(c)
+	return nil
+}
+
+// HasSubscribers returns true if we have registered subscribers.
+// Will read the list.
+func (tsm *TargetStatsMultiplexer) HasSubscribers() bool {
+	return len(tsm.listeners) > 0
+}
+
+// Broadcast sends an update to the list.
+// Will read the list.
+func (tsm *TargetStatsMultiplexer) Broadcast(tse *TargetStatsEntry) { + for _, c := range tsm.listeners { + c <- tse + } +} diff --git a/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient.go b/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient.go index e7200e44269..3205e0082fd 100644 --- a/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient.go +++ b/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient.go @@ -21,13 +21,14 @@ import ( "flag" "golang.org/x/net/context" + "google.golang.org/grpc" "github.com/youtube/vitess/go/vt/grpcclient" - "github.com/youtube/vitess/go/vt/proto/throttlerdata" - "github.com/youtube/vitess/go/vt/proto/throttlerservice" "github.com/youtube/vitess/go/vt/throttler/throttlerclient" "github.com/youtube/vitess/go/vt/vterrors" - "google.golang.org/grpc" + + throttlerdatapb "github.com/youtube/vitess/go/vt/proto/throttlerdata" + throttlerservicepb "github.com/youtube/vitess/go/vt/proto/throttlerservice" ) var ( @@ -39,7 +40,7 @@ var ( type client struct { conn *grpc.ClientConn - gRPCClient throttlerservice.ThrottlerClient + gRPCClient throttlerservicepb.ThrottlerClient } func factory(addr string) (throttlerclient.Client, error) { @@ -51,7 +52,7 @@ func factory(addr string) (throttlerclient.Client, error) { if err != nil { return nil, err } - gRPCClient := throttlerservice.NewThrottlerClient(conn) + gRPCClient := throttlerservicepb.NewThrottlerClient(conn) return &client{conn, gRPCClient}, nil } @@ -59,7 +60,7 @@ func factory(addr string) (throttlerclient.Client, error) { // MaxRates is part of the throttlerclient.Client interface and returns the // current max rate for each throttler of the process. 
func (c *client) MaxRates(ctx context.Context) (map[string]int64, error) { - response, err := c.gRPCClient.MaxRates(ctx, &throttlerdata.MaxRatesRequest{}) + response, err := c.gRPCClient.MaxRates(ctx, &throttlerdatapb.MaxRatesRequest{}) if err != nil { return nil, vterrors.FromGRPC(err) } @@ -69,7 +70,7 @@ func (c *client) MaxRates(ctx context.Context) (map[string]int64, error) { // SetMaxRate is part of the throttlerclient.Client interface and sets the rate // on all throttlers of the server. func (c *client) SetMaxRate(ctx context.Context, rate int64) ([]string, error) { - request := &throttlerdata.SetMaxRateRequest{ + request := &throttlerdatapb.SetMaxRateRequest{ Rate: rate, } @@ -81,8 +82,8 @@ func (c *client) SetMaxRate(ctx context.Context, rate int64) ([]string, error) { } // GetConfiguration is part of the throttlerclient.Client interface. -func (c *client) GetConfiguration(ctx context.Context, throttlerName string) (map[string]*throttlerdata.Configuration, error) { - response, err := c.gRPCClient.GetConfiguration(ctx, &throttlerdata.GetConfigurationRequest{ +func (c *client) GetConfiguration(ctx context.Context, throttlerName string) (map[string]*throttlerdatapb.Configuration, error) { + response, err := c.gRPCClient.GetConfiguration(ctx, &throttlerdatapb.GetConfigurationRequest{ ThrottlerName: throttlerName, }) if err != nil { @@ -92,8 +93,8 @@ func (c *client) GetConfiguration(ctx context.Context, throttlerName string) (ma } // UpdateConfiguration is part of the throttlerclient.Client interface. 
-func (c *client) UpdateConfiguration(ctx context.Context, throttlerName string, configuration *throttlerdata.Configuration, copyZeroValues bool) ([]string, error) { - response, err := c.gRPCClient.UpdateConfiguration(ctx, &throttlerdata.UpdateConfigurationRequest{ +func (c *client) UpdateConfiguration(ctx context.Context, throttlerName string, configuration *throttlerdatapb.Configuration, copyZeroValues bool) ([]string, error) { + response, err := c.gRPCClient.UpdateConfiguration(ctx, &throttlerdatapb.UpdateConfigurationRequest{ ThrottlerName: throttlerName, Configuration: configuration, CopyZeroValues: copyZeroValues, @@ -106,7 +107,7 @@ func (c *client) UpdateConfiguration(ctx context.Context, throttlerName string, // ResetConfiguration is part of the throttlerclient.Client interface. func (c *client) ResetConfiguration(ctx context.Context, throttlerName string) ([]string, error) { - response, err := c.gRPCClient.ResetConfiguration(ctx, &throttlerdata.ResetConfigurationRequest{ + response, err := c.gRPCClient.ResetConfiguration(ctx, &throttlerdatapb.ResetConfigurationRequest{ ThrottlerName: throttlerName, }) if err != nil { diff --git a/go/vt/throttler/grpcthrottlerserver/grpcthrottlerserver.go b/go/vt/throttler/grpcthrottlerserver/grpcthrottlerserver.go index d6cebc0b69c..144751fca48 100644 --- a/go/vt/throttler/grpcthrottlerserver/grpcthrottlerserver.go +++ b/go/vt/throttler/grpcthrottlerserver/grpcthrottlerserver.go @@ -25,8 +25,8 @@ import ( "github.com/youtube/vitess/go/vt/servenv" "github.com/youtube/vitess/go/vt/throttler" - "github.com/youtube/vitess/go/vt/proto/throttlerdata" - "github.com/youtube/vitess/go/vt/proto/throttlerservice" + throttlerdatapb "github.com/youtube/vitess/go/vt/proto/throttlerdata" + throttlerservicepb "github.com/youtube/vitess/go/vt/proto/throttlerservice" ) // Server is the gRPC server implementation of the Throttler service. 
@@ -41,68 +41,68 @@ func NewServer(m throttler.Manager) *Server { // MaxRates implements the gRPC server interface. It returns the current max // rate for each throttler of the process. -func (s *Server) MaxRates(_ context.Context, request *throttlerdata.MaxRatesRequest) (_ *throttlerdata.MaxRatesResponse, err error) { +func (s *Server) MaxRates(_ context.Context, request *throttlerdatapb.MaxRatesRequest) (_ *throttlerdatapb.MaxRatesResponse, err error) { defer servenv.HandlePanic("throttler", &err) rates := s.manager.MaxRates() - return &throttlerdata.MaxRatesResponse{ + return &throttlerdatapb.MaxRatesResponse{ Rates: rates, }, nil } // SetMaxRate implements the gRPC server interface. It sets the rate on all // throttlers controlled by the manager. -func (s *Server) SetMaxRate(_ context.Context, request *throttlerdata.SetMaxRateRequest) (_ *throttlerdata.SetMaxRateResponse, err error) { +func (s *Server) SetMaxRate(_ context.Context, request *throttlerdatapb.SetMaxRateRequest) (_ *throttlerdatapb.SetMaxRateResponse, err error) { defer servenv.HandlePanic("throttler", &err) names := s.manager.SetMaxRate(request.Rate) - return &throttlerdata.SetMaxRateResponse{ + return &throttlerdatapb.SetMaxRateResponse{ Names: names, }, nil } // GetConfiguration implements the gRPC server interface. -func (s *Server) GetConfiguration(_ context.Context, request *throttlerdata.GetConfigurationRequest) (_ *throttlerdata.GetConfigurationResponse, err error) { +func (s *Server) GetConfiguration(_ context.Context, request *throttlerdatapb.GetConfigurationRequest) (_ *throttlerdatapb.GetConfigurationResponse, err error) { defer servenv.HandlePanic("throttler", &err) configurations, err := s.manager.GetConfiguration(request.ThrottlerName) if err != nil { return nil, err } - return &throttlerdata.GetConfigurationResponse{ + return &throttlerdatapb.GetConfigurationResponse{ Configurations: configurations, }, nil } // UpdateConfiguration implements the gRPC server interface. 
-func (s *Server) UpdateConfiguration(_ context.Context, request *throttlerdata.UpdateConfigurationRequest) (_ *throttlerdata.UpdateConfigurationResponse, err error) { +func (s *Server) UpdateConfiguration(_ context.Context, request *throttlerdatapb.UpdateConfigurationRequest) (_ *throttlerdatapb.UpdateConfigurationResponse, err error) { defer servenv.HandlePanic("throttler", &err) names, err := s.manager.UpdateConfiguration(request.ThrottlerName, request.Configuration, request.CopyZeroValues) if err != nil { return nil, err } - return &throttlerdata.UpdateConfigurationResponse{ + return &throttlerdatapb.UpdateConfigurationResponse{ Names: names, }, nil } // ResetConfiguration implements the gRPC server interface. -func (s *Server) ResetConfiguration(_ context.Context, request *throttlerdata.ResetConfigurationRequest) (_ *throttlerdata.ResetConfigurationResponse, err error) { +func (s *Server) ResetConfiguration(_ context.Context, request *throttlerdatapb.ResetConfigurationRequest) (_ *throttlerdatapb.ResetConfigurationResponse, err error) { defer servenv.HandlePanic("throttler", &err) names, err := s.manager.ResetConfiguration(request.ThrottlerName) if err != nil { return nil, err } - return &throttlerdata.ResetConfigurationResponse{ + return &throttlerdatapb.ResetConfigurationResponse{ Names: names, }, nil } // RegisterServer registers a new throttler server instance with the gRPC server. 
func RegisterServer(s *grpc.Server, m throttler.Manager) { - throttlerservice.RegisterThrottlerServer(s, NewServer(m)) + throttlerservicepb.RegisterThrottlerServer(s, NewServer(m)) } func init() { diff --git a/go/vt/throttler/manager.go b/go/vt/throttler/manager.go index d36e80ea2f8..7e2601ea198 100644 --- a/go/vt/throttler/manager.go +++ b/go/vt/throttler/manager.go @@ -22,7 +22,8 @@ import ( "sync" log "github.com/golang/glog" - "github.com/youtube/vitess/go/vt/proto/throttlerdata" + + throttlerdatapb "github.com/youtube/vitess/go/vt/proto/throttlerdata" ) // GlobalManager is the per-process manager which manages all active throttlers. @@ -40,7 +41,7 @@ type Manager interface { // GetConfiguration returns the configuration of the MaxReplicationlag module // for the given throttler or all throttlers if "throttlerName" is empty. - GetConfiguration(throttlerName string) (map[string]*throttlerdata.Configuration, error) + GetConfiguration(throttlerName string) (map[string]*throttlerdatapb.Configuration, error) // UpdateConfiguration (partially) updates the configuration of the // MaxReplicationlag module for the given throttler or all throttlers if @@ -48,7 +49,7 @@ type Manager interface { // If "copyZeroValues" is true, fields with zero values will be copied // as well. // The function returns the names of the updated throttlers. - UpdateConfiguration(throttlerName string, configuration *throttlerdata.Configuration, copyZeroValues bool) ([]string, error) + UpdateConfiguration(throttlerName string, configuration *throttlerdatapb.Configuration, copyZeroValues bool) ([]string, error) // ResetConfiguration resets the configuration of the MaxReplicationlag module // to the initial configuration for the given throttler or all throttlers if @@ -118,11 +119,11 @@ func (m *managerImpl) SetMaxRate(rate int64) []string { } // GetConfiguration implements the "Manager" interface. 
-func (m *managerImpl) GetConfiguration(throttlerName string) (map[string]*throttlerdata.Configuration, error) { +func (m *managerImpl) GetConfiguration(throttlerName string) (map[string]*throttlerdatapb.Configuration, error) { m.mu.Lock() defer m.mu.Unlock() - configurations := make(map[string]*throttlerdata.Configuration) + configurations := make(map[string]*throttlerdatapb.Configuration) if throttlerName != "" { t, ok := m.throttlers[throttlerName] @@ -140,7 +141,7 @@ func (m *managerImpl) GetConfiguration(throttlerName string) (map[string]*thrott } // UpdateConfiguration implements the "Manager" interface. -func (m *managerImpl) UpdateConfiguration(throttlerName string, configuration *throttlerdata.Configuration, copyZeroValues bool) ([]string, error) { +func (m *managerImpl) UpdateConfiguration(throttlerName string, configuration *throttlerdatapb.Configuration, copyZeroValues bool) ([]string, error) { m.mu.Lock() defer m.mu.Unlock() diff --git a/go/vt/throttler/manager_test.go b/go/vt/throttler/manager_test.go index 7d3b2a408ac..9066926fdb9 100644 --- a/go/vt/throttler/manager_test.go +++ b/go/vt/throttler/manager_test.go @@ -24,7 +24,7 @@ import ( "testing" "time" - "github.com/youtube/vitess/go/vt/proto/throttlerdata" + throttlerdatapb "github.com/youtube/vitess/go/vt/proto/throttlerdata" ) // We base our test data on these defaults. @@ -103,7 +103,7 @@ func TestManager_GetConfiguration(t *testing.T) { defer f.tearDown() // Test GetConfiguration() when all throttlers are returned. - want := map[string]*throttlerdata.Configuration{ + want := map[string]*throttlerdatapb.Configuration{ "t1": &defaultMaxReplicationLagModuleConfig.Configuration, "t2": &defaultMaxReplicationLagModuleConfig.Configuration, } @@ -116,7 +116,7 @@ func TestManager_GetConfiguration(t *testing.T) { } // Test GetConfiguration() when a specific throttler is requested. 
- wantT2 := map[string]*throttlerdata.Configuration{ + wantT2 := map[string]*throttlerdatapb.Configuration{ "t2": &defaultMaxReplicationLagModuleConfig.Configuration, } gotT2, err := f.m.GetConfiguration("t2") @@ -128,7 +128,7 @@ func TestManager_GetConfiguration(t *testing.T) { } // Now change the config and then reset it back. - newConfig := &throttlerdata.Configuration{ + newConfig := &throttlerdatapb.Configuration{ TargetReplicationLagSec: defaultTargetLag + 1, IgnoreNSlowestReplicas: defaultIgnoreNSlowestReplicas + 1, } @@ -172,7 +172,7 @@ func TestManager_UpdateConfiguration_Error(t *testing.T) { defer f.tearDown() // Check that errors from Verify() are correctly propagated. - invalidConfig := &throttlerdata.Configuration{ + invalidConfig := &throttlerdatapb.Configuration{ // max < 2 is not allowed. MaxReplicationLagSec: 1, } @@ -195,7 +195,7 @@ func TestManager_UpdateConfiguration_Partial(t *testing.T) { // Verify that a partial update only updates that one field. wantIgnoreNSlowestReplicas := defaultIgnoreNSlowestReplicas + 1 - partialConfig := &throttlerdata.Configuration{ + partialConfig := &throttlerdatapb.Configuration{ IgnoreNSlowestReplicas: wantIgnoreNSlowestReplicas, } names, err := f.m.UpdateConfiguration("t2", partialConfig, false /* copyZeroValues */) diff --git a/go/vt/throttler/max_replication_lag_module.go b/go/vt/throttler/max_replication_lag_module.go index 97fd55653c7..d974d6137f8 100644 --- a/go/vt/throttler/max_replication_lag_module.go +++ b/go/vt/throttler/max_replication_lag_module.go @@ -23,14 +23,13 @@ import ( "time" log "github.com/golang/glog" - "github.com/golang/protobuf/proto" "github.com/youtube/vitess/go/sync2" "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/proto/throttlerdata" "github.com/youtube/vitess/go/vt/topo/topoproto" + throttlerdatapb "github.com/youtube/vitess/go/vt/proto/throttlerdata" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) @@ -204,7 +203,7 @@ func (m 
*MaxReplicationLagModule) applyLatestConfig() { } } -func (m *MaxReplicationLagModule) getConfiguration() *throttlerdata.Configuration { +func (m *MaxReplicationLagModule) getConfiguration() *throttlerdatapb.Configuration { m.mutableConfigMu.Lock() defer m.mutableConfigMu.Unlock() @@ -212,14 +211,14 @@ func (m *MaxReplicationLagModule) getConfiguration() *throttlerdata.Configuratio return &configCopy } -func (m *MaxReplicationLagModule) updateConfiguration(configuration *throttlerdata.Configuration, copyZeroValues bool) error { +func (m *MaxReplicationLagModule) updateConfiguration(configuration *throttlerdatapb.Configuration, copyZeroValues bool) error { m.mutableConfigMu.Lock() defer m.mutableConfigMu.Unlock() newConfig := m.mutableConfig if copyZeroValues { - newConfig.Configuration = *proto.Clone(configuration).(*throttlerdata.Configuration) + newConfig.Configuration = *proto.Clone(configuration).(*throttlerdatapb.Configuration) } else { proto.Merge(&newConfig.Configuration, configuration) } diff --git a/go/vt/throttler/max_replication_lag_module_config.go b/go/vt/throttler/max_replication_lag_module_config.go index ef51b3f082c..4cd636529b1 100644 --- a/go/vt/throttler/max_replication_lag_module_config.go +++ b/go/vt/throttler/max_replication_lag_module_config.go @@ -20,14 +20,14 @@ import ( "fmt" "time" - "github.com/youtube/vitess/go/vt/proto/throttlerdata" + throttlerdatapb "github.com/youtube/vitess/go/vt/proto/throttlerdata" ) // MaxReplicationLagModuleConfig stores all configuration parameters for // MaxReplicationLagModule. Internally, the parameters are represented by a // protobuf message. This message is also used to update the parameters. 
type MaxReplicationLagModuleConfig struct { - throttlerdata.Configuration + throttlerdatapb.Configuration } // Most of the values are based on the assumption that vttablet is started @@ -35,7 +35,7 @@ type MaxReplicationLagModuleConfig struct { const healthCheckInterval = 20 var defaultMaxReplicationLagModuleConfig = MaxReplicationLagModuleConfig{ - throttlerdata.Configuration{ + throttlerdatapb.Configuration{ TargetReplicationLagSec: 2, MaxReplicationLagSec: ReplicationLagModuleDisabled, diff --git a/go/vt/throttler/throttler.go b/go/vt/throttler/throttler.go index 9f16b647705..814b68b5d92 100644 --- a/go/vt/throttler/throttler.go +++ b/go/vt/throttler/throttler.go @@ -34,8 +34,10 @@ import ( "time" log "github.com/golang/glog" + "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/proto/throttlerdata" + + throttlerdatapb "github.com/youtube/vitess/go/vt/proto/throttlerdata" ) const ( @@ -299,12 +301,12 @@ func (t *Throttler) RecordReplicationLag(time time.Time, ts *discovery.TabletSta } // GetConfiguration returns the configuration of the MaxReplicationLag module. -func (t *Throttler) GetConfiguration() *throttlerdata.Configuration { +func (t *Throttler) GetConfiguration() *throttlerdatapb.Configuration { return t.maxReplicationLagModule.getConfiguration() } // UpdateConfiguration updates the configuration of the MaxReplicationLag module. 
-func (t *Throttler) UpdateConfiguration(configuration *throttlerdata.Configuration, copyZeroValues bool) error { +func (t *Throttler) UpdateConfiguration(configuration *throttlerdatapb.Configuration, copyZeroValues bool) error { return t.maxReplicationLagModule.updateConfiguration(configuration, copyZeroValues) } diff --git a/go/vt/throttler/throttlerclient/throttlerclient.go b/go/vt/throttler/throttlerclient/throttlerclient.go index 9073a88931a..fdc0272ad74 100644 --- a/go/vt/throttler/throttlerclient/throttlerclient.go +++ b/go/vt/throttler/throttlerclient/throttlerclient.go @@ -24,9 +24,9 @@ import ( "fmt" "log" - "github.com/youtube/vitess/go/vt/proto/throttlerdata" - "golang.org/x/net/context" + + throttlerdatapb "github.com/youtube/vitess/go/vt/proto/throttlerdata" ) // protocol specifices which RPC client implementation should be used. @@ -44,7 +44,7 @@ type Client interface { // GetConfiguration returns the configuration of the MaxReplicationlag module // for the given throttler or all throttlers if "throttlerName" is empty. - GetConfiguration(ctx context.Context, throttlerName string) (map[string]*throttlerdata.Configuration, error) + GetConfiguration(ctx context.Context, throttlerName string) (map[string]*throttlerdatapb.Configuration, error) // UpdateConfiguration (partially) updates the configuration of the // MaxReplicationlag module for the given throttler or all throttlers if @@ -52,7 +52,7 @@ type Client interface { // If "copyZeroValues" is true, fields with zero values will be copied // as well. // The function returns the names of the updated throttlers. 
- UpdateConfiguration(ctx context.Context, throttlerName string, configuration *throttlerdata.Configuration, copyZeroValues bool) ([]string, error) + UpdateConfiguration(ctx context.Context, throttlerName string, configuration *throttlerdatapb.Configuration, copyZeroValues bool) ([]string, error) // ResetConfiguration resets the configuration of the MaxReplicationlag module // to the initial configuration for the given throttler or all throttlers if diff --git a/go/vt/throttler/throttlerclienttest/throttlerclient_testsuite.go b/go/vt/throttler/throttlerclienttest/throttlerclient_testsuite.go index 83ec6876e49..4d6e6ac1ab4 100644 --- a/go/vt/throttler/throttlerclienttest/throttlerclient_testsuite.go +++ b/go/vt/throttler/throttlerclienttest/throttlerclient_testsuite.go @@ -30,12 +30,13 @@ import ( "strings" "testing" + "github.com/golang/protobuf/proto" "golang.org/x/net/context" - "github.com/golang/protobuf/proto" - "github.com/youtube/vitess/go/vt/proto/throttlerdata" "github.com/youtube/vitess/go/vt/throttler" "github.com/youtube/vitess/go/vt/throttler/throttlerclient" + + throttlerdatapb "github.com/youtube/vitess/go/vt/proto/throttlerdata" ) // TestSuite runs the test suite on the given throttlerclient and throttlerserver. @@ -128,7 +129,7 @@ func (tf *testFixture) configuration(t *testing.T, client throttlerclient.Client } // Test UpdateConfiguration. - config := &throttlerdata.Configuration{ + config := &throttlerdatapb.Configuration{ TargetReplicationLagSec: 1, MaxReplicationLagSec: 2, InitialRate: 3, @@ -201,12 +202,12 @@ func (fm *FakeManager) SetMaxRate(int64) []string { } // GetConfiguration implements the throttler.Manager interface. It always panics. 
-func (fm *FakeManager) GetConfiguration(throttlerName string) (map[string]*throttlerdata.Configuration, error) { +func (fm *FakeManager) GetConfiguration(throttlerName string) (map[string]*throttlerdatapb.Configuration, error) { panic(panicMsg) } // UpdateConfiguration implements the throttler.Manager interface. It always panics. -func (fm *FakeManager) UpdateConfiguration(throttlerName string, configuration *throttlerdata.Configuration, copyZeroValues bool) ([]string, error) { +func (fm *FakeManager) UpdateConfiguration(throttlerName string, configuration *throttlerdatapb.Configuration, copyZeroValues bool) ([]string, error) { panic(panicMsg) } diff --git a/go/vt/topo/memorytopo/memorytopo.go b/go/vt/topo/memorytopo/memorytopo.go index d9c93a47728..89b4b2e835a 100644 --- a/go/vt/topo/memorytopo/memorytopo.go +++ b/go/vt/topo/memorytopo/memorytopo.go @@ -98,6 +98,18 @@ func (f *Factory) SetError(err error) { } } +// Lock blocks all requests to the topo and is exposed to allow tests to +// simulate an unresponsive topo server +func (f *Factory) Lock() { + f.mu.Lock() +} + +// Unlock unblocks all requests to the topo and is exposed to allow tests to +// simulate an unresponsive topo server +func (f *Factory) Unlock() { + f.mu.Unlock() +} + // Conn implements the topo.Conn interface. It remembers the cell, and // points at the Factory that has all the data. 
type Conn struct { diff --git a/go/vt/vtctl/throttler.go b/go/vt/vtctl/throttler.go index aa62e8f9833..20f8577800b 100644 --- a/go/vt/vtctl/throttler.go +++ b/go/vt/vtctl/throttler.go @@ -25,12 +25,14 @@ import ( "github.com/golang/protobuf/proto" "github.com/olekukonko/tablewriter" + "golang.org/x/net/context" + "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/proto/throttlerdata" "github.com/youtube/vitess/go/vt/throttler" "github.com/youtube/vitess/go/vt/throttler/throttlerclient" "github.com/youtube/vitess/go/vt/wrangler" - "golang.org/x/net/context" + + throttlerdatapb "github.com/youtube/vitess/go/vt/proto/throttlerdata" ) // This file contains the commands to control the throttler which is used during @@ -220,7 +222,7 @@ func commandUpdateThrottlerConfiguration(ctx context.Context, wr *wrangler.Wrang } protoText := subFlags.Arg(0) - configuration := &throttlerdata.Configuration{} + configuration := &throttlerdatapb.Configuration{} if err := proto.UnmarshalText(protoText, configuration); err != nil { return fmt.Errorf("Failed to unmarshal the configuration protobuf text (%v) into a protobuf instance: %v", protoText, err) } diff --git a/go/vt/vtctld/tablet_stats_cache.go b/go/vt/vtctld/tablet_stats_cache.go index 959a7c2ffb5..93445d411b4 100644 --- a/go/vt/vtctld/tablet_stats_cache.go +++ b/go/vt/vtctld/tablet_stats_cache.go @@ -22,8 +22,9 @@ import ( "sync" "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/proto/topodata" "github.com/youtube/vitess/go/vt/topo/topoproto" + + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) // yLabel is used to keep track of the cell and type labels of the heatmap. @@ -44,7 +45,7 @@ type heatmap struct { // Data is a 2D array of values of the specified metric. Data [][]float64 // Aliases is a 2D array holding references to the tablet aliases. 
- Aliases [][]*topodata.TabletAlias + Aliases [][]*topodatapb.TabletAlias KeyspaceLabel label CellAndTypeLabels []yLabel ShardLabels []string @@ -73,7 +74,7 @@ const tabletUnhealthy = 2 // availableTabletTypes is an array of tabletTypes that are being considered to display on the heatmap. // Note: this list must always be sorted by the order they should appear (i.e. MASTER first, then REPLICA, then RDONLY) -var availableTabletTypes = []topodata.TabletType{topodata.TabletType_MASTER, topodata.TabletType_REPLICA, topodata.TabletType_RDONLY} +var availableTabletTypes = []topodatapb.TabletType{topodatapb.TabletType_MASTER, topodatapb.TabletType_REPLICA, topodatapb.TabletType_RDONLY} // tabletStatsCache holds the most recent status update received for // each tablet. The tablets are indexed by uid, so it is different @@ -85,7 +86,7 @@ type tabletStatsCache struct { // The first key is the keyspace, the second key is the shard, // the third key is the cell, the last key is the tabletType. // The keys are strings to allow exposing this map as a JSON object in api.go. - statuses map[string]map[string]map[string]map[topodata.TabletType][]*discovery.TabletStats + statuses map[string]map[string]map[string]map[topodatapb.TabletType][]*discovery.TabletStats // statusesByAlias is a copy of statuses and will be updated simultaneously. // The first key is the string representation of the tablet alias. statusesByAlias map[string]*discovery.TabletStats @@ -99,7 +100,7 @@ type topologyInfo struct { func newTabletStatsCache() *tabletStatsCache { return &tabletStatsCache{ - statuses: make(map[string]map[string]map[string]map[topodata.TabletType][]*discovery.TabletStats), + statuses: make(map[string]map[string]map[string]map[topodatapb.TabletType][]*discovery.TabletStats), statusesByAlias: make(map[string]*discovery.TabletStats), } } @@ -132,19 +133,19 @@ func (c *tabletStatsCache) StatsUpdate(stats *discovery.TabletStats) { // Tablet isn't tracked yet so just add it. 
shards, ok := c.statuses[keyspace] if !ok { - shards = make(map[string]map[string]map[topodata.TabletType][]*discovery.TabletStats) + shards = make(map[string]map[string]map[topodatapb.TabletType][]*discovery.TabletStats) c.statuses[keyspace] = shards } cells, ok := c.statuses[keyspace][shard] if !ok { - cells = make(map[string]map[topodata.TabletType][]*discovery.TabletStats) + cells = make(map[string]map[topodatapb.TabletType][]*discovery.TabletStats) c.statuses[keyspace][shard] = cells } types, ok := c.statuses[keyspace][shard][cell] if !ok { - types = make(map[topodata.TabletType][]*discovery.TabletStats) + types = make(map[topodatapb.TabletType][]*discovery.TabletStats) c.statuses[keyspace][shard][cell] = types } @@ -170,7 +171,7 @@ func tabletToMapKey(stats *discovery.TabletStats) string { // remove takes in an array and returns it with the specified element removed // (leaves the array unchanged if element isn't in the array). -func remove(tablets []*discovery.TabletStats, tabletAlias *topodata.TabletAlias) []*discovery.TabletStats { +func remove(tablets []*discovery.TabletStats, tabletAlias *topodatapb.TabletAlias) []*discovery.TabletStats { filteredTablets := tablets[:0] for _, tablet := range tablets { if !topoproto.TabletAliasEqual(tablet.Tablet.Alias, tabletAlias) { @@ -191,7 +192,7 @@ func (c *tabletStatsCache) topologyInfo(selectedKeyspace, selectedCell string) * } } -func makeStringTypeList(types []topodata.TabletType) []string { +func makeStringTypeList(types []topodatapb.TabletType) []string { var list []string for _, t := range types { list = append(list, t.String()) @@ -228,10 +229,10 @@ func (c *tabletStatsCache) cellsLocked(keyspace, cell string) []string { // tabletTypesLocked returns the tablet types needed to be displayed in the heatmap based on the dropdown filters. // It returns tablet type if a specific one was chosen or returns all of them if 'all' is chosen for keyspace and/or cell. 
// This method is used by heatmapData to traverse over the desired tablet types. -func (c *tabletStatsCache) tabletTypesLocked(keyspace, cell, tabletType string) []topodata.TabletType { +func (c *tabletStatsCache) tabletTypesLocked(keyspace, cell, tabletType string) []topodatapb.TabletType { if tabletType != "all" { tabletTypeObj, _ := topoproto.ParseTabletType(tabletType) - return []topodata.TabletType{tabletTypeObj} + return []topodatapb.TabletType{tabletTypeObj} } return c.typesInTopology(keyspace, cell) } @@ -263,9 +264,9 @@ func (c *tabletStatsCache) cellsInTopology(keyspace string) []string { // typesInTopology returns all the types in the given keyspace and cell. // If all keyspaces and cells is chosen, it returns the types from every cell in every keyspace. // This method is used by topologyInfo to send all available options for the tablet type dropdown -func (c *tabletStatsCache) typesInTopology(keyspace, cell string) []topodata.TabletType { +func (c *tabletStatsCache) typesInTopology(keyspace, cell string) []topodatapb.TabletType { keyspaces := c.keyspacesLocked(keyspace) - types := make(map[topodata.TabletType]bool) + types := make(map[topodatapb.TabletType]bool) // Going through the shards in every cell in every keyspace to get existing tablet types for _, ks := range keyspaces { cellsPerKeyspace := c.cellsLocked(ks, cell) @@ -286,8 +287,8 @@ func (c *tabletStatsCache) typesInTopology(keyspace, cell string) []topodata.Tab return typesList } -func sortTypes(types map[topodata.TabletType]bool) []topodata.TabletType { - var listOfTypes []topodata.TabletType +func sortTypes(types map[topodatapb.TabletType]bool) []topodatapb.TabletType { + var listOfTypes []topodatapb.TabletType for _, tabType := range availableTabletTypes { if t, _ := types[tabType]; t { listOfTypes = append(listOfTypes, tabType) @@ -340,7 +341,7 @@ func (c *tabletStatsCache) heatmapData(selectedKeyspace, selectedCell, selectedT // The loop goes through every outer label (in this case, 
cell). for _, cell := range cells { var cellData [][]float64 - var cellAliases [][]*topodata.TabletAlias + var cellAliases [][]*topodatapb.TabletAlias var cellLabel yLabel if aggregated { @@ -355,7 +356,7 @@ func (c *tabletStatsCache) heatmapData(selectedKeyspace, selectedCell, selectedT // Adding the data in reverse to match the format that the plotly map takes in. h.Data = append([][]float64{cellData[i]}, h.Data...) if cellAliases != nil { - h.Aliases = append([][]*topodata.TabletAlias{cellAliases[i]}, h.Aliases...) + h.Aliases = append([][]*topodatapb.TabletAlias{cellAliases[i]}, h.Aliases...) } } h.CellAndTypeLabels = append(h.CellAndTypeLabels, cellLabel) @@ -398,10 +399,10 @@ func (c *tabletStatsCache) heatmapData(selectedKeyspace, selectedCell, selectedT return heatmaps, nil } -func (c *tabletStatsCache) unaggregatedData(keyspace, cell, selectedType string, metricFunc func(stats *discovery.TabletStats) float64) ([][]float64, [][]*topodata.TabletAlias, yLabel) { +func (c *tabletStatsCache) unaggregatedData(keyspace, cell, selectedType string, metricFunc func(stats *discovery.TabletStats) float64) ([][]float64, [][]*topodatapb.TabletAlias, yLabel) { // This loop goes through every nested label (in this case, tablet type). var cellData [][]float64 - var cellAliases [][]*topodata.TabletAlias + var cellAliases [][]*topodatapb.TabletAlias var cellLabel yLabel cellLabelSpan := 0 tabletTypes := c.tabletTypesLocked(keyspace, cell, selectedType) @@ -420,10 +421,10 @@ func (c *tabletStatsCache) unaggregatedData(keyspace, cell, selectedType string, // dataRowsPerType is a 2D array that will hold the data of the tablets of one (cell, type) combination. dataRowsPerType := make([][]float64, maxRowLength) // aliasRowsPerType is a 2D array that will hold the aliases of the tablets of one (cell, type) combination. 
- aliasRowsPerType := make([][]*topodata.TabletAlias, maxRowLength) + aliasRowsPerType := make([][]*topodatapb.TabletAlias, maxRowLength) for i := range dataRowsPerType { dataRowsPerType[i] = make([]float64, len(shards)) - aliasRowsPerType[i] = make([]*topodata.TabletAlias, len(shards)) + aliasRowsPerType[i] = make([]*topodatapb.TabletAlias, len(shards)) } // Filling in the 2D array with tablet data by columns. @@ -458,7 +459,7 @@ func (c *tabletStatsCache) unaggregatedData(keyspace, cell, selectedType string, // aggregatedData gets heatmapData by taking the average of the metric value of all tablets within the keyspace and cell of the // specified type (or from all types if 'all' was selected). -func (c *tabletStatsCache) aggregatedData(keyspace, cell, selectedType, selectedMetric string, metricFunc func(stats *discovery.TabletStats) float64) ([][]float64, [][]*topodata.TabletAlias, yLabel) { +func (c *tabletStatsCache) aggregatedData(keyspace, cell, selectedType, selectedMetric string, metricFunc func(stats *discovery.TabletStats) float64) ([][]float64, [][]*topodatapb.TabletAlias, yLabel) { shards := c.shards(keyspace) tabletTypes := c.tabletTypesLocked(keyspace, cell, selectedType) @@ -507,7 +508,7 @@ func (c *tabletStatsCache) aggregatedData(keyspace, cell, selectedType, selected return cellData, nil, cellLabel } -func (c *tabletStatsCache) tabletStats(tabletAlias *topodata.TabletAlias) (discovery.TabletStats, error) { +func (c *tabletStatsCache) tabletStats(tabletAlias *topodatapb.TabletAlias) (discovery.TabletStats, error) { c.mu.Lock() defer c.mu.Unlock() diff --git a/go/vt/vtctld/tablet_stats_cache_test.go b/go/vt/vtctld/tablet_stats_cache_test.go index 1ae06d7b8ce..9bdc28e39d8 100644 --- a/go/vt/vtctld/tablet_stats_cache_test.go +++ b/go/vt/vtctld/tablet_stats_cache_test.go @@ -21,7 +21,6 @@ import ( "testing" "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/proto/topodata" querypb 
"github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" @@ -146,7 +145,7 @@ func TestHeatmapData(t *testing.T) { {float64(ts2.Stats.SecondsBehindMaster), float64(ts4.Stats.SecondsBehindMaster)}, {float64(ts1.Stats.SecondsBehindMaster), float64(-1)}, }, - Aliases: [][]*topodata.TabletAlias{ + Aliases: [][]*topodatapb.TabletAlias{ {nil, ts9.Tablet.Alias}, {ts6.Tablet.Alias, ts8.Tablet.Alias}, {ts5.Tablet.Alias, nil}, @@ -193,7 +192,7 @@ func TestHeatmapData(t *testing.T) { {float64(ts3.Stats.SecondsBehindMaster), float64(-1)}, {float64(ts2.Stats.SecondsBehindMaster), float64(ts4.Stats.SecondsBehindMaster)}, }, - Aliases: [][]*topodata.TabletAlias{ + Aliases: [][]*topodatapb.TabletAlias{ {ts5.Tablet.Alias, nil}, {ts3.Tablet.Alias, nil}, {ts2.Tablet.Alias, ts4.Tablet.Alias}, @@ -233,7 +232,7 @@ func TestHeatmapData(t *testing.T) { {float64(ts11.Stats.SecondsBehindMaster), float64(ts13.Stats.SecondsBehindMaster)}, {float64(ts10.Stats.SecondsBehindMaster), float64(-1)}, }, - Aliases: [][]*topodata.TabletAlias{ + Aliases: [][]*topodatapb.TabletAlias{ {ts12.Tablet.Alias, nil}, {ts11.Tablet.Alias, ts13.Tablet.Alias}, {ts10.Tablet.Alias, nil}, @@ -313,7 +312,7 @@ func TestHeatmapData(t *testing.T) { Data: [][]float64{ {float64(-1), float64(ts7.Stats.SecondsBehindMaster)}, }, - Aliases: [][]*topodata.TabletAlias{ + Aliases: [][]*topodatapb.TabletAlias{ {nil, ts7.Tablet.Alias}, }, CellAndTypeLabels: []yLabel{ diff --git a/go/vt/vtexplain/vtexplain_vtgate.go b/go/vt/vtexplain/vtexplain_vtgate.go index 308b9d9b106..f9c69101777 100644 --- a/go/vt/vtexplain/vtexplain_vtgate.go +++ b/go/vt/vtexplain/vtexplain_vtgate.go @@ -79,7 +79,8 @@ func newFakeResolver(opts *Options, hc discovery.HealthCheck, serv srvtopo.Serve } tc := vtgate.NewTxConn(gw, txMode) sc := vtgate.NewScatterConn("", tc, gw, hc) - return vtgate.NewResolver(serv, cell, sc) + srvResolver := srvtopo.NewResolver(serv, gw, cell) + return vtgate.NewResolver(srvResolver, 
serv, cell, sc) } func buildTopology(opts *Options, vschemaStr string, numShardsPerKeyspace int) error { diff --git a/go/vt/vtgate/autocommit_test.go b/go/vt/vtgate/autocommit_test.go index 658c4ad313d..252336fd448 100644 --- a/go/vt/vtgate/autocommit_test.go +++ b/go/vt/vtgate/autocommit_test.go @@ -105,11 +105,8 @@ func TestAutocommitUpdateVindexChange(t *testing.T) { Sql: "select name, lastname from user2 where id = 1 for update", BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "update user2 set name = 'myname', lastname = 'mylastname' where id = 1 /* vtgate:: keyspace_id:166b40b44aba4bd6 */", - BindVariables: map[string]*querypb.BindVariable{ - "_name0": sqltypes.BytesBindVariable([]byte("myname")), - "_lastname0": sqltypes.BytesBindVariable([]byte("mylastname")), - }, + Sql: "update user2 set name = 'myname', lastname = 'mylastname' where id = 1 /* vtgate:: keyspace_id:166b40b44aba4bd6 */", + BindVariables: map[string]*querypb.BindVariable{}, }}) testAsTransactionCount(t, "sbc1", sbc1, 0) testCommitCount(t, "sbc1", sbc1, 1) diff --git a/go/vt/vtgate/engine/merge_sort_test.go b/go/vt/vtgate/engine/merge_sort_test.go index bd900acb14a..87335cb32e7 100644 --- a/go/vt/vtgate/engine/merge_sort_test.go +++ b/go/vt/vtgate/engine/merge_sort_test.go @@ -342,6 +342,10 @@ func (t *fakeVcursor) Execute(method string, query string, bindvars map[string]* panic("unimplemented") } +func (t *fakeVcursor) ExecuteAutocommit(method string, query string, bindvars map[string]*querypb.BindVariable, isDML bool) (*sqltypes.Result, error) { + panic("unimplemented") +} + func (t *fakeVcursor) ExecuteMultiShard(keyspace string, shardQueries map[string]*querypb.BoundQuery, isDML, canAutocommit bool) (*sqltypes.Result, error) { panic("unimplemented") } @@ -374,6 +378,10 @@ func (t *fakeVcursor) GetKeyspaceShards(vkeyspace *vindexes.Keyspace) (string, [ panic("unimplemented") } +func (t *fakeVcursor) GetShardsForKsids(allShards []*topodatapb.ShardReference, ksids 
vindexes.Ksids) ([]string, error) { + panic("unimplemented") +} + func (t *fakeVcursor) GetShardForKeyspaceID(allShards []*topodatapb.ShardReference, keyspaceID []byte) (string, error) { panic("unimplemented") } diff --git a/go/vt/vtgate/engine/primitive.go b/go/vt/vtgate/engine/primitive.go index 07458c30ad7..e4f35eef6af 100644 --- a/go/vt/vtgate/engine/primitive.go +++ b/go/vt/vtgate/engine/primitive.go @@ -42,12 +42,20 @@ const ListVarName = "__vals" type VCursor interface { // Context returns the context of the current request. Context() context.Context + + // V3 functions. Execute(method string, query string, bindvars map[string]*querypb.BindVariable, isDML bool) (*sqltypes.Result, error) + ExecuteAutocommit(method string, query string, bindvars map[string]*querypb.BindVariable, isDML bool) (*sqltypes.Result, error) + + // Shard-level functions. ExecuteMultiShard(keyspace string, shardQueries map[string]*querypb.BoundQuery, isDML, canAutocommit bool) (*sqltypes.Result, error) ExecuteStandalone(query string, bindvars map[string]*querypb.BindVariable, keyspace, shard string) (*sqltypes.Result, error) StreamExecuteMulti(query string, keyspace string, shardVars map[string]map[string]*querypb.BindVariable, callback func(reply *sqltypes.Result) error) error + + // Topo functions. GetKeyspaceShards(vkeyspace *vindexes.Keyspace) (string, []*topodatapb.ShardReference, error) GetShardForKeyspaceID(allShards []*topodatapb.ShardReference, keyspaceID []byte) (string, error) + GetShardsForKsids(allShards []*topodatapb.ShardReference, ksids vindexes.Ksids) ([]string, error) } // Plan represents the execution strategy for a given query. diff --git a/go/vt/vtgate/engine/route.go b/go/vt/vtgate/engine/route.go index 1e51e73ef1e..bbaca89da67 100644 --- a/go/vt/vtgate/engine/route.go +++ b/go/vt/vtgate/engine/route.go @@ -226,6 +226,9 @@ const ( // Value, and a Subquery, which will be used to // determine if lookup rows need to be deleted. 
DeleteEqual + // DeleteSharded is for routing a scattered + // delete statement. + DeleteSharded // InsertUnsharded is for routing an insert statement // to an unsharded keyspace. InsertUnsharded @@ -252,6 +255,7 @@ var routeName = map[RouteOpcode]string{ UpdateUnsharded: "UpdateUnsharded", UpdateEqual: "UpdateEqual", DeleteUnsharded: "DeleteUnsharded", + DeleteSharded: "DeleteSharded", DeleteEqual: "DeleteEqual", InsertUnsharded: "InsertUnsharded", InsertSharded: "InsertSharded", @@ -299,6 +303,8 @@ func (route *Route) execute(vcursor VCursor, bindVars, joinVars map[string]*quer return route.execUpdateEqual(vcursor, bindVars) case DeleteEqual: return route.execDeleteEqual(vcursor, bindVars) + case DeleteSharded: + return route.execDeleteSharded(vcursor, bindVars) case InsertSharded, InsertShardedIgnore: return route.execInsertSharded(vcursor, bindVars) case InsertUnsharded: @@ -511,6 +517,16 @@ func (route *Route) execUpdateEqualChangedVindex(vcursor VCursor, query string, return result, nil } +func (route *Route) execDeleteSharded(vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { + params, err := route.paramsAllShards(vcursor, bindVars) + if err != nil { + return nil, err + } + sql := sqlannotation.AnnotateIfDML(route.Query, nil) + shardQueries := route.getShardQueries(sql, params) + return vcursor.ExecuteMultiShard(params.ks, shardQueries, true /* isDML */, true /* canAutocommit */) +} + func (route *Route) execDeleteEqual(vcursor VCursor, bindVars map[string]*querypb.BindVariable) (*sqltypes.Result, error) { key, err := route.Values[0].ResolveValue(bindVars) if err != nil { @@ -612,11 +628,11 @@ func (route *Route) resolveShards(vcursor VCursor, bindVars map[string]*querypb. 
return "", nil, err } for i, ksids := range ksidss { - for _, ksid := range ksids { - shard, err := vcursor.GetShardForKeyspaceID(allShards, ksid) - if err != nil { - return "", nil, err - } + shards, err := vcursor.GetShardsForKsids(allShards, ksids) + if err != nil { + return "", nil, err + } + for _, shard := range shards { routing.Add(shard, sqltypes.ValueToProto(vindexKeys[i])) } } @@ -648,40 +664,34 @@ func (route *Route) resolveSingleShard(vcursor VCursor, bindVars map[string]*que } func (route *Route) updateChangedVindexes(subQueryResult *sqltypes.Result, vcursor VCursor, bindVars map[string]*querypb.BindVariable, ksid []byte) error { - if len(route.ChangedVindexValues) == 0 { - return nil - } if len(subQueryResult.Rows) == 0 { // NOOP, there are no actual rows changing due to this statement return nil } - for tIdx, colVindex := range route.Table.Owned { - if colValues, ok := route.ChangedVindexValues[colVindex.Name]; ok { - if len(subQueryResult.Rows) > 1 { - return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: update changes multiple columns in the vindex") - } + if len(subQueryResult.Rows) > 1 { + return vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: update changes multiple columns in the vindex") + } + colnum := 0 + for _, colVindex := range route.Table.Owned { + // Fetch the column values. colnum must keep incrementing. + fromIds := make([]sqltypes.Value, 0, len(colVindex.Columns)) + for range colVindex.Columns { + fromIds = append(fromIds, subQueryResult.Rows[0][colnum]) + colnum++ + } - fromIds := make([][]sqltypes.Value, len(subQueryResult.Rows)) - var vindexColumnKeys []sqltypes.Value + // Update columns only if they're being changed. 
+ if colValues, ok := route.ChangedVindexValues[colVindex.Name]; ok { + vindexColumnKeys := make([]sqltypes.Value, 0, len(colValues)) for _, colValue := range colValues { resolvedVal, err := colValue.ResolveValue(bindVars) if err != nil { return err } vindexColumnKeys = append(vindexColumnKeys, resolvedVal) - - } - - for rowIdx, row := range subQueryResult.Rows { - for colIdx := range colVindex.Columns { - fromIds[rowIdx] = append(fromIds[rowIdx], row[tIdx+colIdx]) - } } - if err := colVindex.Vindex.(vindexes.Lookup).Delete(vcursor, fromIds, ksid); err != nil { - return err - } - if err := route.processOwned(vcursor, [][]sqltypes.Value{vindexColumnKeys}, colVindex, bindVars, [][]byte{ksid}); err != nil { + if err := colVindex.Vindex.(vindexes.Lookup).Update(vcursor, fromIds, ksid, vindexColumnKeys); err != nil { return err } } diff --git a/go/vt/vtgate/engine/vindex_func.go b/go/vt/vtgate/engine/vindex_func.go index 70fb140d81b..9aa8ded99fb 100644 --- a/go/vt/vtgate/engine/vindex_func.go +++ b/go/vt/vtgate/engine/vindex_func.go @@ -23,6 +23,7 @@ import ( "github.com/youtube/vitess/go/vt/vtgate/vindexes" querypb "github.com/youtube/vitess/go/vt/proto/query" + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) var _ Primitive = (*VindexFunc)(nil) @@ -121,7 +122,7 @@ func (vf *VindexFunc) mapVindex(vcursor VCursor, bindVars, joinVars map[string]* } if ksids[0] != nil { result.Rows = [][]sqltypes.Value{ - vf.buildRow(vkey, ksids[0]), + vf.buildRow(vkey, ksids[0], nil), } result.RowsAffected = 1 } @@ -130,24 +131,47 @@ func (vf *VindexFunc) mapVindex(vcursor VCursor, bindVars, joinVars map[string]* if err != nil { return nil, err } - for _, ksid := range ksidss[0] { - result.Rows = append(result.Rows, vf.buildRow(vkey, ksid)) + if ksidss[0].Range != nil { + result.Rows = append(result.Rows, vf.buildRow(vkey, nil, ksidss[0].Range)) + result.RowsAffected = 1 + } else { + for _, ksid := range ksidss[0].IDs { + result.Rows = append(result.Rows, vf.buildRow(vkey, 
ksid, nil)) + } + result.RowsAffected = uint64(len(ksidss[0].IDs)) } - result.RowsAffected = uint64(len(ksidss[0])) default: panic("unexpected") } return result, nil } -func (vf *VindexFunc) buildRow(id sqltypes.Value, ksid []byte) []sqltypes.Value { +func (vf *VindexFunc) buildRow(id sqltypes.Value, ksid []byte, kr *topodatapb.KeyRange) []sqltypes.Value { row := make([]sqltypes.Value, 0, len(vf.Fields)) - keyspaceID := sqltypes.MakeTrusted(sqltypes.VarBinary, ksid) for _, col := range vf.Cols { - if col == 0 { + switch col { + case 0: row = append(row, id) - } else { - row = append(row, keyspaceID) + case 1: + if ksid != nil { + row = append(row, sqltypes.MakeTrusted(sqltypes.VarBinary, ksid)) + } else { + row = append(row, sqltypes.NULL) + } + case 2: + if kr != nil { + row = append(row, sqltypes.MakeTrusted(sqltypes.VarBinary, kr.Start)) + } else { + row = append(row, sqltypes.NULL) + } + case 3: + if kr != nil { + row = append(row, sqltypes.MakeTrusted(sqltypes.VarBinary, kr.End)) + } else { + row = append(row, sqltypes.NULL) + } + default: + panic("BUG: unexpected column number") } } return row diff --git a/go/vt/vtgate/engine/vindex_func_test.go b/go/vt/vtgate/engine/vindex_func_test.go index d4f375709a4..0184b801470 100644 --- a/go/vt/vtgate/engine/vindex_func_test.go +++ b/go/vt/vtgate/engine/vindex_func_test.go @@ -21,6 +21,7 @@ import ( "testing" "github.com/youtube/vitess/go/sqltypes" + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" "github.com/youtube/vitess/go/vt/vtgate/vindexes" ) @@ -43,7 +44,7 @@ func (v *uvindex) Map(vindexes.VCursor, []sqltypes.Value) ([][]byte, error) { } // nvindex is NonUnique. 
-type nvindex struct{ match bool } +type nvindex struct{ matchid, matchkr bool } func (*nvindex) String() string { return "nvindex" } func (*nvindex) Cost() int { return 1 } @@ -51,13 +52,24 @@ func (*nvindex) Verify(vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, er panic("unimplemented") } -func (v *nvindex) Map(vindexes.VCursor, []sqltypes.Value) ([][][]byte, error) { - if v.match { - return [][][]byte{{ - []byte("foo"), []byte("bar"), +func (v *nvindex) Map(vindexes.VCursor, []sqltypes.Value) ([]vindexes.Ksids, error) { + if v.matchid { + return []vindexes.Ksids{{ + IDs: [][]byte{ + []byte("foo"), + []byte("bar"), + }, + }}, nil + } + if v.matchkr { + return []vindexes.Ksids{{ + Range: &topodatapb.KeyRange{ + Start: []byte{0x40}, + End: []byte{0x60}, + }, }}, nil } - return [][][]byte{nil}, nil + return []vindexes.Ksids{{}}, nil } func TestVindexFuncMap(t *testing.T) { @@ -68,7 +80,7 @@ func TestVindexFuncMap(t *testing.T) { t.Fatal(err) } want := &sqltypes.Result{ - Fields: sqltypes.MakeTestFields("id|keyspace_id", "varbinary|varbinary"), + Fields: sqltypes.MakeTestFields("id|keyspace_id|range_start|range_end", "varbinary|varbinary|varbinary|varbinary"), } if !reflect.DeepEqual(got, want) { t.Errorf("Execute(Map, uvindex(none)):\n%v, want\n%v", got, want) @@ -81,7 +93,7 @@ func TestVindexFuncMap(t *testing.T) { t.Fatal(err) } want = sqltypes.MakeTestResult( - sqltypes.MakeTestFields("id|keyspace_id", "varbinary|varbinary"), + sqltypes.MakeTestFields("id|keyspace_id|range_start|range_end", "varbinary|varbinary|varbinary|varbinary"), "1|foo", ) if !reflect.DeepEqual(got, want) { @@ -95,37 +107,62 @@ func TestVindexFuncMap(t *testing.T) { t.Fatal(err) } want = &sqltypes.Result{ - Fields: sqltypes.MakeTestFields("id|keyspace_id", "varbinary|varbinary"), + Fields: sqltypes.MakeTestFields("id|keyspace_id|range_start|range_end", "varbinary|varbinary|varbinary|varbinary"), } if !reflect.DeepEqual(got, want) { t.Errorf("Execute(Map, uvindex(none)):\n%v, 
want\n%v", got, want) } // NonUnique Vindex returning 2 rows. - vf = testVindexFunc(&nvindex{match: true}) + vf = testVindexFunc(&nvindex{matchid: true}) got, err = vf.Execute(nil, nil, nil, false) if err != nil { t.Fatal(err) } want = sqltypes.MakeTestResult( - sqltypes.MakeTestFields("id|keyspace_id", "varbinary|varbinary"), - "1|foo", - "1|bar", + sqltypes.MakeTestFields("id|keyspace_id|range_start|range_end", "varbinary|varbinary|varbinary|varbinary"), + "1|foo||", + "1|bar||", ) + // Massage the rows because MakeTestResult doesn't do NULL values. + for _, row := range want.Rows { + row[2] = sqltypes.NULL + row[3] = sqltypes.NULL + } + if !reflect.DeepEqual(got, want) { + t.Errorf("Execute(Map, uvindex(none)):\n%v, want\n%v", got, want) + } + + // NonUnique Vindex returning keyrange + vf = testVindexFunc(&nvindex{matchkr: true}) + got, err = vf.Execute(nil, nil, nil, false) + if err != nil { + t.Fatal(err) + } + want = &sqltypes.Result{ + Fields: sqltypes.MakeTestFields("id|keyspace_id|range_start|range_end", "varbinary|varbinary|varbinary|varbinary"), + Rows: [][]sqltypes.Value{{ + sqltypes.NewVarBinary("1"), + sqltypes.NULL, + sqltypes.MakeTrusted(sqltypes.VarBinary, []byte{0x40}), + sqltypes.MakeTrusted(sqltypes.VarBinary, []byte{0x60}), + }}, + RowsAffected: 1, + } if !reflect.DeepEqual(got, want) { t.Errorf("Execute(Map, uvindex(none)):\n%v, want\n%v", got, want) } } func TestVindexFuncStreamExecute(t *testing.T) { - vf := testVindexFunc(&nvindex{match: true}) + vf := testVindexFunc(&nvindex{matchid: true}) want := []*sqltypes.Result{{ - Fields: sqltypes.MakeTestFields("id|keyspace_id", "varbinary|varbinary"), + Fields: sqltypes.MakeTestFields("id|keyspace_id|range_start|range_end", "varbinary|varbinary|varbinary|varbinary"), }, { Rows: [][]sqltypes.Value{{ - sqltypes.NewVarBinary("1"), sqltypes.NewVarBinary("foo"), + sqltypes.NewVarBinary("1"), sqltypes.NewVarBinary("foo"), sqltypes.NULL, sqltypes.NULL, }, { - sqltypes.NewVarBinary("1"), 
sqltypes.NewVarBinary("bar"), + sqltypes.NewVarBinary("1"), sqltypes.NewVarBinary("bar"), sqltypes.NULL, sqltypes.NULL, }}, }} i := 0 @@ -148,7 +185,7 @@ func TestVindexFuncGetFields(t *testing.T) { t.Fatal(err) } want := &sqltypes.Result{ - Fields: sqltypes.MakeTestFields("id|keyspace_id", "varbinary|varbinary"), + Fields: sqltypes.MakeTestFields("id|keyspace_id|range_start|range_end", "varbinary|varbinary|varbinary|varbinary"), } if !reflect.DeepEqual(got, want) { t.Errorf("Execute(Map, uvindex(none)):\n%v, want\n%v", got, want) @@ -156,7 +193,7 @@ func TestVindexFuncGetFields(t *testing.T) { } func TestFieldOrder(t *testing.T) { - vf := testVindexFunc(&nvindex{match: true}) + vf := testVindexFunc(&nvindex{matchid: true}) vf.Fields = sqltypes.MakeTestFields("keyspace_id|id|keyspace_id", "varbinary|varbinary|varbinary") vf.Cols = []int{1, 0, 1} got, err := vf.Execute(nil, nil, nil, true) @@ -175,8 +212,8 @@ func TestFieldOrder(t *testing.T) { func testVindexFunc(v vindexes.Vindex) *VindexFunc { return &VindexFunc{ - Fields: sqltypes.MakeTestFields("id|keyspace_id", "varbinary|varbinary"), - Cols: []int{0, 1}, + Fields: sqltypes.MakeTestFields("id|keyspace_id|range_start|range_end", "varbinary|varbinary|varbinary|varbinary"), + Cols: []int{0, 1, 2, 3}, Opcode: VindexMap, Vindex: v, Value: int64PlanValue(1), diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go index ca3067b6882..69a29ca8b9b 100644 --- a/go/vt/vtgate/executor.go +++ b/go/vt/vtgate/executor.go @@ -284,8 +284,8 @@ func (e *Executor) handleExec(ctx context.Context, safeSession *SafeSession, sql } func (e *Executor) shardExec(ctx context.Context, safeSession *SafeSession, sql string, bindVars map[string]*querypb.BindVariable, target querypb.Target, logStats *LogStats) (*sqltypes.Result, error) { - f := func(keyspace string) (string, []string, error) { - return keyspace, []string{target.Shard}, nil + f := func() ([]*srvtopo.ResolvedShard, error) { + return 
e.resolver.resolver.ResolveShards(ctx, target.Keyspace, []string{target.Shard}, target.TabletType) } return e.resolver.Execute(ctx, sql, bindVars, target.Keyspace, target.TabletType, safeSession.Session, f, false /* notInTransaction */, safeSession.Options, logStats) } @@ -295,25 +295,19 @@ func (e *Executor) handleDDL(ctx context.Context, safeSession *SafeSession, sql return nil, errNoKeyspace } - f := func(keyspace string) (string, []string, error) { - var shards []string + f := func() ([]*srvtopo.ResolvedShard, error) { + var result []*srvtopo.ResolvedShard + var err error if target.Shard == "" { - ks, _, allShards, err := srvtopo.GetKeyspaceShards(ctx, e.serv, e.cell, keyspace, target.TabletType) - if err != nil { - return "", nil, err - } - // The usual keyspace resolution rules are applied. - // This means that the keyspace can be remapped to a new one - // if vertical resharding is in progress. - keyspace = ks - for _, shard := range allShards { - shards = append(shards, shard.Name) - } + result, _, err = e.resolver.resolver.GetAllShards(ctx, target.Keyspace, target.TabletType) } else { - shards = []string{target.Shard} + result, err = e.resolver.resolver.ResolveShards(ctx, target.Keyspace, []string{target.Shard}, target.TabletType) + } + if err != nil { + return nil, err } - logStats.ShardQueries = uint32(len(shards)) - return keyspace, shards, nil + logStats.ShardQueries = uint32(len(result)) + return result, nil } execStart := time.Now() @@ -521,7 +515,7 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql switch show.Type { case sqlparser.KeywordString(sqlparser.DATABASES), sqlparser.KeywordString(sqlparser.VITESS_KEYSPACES): - keyspaces, err := srvtopo.GetAllKeyspaces(ctx, e.serv, e.cell) + keyspaces, err := e.resolver.resolver.GetAllKeyspaces(ctx) if err != nil { return nil, err } @@ -537,14 +531,14 @@ func (e *Executor) handleShow(ctx context.Context, safeSession *SafeSession, sql RowsAffected: uint64(len(rows)), }, nil 
case sqlparser.KeywordString(sqlparser.VITESS_SHARDS): - keyspaces, err := srvtopo.GetAllKeyspaces(ctx, e.serv, e.cell) + keyspaces, err := e.resolver.resolver.GetAllKeyspaces(ctx) if err != nil { return nil, err } var rows [][]sqltypes.Value for _, keyspace := range keyspaces { - _, _, shards, err := srvtopo.GetKeyspaceShards(ctx, e.serv, e.cell, keyspace, target.TabletType) + _, _, shards, err := e.resolver.resolver.GetKeyspaceShards(ctx, keyspace, target.TabletType) if err != nil { // There might be a misconfigured keyspace or no shards in the keyspace. // Skip any errors and move on. @@ -725,11 +719,12 @@ func (e *Executor) handleOther(ctx context.Context, safeSession *SafeSession, sq return nil, errNoKeyspace } if target.Shard == "" { - var err error - target.Keyspace, target.Shard, err = srvtopo.GetAnyShard(ctx, e.serv, e.cell, target.Keyspace, target.TabletType) + // shardExec will re-resolve this a bit later. + rs, err := e.resolver.resolver.GetAnyShard(ctx, target.Keyspace, target.TabletType) if err != nil { return nil, err } + target.Keyspace, target.Shard = rs.Target.Keyspace, rs.Target.Shard } execStart := time.Now() result, err := e.shardExec(ctx, safeSession, sql, bindVars, target, logStats) @@ -755,6 +750,13 @@ func (e *Executor) StreamExecute(ctx context.Context, method string, safeSession } query, comments := sqlparser.SplitTrailingComments(sql) vcursor := newVCursorImpl(ctx, safeSession, target, comments, e, logStats) + + // check if this is a stream statement for messaging + // TODO: support keyRange syntax + if logStats.StmtType == sqlparser.StmtType(sqlparser.StmtStream) { + return e.handleMessageStream(ctx, safeSession, sql, target, callback, vcursor, logStats) + } + plan, err := e.getPlan( vcursor, query, @@ -818,32 +820,76 @@ func (e *Executor) StreamExecute(ctx context.Context, method string, safeSession return err } -// MessageAck acks messages. 
-func (e *Executor) MessageAck(ctx context.Context, keyspace, name string, ids []*querypb.Value) (int64, error) { - table, err := e.VSchema().FindTable(keyspace, name) +// handleMessageStream executes queries of the form 'stream * from t' +func (e *Executor) handleMessageStream(ctx context.Context, safeSession *SafeSession, sql string, target querypb.Target, callback func(*sqltypes.Result) error, vcursor *vcursorImpl, logStats *LogStats) error { + stmt, err := sqlparser.Parse(sql) if err != nil { - return 0, err + logStats.Error = err + return err + } + + streamStmt, ok := stmt.(*sqlparser.Stream) + if !ok { + logStats.Error = err + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unrecognized STREAM statement: %v", sql) + } + + table, err := vcursor.FindTable(streamStmt.Table) + if err != nil { + logStats.Error = err + return err } - // TODO(sougou): Change this to use Session. - vcursor := newVCursorImpl( + + execStart := time.Now() + logStats.PlanTime = execStart.Sub(logStats.StartTime) + + err = e.MessageStream(ctx, table.Keyspace.Name, target.Shard, nil, table.Name.CompliantName(), callback) + logStats.Error = err + logStats.ExecuteTime = time.Since(execStart) + return err +} + +// MessageStream is part of the vtgate service API. This is a V2 level API that's sent +// to the Resolver. +func (e *Executor) MessageStream(ctx context.Context, keyspace string, shard string, keyRange *topodatapb.KeyRange, name string, callback func(*sqltypes.Result) error) error { + err := e.resolver.MessageStream( ctx, - NewSafeSession(&vtgatepb.Session{}), - querypb.Target{ - Keyspace: table.Keyspace.Name, - TabletType: topodatapb.TabletType_MASTER, - }, - "", - e, - nil, + keyspace, + shard, + keyRange, + name, + callback, ) + return formatError(err) +} - newKeyspace, _, allShards, err := srvtopo.GetKeyspaceShards(ctx, e.serv, e.cell, table.Keyspace.Name, topodatapb.TabletType_MASTER) +// MessageAck acks messages. 
+// FIXME(alainjobart) the keyspace field here is not used for routing, +// but just for finding the table in the VSchema. If we don't find the +// table in the VSchema, we could just assume it's sharded (which would work +// for unsharded as well) and route it to the provided keyspace. +func (e *Executor) MessageAck(ctx context.Context, keyspace, name string, ids []*querypb.Value) (int64, error) { + table, err := e.VSchema().FindTable(keyspace, name) if err != nil { return 0, err } - shardIDs := make(map[string][]*querypb.Value) + var rss []*srvtopo.ResolvedShard + var rssValues [][]*querypb.Value if table.Keyspace.Sharded { + // TODO(sougou): Change this to use Session. + vcursor := newVCursorImpl( + ctx, + NewSafeSession(&vtgatepb.Session{}), + querypb.Target{ + Keyspace: table.Keyspace.Name, + TabletType: topodatapb.TabletType_MASTER, + }, + "", + e, + nil, + ) + // We always use the (unique) primary vindex. The ID must be the // primary vindex for message tables. mapper := table.ColumnVindexes[0].Vindex.(vindexes.Unique) @@ -856,20 +902,19 @@ func (e *Executor) MessageAck(ctx context.Context, keyspace, name string, ids [] if err != nil { return 0, err } - for i, ksid := range ksids { - if ksid == nil { - continue - } - shard, err := srvtopo.GetShardForKeyspaceID(allShards, ksid) - if err != nil { - return 0, err - } - shardIDs[shard] = append(shardIDs[shard], ids[i]) + rss, rssValues, err = e.resolver.resolver.ResolveKeyspaceIdsValues(ctx, table.Keyspace.Name, ids, ksids, topodatapb.TabletType_MASTER) + if err != nil { + return 0, err } } else { - shardIDs[allShards[0].Name] = ids + rs, err := e.resolver.resolver.GetAnyShard(ctx, table.Keyspace.Name, topodatapb.TabletType_MASTER) + if err != nil { + return 0, err + } + rss = []*srvtopo.ResolvedShard{rs} + rssValues = [][]*querypb.Value{ids} } - return e.scatterConn.MessageAck(ctx, newKeyspace, shardIDs, name) + return e.scatterConn.MessageAck(ctx, rss, rssValues, name) } // IsKeyspaceRangeBasedSharded returns 
true if the keyspace in the vschema is diff --git a/go/vt/vtgate/executor_dml_test.go b/go/vt/vtgate/executor_dml_test.go index 756d0fe3d21..3dff0ab4082 100644 --- a/go/vt/vtgate/executor_dml_test.go +++ b/go/vt/vtgate/executor_dml_test.go @@ -106,11 +106,8 @@ func TestUpdateEqual(t *testing.T) { BindVariables: map[string]*querypb.BindVariable{}, }, { - Sql: "update user2 set name = 'myname', lastname = 'mylastname' where id = 1 /* vtgate:: keyspace_id:166b40b44aba4bd6 */", - BindVariables: map[string]*querypb.BindVariable{ - "_name0": sqltypes.BytesBindVariable([]byte("myname")), - "_lastname0": sqltypes.BytesBindVariable([]byte("mylastname")), - }, + Sql: "update user2 set name = 'myname', lastname = 'mylastname' where id = 1 /* vtgate:: keyspace_id:166b40b44aba4bd6 */", + BindVariables: map[string]*querypb.BindVariable{}, }, } if !reflect.DeepEqual(sbc1.Queries, wantQueries) { @@ -142,7 +139,127 @@ func TestUpdateEqual(t *testing.T) { if !reflect.DeepEqual(sbclookup.Queries, wantQueries) { t.Errorf("sbclookup.Queries: %+v, want %+v\n", sbclookup.Queries, wantQueries) } +} +func TestUpdateMultiOwned(t *testing.T) { + vschema := ` +{ + "sharded": true, + "vindexes": { + "hash_index": { + "type": "hash" + }, + "lookup1": { + "type": "lookup_hash_unique", + "owner": "user", + "params": { + "table": "music_user_map", + "from": "from1,from2", + "to": "user_id" + } + }, + "lookup2": { + "type": "lookup_hash_unique", + "owner": "user", + "params": { + "table": "music_user_map", + "from": "from1,from2", + "to": "user_id" + } + }, + "lookup3": { + "type": "lookup_hash_unique", + "owner": "user", + "params": { + "table": "music_user_map", + "from": "from1,from2", + "to": "user_id" + } + } + }, + "tables": { + "user": { + "column_vindexes": [ + { + "column": "id", + "name": "hash_index" + }, + { + "columns": ["a", "b"], + "name": "lookup1" + }, + { + "columns": ["c", "d"], + "name": "lookup2" + }, + { + "columns": ["e", "f"], + "name": "lookup3" + } + ] + } + } +} +` + 
executor, sbc1, sbc2, sbclookup := createCustomExecutor(vschema) + + sbc1.SetResults([]*sqltypes.Result{ + sqltypes.MakeTestResult( + sqltypes.MakeTestFields("a|b|c|d|e|f", "int64|int64|int64|int64|int64|int64"), + "10|20|30|40|50|60", + ), + }) + _, err := executorExec(executor, "update user set a=1, b=2, f=4, e=3 where id=1", nil) + if err != nil { + t.Fatal(err) + } + wantQueries := []*querypb.BoundQuery{{ + Sql: "select a, b, c, d, e, f from user where id = 1 for update", + BindVariables: map[string]*querypb.BindVariable{}, + }, { + Sql: "update user set a = 1, b = 2, f = 4, e = 3 where id = 1 /* vtgate:: keyspace_id:166b40b44aba4bd6 */", + BindVariables: map[string]*querypb.BindVariable{}, + }} + if !reflect.DeepEqual(sbc1.Queries, wantQueries) { + t.Errorf("sbc1.Queries:\n%+v, want\n%+v\n", sbc1.Queries, wantQueries) + } + if sbc2.Queries != nil { + t.Errorf("sbc2.Queries: %+v, want nil\n", sbc2.Queries) + } + + wantQueries = []*querypb.BoundQuery{{ + Sql: "delete from music_user_map where from1 = :from1 and from2 = :from2 and user_id = :user_id", + BindVariables: map[string]*querypb.BindVariable{ + "from1": sqltypes.Int64BindVariable(10), + "from2": sqltypes.Int64BindVariable(20), + "user_id": sqltypes.Uint64BindVariable(1), + }, + }, { + Sql: "insert into music_user_map(from1, from2, user_id) values (:from10, :from20, :user_id0)", + BindVariables: map[string]*querypb.BindVariable{ + "from10": sqltypes.Int64BindVariable(1), + "from20": sqltypes.Int64BindVariable(2), + "user_id0": sqltypes.Uint64BindVariable(1), + }, + }, { + Sql: "delete from music_user_map where from1 = :from1 and from2 = :from2 and user_id = :user_id", + BindVariables: map[string]*querypb.BindVariable{ + "from1": sqltypes.Int64BindVariable(50), + "from2": sqltypes.Int64BindVariable(60), + "user_id": sqltypes.Uint64BindVariable(1), + }, + }, { + Sql: "insert into music_user_map(from1, from2, user_id) values (:from10, :from20, :user_id0)", + BindVariables: map[string]*querypb.BindVariable{ + 
"from10": sqltypes.Int64BindVariable(3), + "from20": sqltypes.Int64BindVariable(4), + "user_id0": sqltypes.Uint64BindVariable(1), + }, + }} + + if !reflect.DeepEqual(sbclookup.Queries, wantQueries) { + t.Errorf("sbclookup.Queries:\n%+v, want\n%+v\n", sbclookup.Queries, wantQueries) + } } func TestUpdateComments(t *testing.T) { @@ -374,6 +491,25 @@ func TestDeleteEqual(t *testing.T) { } } +func TestDeleteSharded(t *testing.T) { + executor, sbc1, sbc2, _ := createExecutorEnv() + _, err := executorExec(executor, "delete from user_extra", nil) + if err != nil { + t.Error(err) + } + // Queries get annotatted. + wantQueries := []*querypb.BoundQuery{{ + Sql: "delete from user_extra/* vtgate:: filtered_replication_unfriendly */", + BindVariables: map[string]*querypb.BindVariable{}, + }} + if !reflect.DeepEqual(sbc1.Queries, wantQueries) { + t.Errorf("sbc.Queries:\n%+v, want\n%+v\n", sbc1.Queries, wantQueries) + } + if !reflect.DeepEqual(sbc2.Queries, wantQueries) { + t.Errorf("sbc.Queries:\n%+v, want\n%+v\n", sbc2.Queries, wantQueries) + } +} + func TestDeleteComments(t *testing.T) { executor, sbc, _, sbclookup := createExecutorEnv() @@ -531,6 +667,85 @@ func TestInsertSharded(t *testing.T) { } } +func TestInsertShardedAutocommitLookup(t *testing.T) { + + vschema := ` +{ + "sharded": true, + "vindexes": { + "hash_index": { + "type": "hash" + }, + "name_user_map": { + "type": "lookup_hash", + "owner": "user", + "params": { + "table": "name_user_map", + "from": "name", + "to": "user_id", + "autocommit": "true" + } + } + }, + "tables": { + "user": { + "column_vindexes": [ + { + "column": "Id", + "name": "hash_index" + }, + { + "column": "name", + "name": "name_user_map" + } + ], + "auto_increment": { + "column": "id", + "sequence": "user_seq" + }, + "columns": [ + { + "name": "textcol", + "type": "VARCHAR" + } + ] + } + } +} +` + executor, sbc1, sbc2, sbclookup := createCustomExecutor(vschema) + + _, err := executorExec(executor, "insert into user(id, v, name) values (1, 2, 
'myname')", nil) + if err != nil { + t.Error(err) + } + wantQueries := []*querypb.BoundQuery{{ + Sql: "insert into user(id, v, name) values (:_Id0, 2, :_name0) /* vtgate:: keyspace_id:166b40b44aba4bd6 */", + BindVariables: map[string]*querypb.BindVariable{ + "_Id0": sqltypes.Int64BindVariable(1), + "_name0": sqltypes.BytesBindVariable([]byte("myname")), + "__seq0": sqltypes.Int64BindVariable(1), + }, + }} + if !reflect.DeepEqual(sbc1.Queries, wantQueries) { + t.Errorf("sbc1.Queries:\n%+v, want\n%+v\n", sbc1.Queries, wantQueries) + } + if sbc2.Queries != nil { + t.Errorf("sbc2.Queries: %+v, want nil\n", sbc2.Queries) + } + wantQueries = []*querypb.BoundQuery{{ + Sql: "insert into name_user_map(name, user_id) values (:name0, :user_id0) on duplicate key update name = values(name), user_id = values(user_id)", + BindVariables: map[string]*querypb.BindVariable{ + "name0": sqltypes.BytesBindVariable([]byte("myname")), + "user_id0": sqltypes.Uint64BindVariable(1), + }, + }} + // autocommit should go as ExecuteBatch + if !reflect.DeepEqual(sbclookup.BatchQueries[0], wantQueries) { + t.Errorf("sbclookup.BatchQueries[0]: \n%+v, want \n%+v", sbclookup.BatchQueries[0], wantQueries) + } +} + func TestInsertShardedIgnore(t *testing.T) { executor, sbc1, sbc2, sbclookup := createExecutorEnv() diff --git a/go/vt/vtgate/executor_framework_test.go b/go/vt/vtgate/executor_framework_test.go index 2f6fb6af06d..b1e103fa629 100644 --- a/go/vt/vtgate/executor_framework_test.go +++ b/go/vt/vtgate/executor_framework_test.go @@ -26,6 +26,7 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/streamlog" "github.com/youtube/vitess/go/vt/discovery" + "github.com/youtube/vitess/go/vt/vtgate/vindexes" "github.com/youtube/vitess/go/vt/vttablet/sandboxconn" "golang.org/x/net/context" @@ -85,6 +86,9 @@ var executorVSchema = ` }, "keyspace_id": { "type": "numeric" + }, + "krcol_vdx": { + "type": "keyrange_lookuper" } }, "tables": { @@ -130,6 +134,14 @@ var executorVSchema = 
` } ] }, + "sharded_user_msgs": { + "column_vindexes": [ + { + "column": "user_id", + "name": "hash_index" + } + ] + }, "music": { "column_vindexes": [ { @@ -194,6 +206,18 @@ var executorVSchema = ` } ] }, + "keyrange_table": { + "column_vindexes": [ + { + "column": "id", + "name": "hash_index" + }, + { + "column": "krcol", + "name": "krcol_vdx" + } + ] + }, "ksid_table": { "column_vindexes": [ { @@ -205,6 +229,7 @@ var executorVSchema = ` } } ` + var badVSchema = ` { "sharded": false, @@ -224,6 +249,7 @@ var unshardedVSchema = ` "music_user_map": {}, "name_user_map": {}, "name_lastname_keyspace_id_map": {}, + "user_msgs": {}, "ins_lookup": {}, "main1": { "auto_increment": { @@ -236,6 +262,31 @@ var unshardedVSchema = ` } ` +// keyRangeLookuper is for testing a lookup that returns a keyrange. +type keyRangeLookuper struct { +} + +func (v *keyRangeLookuper) String() string { return "keyrange_lookuper" } +func (*keyRangeLookuper) Cost() int { return 0 } +func (*keyRangeLookuper) Verify(vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { + return []bool{}, nil +} +func (*keyRangeLookuper) Map(vindexes.VCursor, []sqltypes.Value) ([]vindexes.Ksids, error) { + return []vindexes.Ksids{{ + Range: &topodatapb.KeyRange{ + End: []byte{0x10}, + }, + }}, nil +} + +func newLookupMigrator(name string, params map[string]string) (vindexes.Vindex, error) { + return &keyRangeLookuper{}, nil +} + +func init() { + vindexes.Register("keyrange_lookuper", newLookupMigrator) +} + const testBufferSize = 10 const testCacheSize = int64(10) @@ -270,6 +321,24 @@ func createExecutorEnv() (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn return executor, sbc1, sbc2, sbclookup } +func createCustomExecutor(vschema string) (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn) { + cell := "aa" + hc := discovery.NewFakeHealthCheck() + s := createSandbox("TestExecutor") + s.VSchema = vschema + serv := new(sandboxTopo) + resolver := newTestResolver(hc, serv, cell) + sbc1 
= hc.AddTestTablet(cell, "-20", 1, "TestExecutor", "-20", topodatapb.TabletType_MASTER, true, 1, nil) + sbc2 = hc.AddTestTablet(cell, "40-60", 1, "TestExecutor", "40-60", topodatapb.TabletType_MASTER, true, 1, nil) + + createSandbox(KsTestUnsharded) + sbclookup = hc.AddTestTablet(cell, "0", 1, KsTestUnsharded, "0", topodatapb.TabletType_MASTER, true, 1, nil) + getSandbox(KsTestUnsharded).VSchema = unshardedVSchema + + executor = NewExecutor(context.Background(), serv, cell, "", resolver, false, testBufferSize, testCacheSize, false) + return executor, sbc1, sbc2, sbclookup +} + func executorExec(executor *Executor, sql string, bv map[string]*querypb.BindVariable) (*sqltypes.Result, error) { return executor.Execute( context.Background(), diff --git a/go/vt/vtgate/executor_select_test.go b/go/vt/vtgate/executor_select_test.go index 824e2f13c4d..e0b2e945e32 100644 --- a/go/vt/vtgate/executor_select_test.go +++ b/go/vt/vtgate/executor_select_test.go @@ -583,6 +583,26 @@ func TestStreamSelectEqual(t *testing.T) { } } +func TestSelectKeyRange(t *testing.T) { + executor, sbc1, sbc2, _ := createExecutorEnv() + + _, err := executorExec(executor, "select id, krcol from keyrange_table where krcol = 1", nil) + if err != nil { + t.Error(err) + } + wantQueries := []*querypb.BoundQuery{{ + Sql: "select id, krcol from keyrange_table where krcol = 1", + BindVariables: map[string]*querypb.BindVariable{}, + }} + if !reflect.DeepEqual(sbc1.Queries, wantQueries) { + t.Errorf("sbc1.Queries: %+v, want %+v\n", sbc1.Queries, wantQueries) + } + if sbc2.Queries != nil { + t.Errorf("sbc2.Queries: %+v, want nil\n", sbc2.Queries) + } + sbc1.Queries = nil +} + func TestSelectEqualFail(t *testing.T) { executor, _, _, sbclookup := createExecutorEnv() s := getSandbox("TestExecutor") diff --git a/go/vt/vtgate/executor_stream_test.go b/go/vt/vtgate/executor_stream_test.go new file mode 100644 index 00000000000..529750cbdbb --- /dev/null +++ b/go/vt/vtgate/executor_stream_test.go @@ -0,0 +1,119 @@ +/* 
+Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vtgate + +import ( + "testing" + "time" + + "github.com/youtube/vitess/go/sqltypes" + "github.com/youtube/vitess/go/vt/discovery" + querypb "github.com/youtube/vitess/go/vt/proto/query" + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" + _ "github.com/youtube/vitess/go/vt/vtgate/vindexes" + "github.com/youtube/vitess/go/vt/vttablet/sandboxconn" + "golang.org/x/net/context" +) + +func TestStreamSQLUnsharded(t *testing.T) { + executor, _, _, _ := createExecutorEnv() + logChan := QueryLogger.Subscribe("Test") + defer QueryLogger.Unsubscribe(logChan) + + sql := "stream * from user_msgs" + result, err := executorStreamMessages(executor, sql) + if err != nil { + t.Error(err) + } + wantResult := sandboxconn.StreamRowResult + if !result.Equal(wantResult) { + t.Errorf("result: %+v, want %+v", result, wantResult) + } +} + +func TestStreamSQLSharded(t *testing.T) { + // Special setup: Don't use createExecutorEnv. 
+ cell := "aa" + hc := discovery.NewFakeHealthCheck() + s := createSandbox("TestExecutor") + s.VSchema = executorVSchema + getSandbox(KsTestUnsharded).VSchema = unshardedVSchema + serv := new(sandboxTopo) + resolver := newTestResolver(hc, serv, cell) + shards := []string{"-20", "20-40", "40-60", "60-80", "80-a0", "a0-c0", "c0-e0", "e0-"} + var conns []*sandboxconn.SandboxConn + for _, shard := range shards { + sbc := hc.AddTestTablet(cell, shard, 1, "TestExecutor", shard, topodatapb.TabletType_MASTER, true, 1, nil) + conns = append(conns, sbc) + } + executor := NewExecutor(context.Background(), serv, cell, "", resolver, false, testBufferSize, testCacheSize, false) + + sql := "stream * from sharded_user_msgs" + result, err := executorStreamMessages(executor, sql) + if err != nil { + t.Error(err) + } + wantResult := &sqltypes.Result{ + Fields: sandboxconn.SingleRowResult.Fields, + Rows: [][]sqltypes.Value{ + sandboxconn.StreamRowResult.Rows[0], + sandboxconn.StreamRowResult.Rows[0], + sandboxconn.StreamRowResult.Rows[0], + sandboxconn.StreamRowResult.Rows[0], + sandboxconn.StreamRowResult.Rows[0], + sandboxconn.StreamRowResult.Rows[0], + sandboxconn.StreamRowResult.Rows[0], + sandboxconn.StreamRowResult.Rows[0], + }, + } + if !result.Equal(wantResult) { + t.Errorf("result: %+v, want %+v", result, wantResult) + } +} + +func executorStreamMessages(executor *Executor, sql string) (qr *sqltypes.Result, err error) { + results := make(chan *sqltypes.Result, 100) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) + defer cancel() + err = executor.StreamExecute( + ctx, + "TestExecuteStream", + NewSafeSession(masterSession), + sql, + nil, + querypb.Target{ + TabletType: topodatapb.TabletType_MASTER, + }, + func(qr *sqltypes.Result) error { + results <- qr + return nil + }, + ) + close(results) + if err != nil { + return nil, err + } + first := true + for r := range results { + if first { + qr = &sqltypes.Result{Fields: r.Fields} + first = false + 
} + qr.Rows = append(qr.Rows, r.Rows...) + } + return qr, nil +} diff --git a/go/vt/vtgate/executor_test.go b/go/vt/vtgate/executor_test.go index 66e5c77ab09..7639840fc47 100644 --- a/go/vt/vtgate/executor_test.go +++ b/go/vt/vtgate/executor_test.go @@ -580,11 +580,12 @@ func TestExecutorShow(t *testing.T) { buildVarCharRow("TestExecutor", "idx_noauto", "hash", "", "noauto_table"), buildVarCharRow("TestExecutor", "insert_ignore_idx", "lookup_hash", "from=fromcol; table=ins_lookup; to=tocol", "insert_ignore_test"), buildVarCharRow("TestExecutor", "keyspace_id", "numeric", "", ""), + buildVarCharRow("TestExecutor", "krcol_vdx", "keyrange_lookuper", "", ""), buildVarCharRow("TestExecutor", "music_user_map", "lookup_hash_unique", "from=music_id; table=music_user_map; to=user_id", "music"), buildVarCharRow("TestExecutor", "name_lastname_keyspace_id_map", "lookup", "from=name,lastname; table=name_lastname_keyspace_id_map; to=keyspace_id", "user2"), buildVarCharRow("TestExecutor", "name_user_map", "lookup_hash", "from=name; table=name_user_map; to=user_id", "user"), }, - RowsAffected: 8, + RowsAffected: 9, } if !reflect.DeepEqual(qr, wantqr) { t.Errorf("show vindexes:\n%+v, want\n%+v", qr, wantqr) @@ -684,9 +685,10 @@ func TestExecutorShow(t *testing.T) { buildVarCharRow("name_lastname_keyspace_id_map"), buildVarCharRow("name_user_map"), buildVarCharRow("simple"), + buildVarCharRow("user_msgs"), buildVarCharRow("user_seq"), }, - RowsAffected: 8, + RowsAffected: 9, } if !reflect.DeepEqual(qr, wantqr) { t.Errorf("show vschema_tables:\n%+v, want\n%+v", qr, wantqr) @@ -773,7 +775,6 @@ func TestExecutorOther(t *testing.T) { "explain", "repair", "optimize", - "truncate", } wantCount := []int64{0, 0, 0} for _, stmt := range stmts { @@ -824,6 +825,7 @@ func TestExecutorDDL(t *testing.T) { "alter", "rename", "drop", + "truncate", } wantCount := []int64{0, 0, 0} for _, stmt := range stmts { diff --git a/go/vt/vtgate/gateway/discoverygateway.go 
b/go/vt/vtgate/gateway/discoverygateway.go index 2f40b4dea73..9ade90cf3b0 100644 --- a/go/vt/vtgate/gateway/discoverygateway.go +++ b/go/vt/vtgate/gateway/discoverygateway.go @@ -118,7 +118,7 @@ func createDiscoveryGateway(hc discovery.HealthCheck, topoServer *topo.Server, s ctw := discovery.NewCellTabletsWatcher(dg.topoServer, tr, c, *refreshInterval, *topoReadConcurrency) dg.tabletsWatchers = append(dg.tabletsWatchers, ctw) } - dg.QueryService = queryservice.Wrap(dg, dg.withRetry) + dg.QueryService = queryservice.Wrap(nil, dg.withRetry) return dg } @@ -139,14 +139,31 @@ func (dg *discoveryGateway) WaitForTablets(ctx context.Context, tabletTypesToWai return nil } - return dg.tsc.WaitForAllServingTablets(ctx, dg.srvTopoServer, dg.localCell, tabletTypesToWait) + // Finds the targets to look for. + targets, err := srvtopo.FindAllTargets(ctx, dg.srvTopoServer, dg.localCell, tabletTypesToWait) + if err != nil { + return err + } + + return dg.tsc.WaitForAllServingTablets(ctx, targets) } -// StreamHealth is currently not implemented. -// This function hides the inner implementation. -// TODO(alainjobart): Maybe we should? +// GetAggregateStats is part of the srvtopo.TargetStats interface. +func (dg *discoveryGateway) GetAggregateStats(target *querypb.Target) (*querypb.AggregateStats, queryservice.QueryService, error) { + stats, err := dg.tsc.GetAggregateStats(target) + return stats, dg, err +} + +// GetMasterCell is part of the srvtopo.TargetStats interface. +func (dg *discoveryGateway) GetMasterCell(keyspace, shard string) (string, queryservice.QueryService, error) { + cell, err := dg.tsc.GetMasterCell(keyspace, shard) + return cell, dg, err +} + +// StreamHealth is not forwarded to any other tablet, +// but we handle it directly here. 
func (dg *discoveryGateway) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error { - panic("not implemented") + return StreamHealthFromTargetStatsListener(ctx, dg.tsc, callback) } // Close shuts down underlying connections. diff --git a/go/vt/vtgate/gateway/gateway.go b/go/vt/vtgate/gateway/gateway.go index 38939317c9f..89330eead34 100644 --- a/go/vt/vtgate/gateway/gateway.go +++ b/go/vt/vtgate/gateway/gateway.go @@ -20,6 +20,7 @@ package gateway import ( "flag" + "fmt" "time" log "github.com/golang/glog" @@ -30,6 +31,7 @@ import ( "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/vttablet/queryservice" + querypb "github.com/youtube/vitess/go/vt/proto/query" topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) @@ -44,8 +46,15 @@ var ( // A Gateway is the query processing module for each shard, // which is used by ScatterConn. type Gateway interface { + // TODO(alainjobart) The QueryService part of this interface + // will be removed soon, in favor of the TargetStats part (that + // returns a QueryService) queryservice.QueryService + // srvtopo.TargetStats allows this Gateway to resolve a Target + // into a QueryService. It is used by the srvtopo.Resolver object. + srvtopo.TargetStats + // WaitForTablets asks the gateway to wait for the provided // tablets types to be available. It the context is canceled // before the end, it should return ctx.Err(). @@ -107,3 +116,53 @@ func WaitForTablets(gw Gateway, tabletTypesToWait []topodatapb.TabletType) error } return err } + +// StreamHealthFromTargetStatsListener responds to a StreamHealth +// streaming RPC using a srvtopo.TargetStatsListener implementation. +func StreamHealthFromTargetStatsListener(ctx context.Context, l srvtopo.TargetStatsListener, callback func(*querypb.StreamHealthResponse) error) error { + // Subscribe to the TargetStatsListener aggregate stats. 
+ id, entries, c, err := l.Subscribe() + if err != nil { + return err + } + defer func() { + // Unsubscribe so we don't receive more updates, and + // drain the channel. + l.Unsubscribe(id) + for range c { + } + }() + + // Send all current entries. + for _, e := range entries { + shr := &querypb.StreamHealthResponse{ + Target: e.Target, + TabletExternallyReparentedTimestamp: e.TabletExternallyReparentedTimestamp, + AggregateStats: e.Stats, + } + if err := callback(shr); err != nil { + return err + } + } + + // Now listen for updates, or the end of the connection. + for { + select { + case <-ctx.Done(): + return ctx.Err() + case e, ok := <-c: + if !ok { + // Channel is closed, should never happen. + return fmt.Errorf("channel closed") + } + shr := &querypb.StreamHealthResponse{ + Target: e.Target, + TabletExternallyReparentedTimestamp: e.TabletExternallyReparentedTimestamp, + AggregateStats: e.Stats, + } + if err := callback(shr); err != nil { + return err + } + } + } +} diff --git a/go/vt/vtgate/gateway/hybridgateway.go b/go/vt/vtgate/gateway/hybridgateway.go new file mode 100644 index 00000000000..8ac6f0faa35 --- /dev/null +++ b/go/vt/vtgate/gateway/hybridgateway.go @@ -0,0 +1,198 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package gateway + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/youtube/vitess/go/stats" + querypb "github.com/youtube/vitess/go/vt/proto/query" + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" + "github.com/youtube/vitess/go/vt/srvtopo" + "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" +) + +// HybridGateway implements the gateway.Gateway interface by forwarding +// the queries to the right underlying implementation: +// - it has one gateway that watches for tablets. Usually a DiscoveryGateway. +// Useful for local tablets, or remote tablets that can be accessed. +// - it has a list of remote vtgate connections to talk to l2 vtgate processes. +// Useful for remote tablets that are far away, or if the number of local +// tablets grows too big. +// +// Note the WaitForTablets method for now only waits on the local gateway. +type HybridGateway struct { + queryservice.QueryService + + // gw is the local gateway that has the local connections. + gw Gateway + + // l2vtgates is the list of remote connections to other vtgate pools. + l2vtgates []*L2VTGateConn +} + +// NewHybridGateway returns a new HybridGateway based on the provided +// parameters. gw can be nil, in which case it is assumed there is no +// local tablets. +func NewHybridGateway(gw Gateway, addrs []string, retryCount int) (*HybridGateway, error) { + h := &HybridGateway{ + gw: gw, + } + + for i, addr := range addrs { + conn, err := NewL2VTGateConn(fmt.Sprintf("%v", i), addr, retryCount) + if err != nil { + h.Close(context.Background()) + return nil, fmt.Errorf("dialing %v failed: %v", addr, err) + } + h.l2vtgates = append(h.l2vtgates, conn) + } + + h.QueryService = queryservice.Wrap(nil, h.route) + return h, nil +} + +// Close is part of the queryservice.QueryService interface. 
+func (h *HybridGateway) Close(ctx context.Context) error { + for _, l := range h.l2vtgates { + l.Close(ctx) + } + return nil +} + +// WaitForTablets is part of the Gateway interface. +// We just forward to the local Gateway, if any. +func (h *HybridGateway) WaitForTablets(ctx context.Context, tabletTypesToWait []topodatapb.TabletType) error { + if h.gw != nil { + return h.gw.WaitForTablets(ctx, tabletTypesToWait) + } + + // No local tablets, we don't wait for anything here. + return nil +} + +// RegisterStats registers the l2vtgate connection counts stats. +func (h *HybridGateway) RegisterStats() { + stats.NewMultiCountersFunc("L2VtgateConnections", []string{"Keyspace", "ShardName", "TabletType"}, h.servingConnStats) +} + +func (h *HybridGateway) servingConnStats() map[string]int64 { + res := make(map[string]int64) + for _, l := range h.l2vtgates { + l.servingConnStats(res) + } + return res +} + +// CacheStatus is part of the Gateway interface. It just concatenates +// all statuses from all underlying parts. +func (h *HybridGateway) CacheStatus() TabletCacheStatusList { + var result TabletCacheStatusList + + // Start with the local Gateway part. + if h.gw != nil { + result = h.gw.CacheStatus() + } + + // Then add each gateway one at a time. + for _, l := range h.l2vtgates { + partial := l.CacheStatus() + result = append(result, partial...) + } + + return result +} + +// route sends the action to the right underlying implementation. +// This doesn't retry, and doesn't collect stats, as these two are +// done by the underlying gw or l2VTGateConn. +// +// FIXME(alainjobart) now we only use gw, or the one l2vtgates we have. +// Need to deprecate this code in favor of using GetAggregateStats. 
+func (h *HybridGateway) route(ctx context.Context, target *querypb.Target, conn queryservice.QueryService, name string, inTransaction bool, inner func(context.Context, *querypb.Target, queryservice.QueryService) (error, bool)) error { + if h.gw != nil { + err, _ := inner(ctx, target, h.gw) + return NewShardError(err, target, nil, inTransaction) + } + if len(h.l2vtgates) == 1 { + err, _ := inner(ctx, target, h.l2vtgates[0]) + return NewShardError(err, target, nil, inTransaction) + } + return NewShardError(topo.ErrNoNode, target, nil, inTransaction) +} + +// GetAggregateStats is part of the srvtopo.TargetStats interface, included +// in the gateway.Gateway interface. +func (h *HybridGateway) GetAggregateStats(target *querypb.Target) (*querypb.AggregateStats, queryservice.QueryService, error) { + // Start with the local Gateway part. + if h.gw != nil { + stats, qs, err := h.gw.GetAggregateStats(target) + if err != topo.ErrNoNode { + // The local gateway either worked, or returned an + // error. But it knows about this target. + return stats, qs, err + } + } + + // The local gateway doesn't know about this target, + // try the remote ones. + for _, l := range h.l2vtgates { + stats, err := l.GetAggregateStats(target) + if err != topo.ErrNoNode { + // This remote gateway either worked, or returned an + // error. But it knows about this target. + return stats, l, err + } + } + + // We couldn't find a way to resolve this. + return nil, nil, topo.ErrNoNode +} + +// GetMasterCell is part of the srvtopo.TargetStats interface, included +// in the gateway.Gateway interface. +func (h *HybridGateway) GetMasterCell(keyspace, shard string) (cell string, qs queryservice.QueryService, err error) { + // Start with the local Gateway part. + if h.gw != nil { + cell, qs, err := h.gw.GetMasterCell(keyspace, shard) + if err != topo.ErrNoNode { + // The local gateway either worked, or returned an + // error. But it knows about this target. 
+ return cell, qs, err + } + // The local gateway doesn't know about this target, + // try the remote ones. + } + + for _, l := range h.l2vtgates { + cell, err := l.GetMasterCell(keyspace, shard) + if err != topo.ErrNoNode { + // This remote gateway either worked, or returned an + // error. But it knows about this target. + return cell, l, err + } + } + + // We couldn't find a way to resolve this. + return "", nil, topo.ErrNoNode +} + +var _ Gateway = (*HybridGateway)(nil) +var _ srvtopo.TargetStats = (*HybridGateway)(nil) diff --git a/go/vt/vtgate/gateway/l2vtgateconn.go b/go/vt/vtgate/gateway/l2vtgateconn.go new file mode 100644 index 00000000000..228b23bf084 --- /dev/null +++ b/go/vt/vtgate/gateway/l2vtgateconn.go @@ -0,0 +1,271 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gateway + +import ( + "fmt" + "sort" + "sync" + "time" + + log "github.com/golang/glog" + "github.com/youtube/vitess/go/vt/grpcclient" + "github.com/youtube/vitess/go/vt/topo" + "github.com/youtube/vitess/go/vt/topo/topoproto" + "github.com/youtube/vitess/go/vt/vttablet/queryservice" + "github.com/youtube/vitess/go/vt/vttablet/tabletconn" + "golang.org/x/net/context" + + querypb "github.com/youtube/vitess/go/vt/proto/query" + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" +) + +// L2VTGateConn keeps a single connection to a vtgate backend. The +// underlying vtgate backend must have been started with the +// '-enable_forwarding' flag. 
+// +// It will keep a healthcheck connection going to the target, to get +// the list of available Targets. It remembers them, and exposes a +// srvtopo.TargetStats interface to query them. +type L2VTGateConn struct { + queryservice.QueryService + + // addr is the destination address. Immutable. + addr string + + // name is the name to display for stats. Immutable. + name string + + // retryCount is the number of times to retry an action. Immutable. + retryCount int + + // cancel is associated with the life cycle of this L2VTGateConn. + // It is called when Close is called. + cancel context.CancelFunc + + // mu protects the following fields. + mu sync.RWMutex + // stats has all the stats we received from the other side. + stats map[l2VTGateConnKey]*l2VTGateConnValue + // statusAggregators is a map indexed by the key + // name:keyspace/shard/tablet type + statusAggregators map[string]*TabletStatusAggregator +} + +type l2VTGateConnKey struct { + keyspace string + shard string + tabletType topodatapb.TabletType +} + +type l2VTGateConnValue struct { + tabletExternallyReparentedTimestamp int64 + + // aggregates has the per-cell aggregates. + aggregates map[string]*querypb.AggregateStats +} + +// NewL2VTGateConn creates a new L2VTGateConn object. It also starts +// the background go routine to monitor its health. +func NewL2VTGateConn(name, addr string, retryCount int) (*L2VTGateConn, error) { + conn, err := tabletconn.GetDialer()(&topodatapb.Tablet{ + Hostname: addr, + }, grpcclient.FailFast(true)) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithCancel(context.Background()) + c := &L2VTGateConn{ + addr: addr, + name: name, + cancel: cancel, + stats: make(map[l2VTGateConnKey]*l2VTGateConnValue), + statusAggregators: make(map[string]*TabletStatusAggregator), + } + c.QueryService = queryservice.Wrap(conn, c.withRetry) + go c.checkConn(ctx) + return c, nil +} + +// Close is part of the queryservice.QueryService interface. 
+func (c *L2VTGateConn) Close(ctx context.Context) error { + c.cancel() + return nil +} + +func (c *L2VTGateConn) servingConnStats(res map[string]int64) { + c.mu.Lock() + defer c.mu.Unlock() + for k, s := range c.stats { + key := fmt.Sprintf("%s.%s.%s", k.keyspace, k.shard, topoproto.TabletTypeLString(k.tabletType)) + var htc int32 + for _, stats := range s.aggregates { + htc += stats.HealthyTabletCount + } + res[key] += int64(htc) + } +} + +func (c *L2VTGateConn) checkConn(ctx context.Context) { + for { + err := c.StreamHealth(ctx, c.streamHealthCallback) + log.Warningf("StreamHealth to %v failed, will retry after 30s: %v", c.addr, err) + time.Sleep(30 * time.Second) + } +} + +func (c *L2VTGateConn) streamHealthCallback(shr *querypb.StreamHealthResponse) error { + key := l2VTGateConnKey{ + keyspace: shr.Target.Keyspace, + shard: shr.Target.Shard, + tabletType: shr.Target.TabletType, + } + c.mu.Lock() + defer c.mu.Unlock() + e, ok := c.stats[key] + if !ok { + // No current value for this keyspace/shard/tablet type. + // Check if we received a delete, drop it. + if shr.AggregateStats == nil || (shr.AggregateStats.HealthyTabletCount == 0 && shr.AggregateStats.UnhealthyTabletCount == 0) { + return nil + } + + // It's a record for a keyspace/shard/tablet type we + // don't know yet, just create our new record with one + // entry in the map for the cell. + c.stats[key] = &l2VTGateConnValue{ + tabletExternallyReparentedTimestamp: shr.TabletExternallyReparentedTimestamp, + aggregates: map[string]*querypb.AggregateStats{ + shr.Target.Cell: shr.AggregateStats, + }, + } + return nil + } + + // Save our new value. + e.tabletExternallyReparentedTimestamp = shr.TabletExternallyReparentedTimestamp + e.aggregates[shr.Target.Cell] = shr.AggregateStats + return nil +} + +// GetAggregateStats is the discovery part of srvtopo.TargetStats interface. 
+func (c *L2VTGateConn) GetAggregateStats(target *querypb.Target) (*querypb.AggregateStats, error) { + key := l2VTGateConnKey{ + keyspace: target.Keyspace, + shard: target.Shard, + tabletType: target.TabletType, + } + c.mu.RLock() + defer c.mu.RUnlock() + e, ok := c.stats[key] + if !ok { + return nil, topo.ErrNoNode + } + + a, ok := e.aggregates[target.Cell] + if !ok { + return nil, topo.ErrNoNode + } + return a, nil +} + +// GetMasterCell is the discovery part of the srvtopo.TargetStats interface. +func (c *L2VTGateConn) GetMasterCell(keyspace, shard string) (cell string, err error) { + key := l2VTGateConnKey{ + keyspace: keyspace, + shard: shard, + tabletType: topodatapb.TabletType_MASTER, + } + c.mu.RLock() + defer c.mu.RUnlock() + e, ok := c.stats[key] + if !ok { + return "", topo.ErrNoNode + } + + for cell := range e.aggregates { + return cell, nil + } + return "", topo.ErrNoNode +} + +// CacheStatus returns a list of TabletCacheStatus per +// name:keyspace/shard/tablet type. +func (c *L2VTGateConn) CacheStatus() TabletCacheStatusList { + c.mu.RLock() + res := make(TabletCacheStatusList, 0, len(c.statusAggregators)) + for _, aggr := range c.statusAggregators { + res = append(res, aggr.GetCacheStatus()) + } + c.mu.RUnlock() + sort.Sort(res) + return res +} + +func (c *L2VTGateConn) updateStats(target *querypb.Target, startTime time.Time, err error) { + elapsed := time.Now().Sub(startTime) + aggr := c.getStatsAggregator(target) + aggr.UpdateQueryInfo("", target.TabletType, elapsed, err != nil) +} + +func (c *L2VTGateConn) getStatsAggregator(target *querypb.Target) *TabletStatusAggregator { + key := fmt.Sprintf("%v:%v/%v/%v", c.name, target.Keyspace, target.Shard, target.TabletType.String()) + + // get existing aggregator + c.mu.RLock() + aggr, ok := c.statusAggregators[key] + c.mu.RUnlock() + if ok { + return aggr + } + + // create a new one, but check again before the creation + c.mu.Lock() + defer c.mu.Unlock() + aggr, ok = c.statusAggregators[key] + if ok { + 
return aggr
+	}
+	aggr = NewTabletStatusAggregator(target.Keyspace, target.Shard, target.TabletType, key)
+	c.statusAggregators[key] = aggr
+	return aggr
+}
+
+// withRetry uses the connection to execute the action. If there are
+// retryable errors, it retries retryCount times before failing. It
+// does not retry if the connection is in the middle of a
+// transaction. While returning the error, check if it may be a result
+// of a resharding event, and set the re-resolve bit and let the upper
+// layers re-resolve and retry.
+func (c *L2VTGateConn) withRetry(ctx context.Context, target *querypb.Target, conn queryservice.QueryService, name string, inTransaction bool, inner func(context.Context, *querypb.Target, queryservice.QueryService) (error, bool)) error {
+	var err error
+	for i := 0; i < c.retryCount+1; i++ {
+		startTime := time.Now()
+		var canRetry bool
+		err, canRetry = inner(ctx, target, conn)
+		if target != nil {
+			// target can be nil for StreamHealth calls.
+			c.updateStats(target, startTime, err)
+		}
+		if canRetry {
+			continue
+		}
+		break
+	}
+	return NewShardError(err, target, nil, inTransaction)
+}
diff --git a/go/vt/vtgate/gateway/l2vtgategateway.go b/go/vt/vtgate/gateway/l2vtgategateway.go
deleted file mode 100644
index 49c22e2c916..00000000000
--- a/go/vt/vtgate/gateway/l2vtgategateway.go
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
-Copyright 2017 Google Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package gateway - -import ( - "flag" - "fmt" - "sort" - "strings" - "sync" - "time" - - log "github.com/golang/glog" - "golang.org/x/net/context" - - "github.com/youtube/vitess/go/flagutil" - "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/grpcclient" - "github.com/youtube/vitess/go/vt/key" - "github.com/youtube/vitess/go/vt/srvtopo" - "github.com/youtube/vitess/go/vt/topo" - "github.com/youtube/vitess/go/vt/topo/topoproto" - "github.com/youtube/vitess/go/vt/vttablet/queryservice" - "github.com/youtube/vitess/go/vt/vttablet/tabletconn" - - querypb "github.com/youtube/vitess/go/vt/proto/query" - topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" -) - -const ( - gatewayImplementationL2VTGate = "l2vtgategateway" -) - -var ( - l2VTGateGatewayAddrs flagutil.StringListValue -) - -func init() { - flag.Var(&l2VTGateGatewayAddrs, "l2vtgategateway_addrs", "Specifies a comma-separated list of 'addr|keyspace|shard_name or keyrange' values for l2vtgate locations") - RegisterCreator(gatewayImplementationL2VTGate, createL2VTGateGateway) -} - -// l2VTGateConn is a connection to a backend l2vtgate pool -type l2VTGateConn struct { - // set at construction time - addr string - keyspace string - shard string - keyRange *topodatapb.KeyRange // only set if shard is also a KeyRange - conn queryservice.QueryService -} - -// l2VTGateGateway is the main gateway object -type l2VTGateGateway struct { - queryservice.QueryService - // retryCount is set at construction time - retryCount int - - // mu protects all fields below. - mu sync.RWMutex - // connMap is the main map to find the right l2 vtgate pool. - // It is indexed by keyspace name. - connMap map[string][]*l2VTGateConn - // tabletConnMap is a map of address to queryservice.QueryService objects. - // It is used so we don't open multiple connections to the same backend. 
- tabletConnMap map[string]queryservice.QueryService - // statusAggregators is a map indexed by the key - // l2vtgate address + tablet type - statusAggregators map[string]*TabletStatusAggregator -} - -func createL2VTGateGateway(hc discovery.HealthCheck, topoServer *topo.Server, serv srvtopo.Server, cell string, retryCount int) Gateway { - lg := &l2VTGateGateway{ - retryCount: retryCount, - connMap: make(map[string][]*l2VTGateConn), - tabletConnMap: make(map[string]queryservice.QueryService), - statusAggregators: make(map[string]*TabletStatusAggregator), - } - - for _, a := range l2VTGateGatewayAddrs { - parts := strings.Split(a, "|") - if len(parts) != 3 { - log.Exitf("invalid l2vtgategateway_addrs parameter: %v", a) - } - - if err := lg.addL2VTGateConn(parts[0], parts[1], parts[2]); err != nil { - log.Exitf("error adding l2vtgategateway_addrs value %v: %v", a, err) - } - } - lg.QueryService = queryservice.Wrap(nil, lg.withRetry) - - return lg -} - -// addL2VTGateConn adds a backend l2vtgate for the provided keyspace / shard. -func (lg *l2VTGateGateway) addL2VTGateConn(addr, keyspace, shard string) error { - lg.mu.Lock() - defer lg.mu.Unlock() - - // extract keyrange if it's a range - canonical, kr, err := topo.ValidateShardName(shard) - if err != nil { - return fmt.Errorf("error parsing shard name %v: %v", shard, err) - } - - // check for duplicates - for _, c := range lg.connMap[keyspace] { - if c.shard == canonical { - return fmt.Errorf("duplicate %v/%v entry", keyspace, shard) - } - } - - // See if we already have a valid connection - conn, ok := lg.tabletConnMap[addr] - if !ok { - // Dial in the background, as specified by timeout=0. 
- conn, err = tabletconn.GetDialer()(&topodatapb.Tablet{ - Hostname: addr, - }, grpcclient.FailFast(true)) - if err != nil { - return err - } - lg.tabletConnMap[addr] = conn - } - - lg.connMap[keyspace] = append(lg.connMap[keyspace], &l2VTGateConn{ - addr: addr, - keyspace: keyspace, - shard: canonical, - keyRange: kr, - conn: conn, - }) - return nil -} - -// WaitForTablets is part of the Gateway interface. We don't implement it, -// as we don't have anything to wait for. -func (lg *l2VTGateGateway) WaitForTablets(ctx context.Context, tabletTypesToWait []topodatapb.TabletType) error { - return nil -} - -// StreamHealth is currently not implemented. -// This function hides the inner implementation. -// TODO(alainjobart): Maybe we should? -func (lg *l2VTGateGateway) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error { - panic("not implemented") -} - -// Close shuts down underlying connections. -// This function hides the inner implementation. -func (lg *l2VTGateGateway) Close(ctx context.Context) error { - lg.mu.Lock() - defer lg.mu.Unlock() - - // This will wait for all on-going queries before returning. - for _, c := range lg.tabletConnMap { - c.Close(ctx) - } - lg.tabletConnMap = make(map[string]queryservice.QueryService) - lg.connMap = make(map[string][]*l2VTGateConn) - return nil -} - -// CacheStatus returns a list of TabletCacheStatus per -// keyspace/shard/tablet_type. -func (lg *l2VTGateGateway) CacheStatus() TabletCacheStatusList { - lg.mu.RLock() - res := make(TabletCacheStatusList, 0, len(lg.statusAggregators)) - for _, aggr := range lg.statusAggregators { - res = append(res, aggr.GetCacheStatus()) - } - lg.mu.RUnlock() - sort.Sort(res) - return res -} - -// getConn returns the right l2VTGateConn for a given keyspace / shard. 
-func (lg *l2VTGateGateway) getConn(keyspace, shard string) (*l2VTGateConn, error) { - lg.mu.RLock() - defer lg.mu.RUnlock() - - canonical, kr, err := topo.ValidateShardName(shard) - if err != nil { - return nil, fmt.Errorf("invalid shard name: %v", shard) - } - - for _, c := range lg.connMap[keyspace] { - if canonical == c.shard { - // Exact match (probably a non-sharded keyspace). - return c, nil - } - if kr != nil && c.keyRange != nil && key.KeyRangeIncludes(c.keyRange, kr) { - // The shard KeyRange is included in this destination's - // KeyRange, that's the destination we want. - return c, nil - } - } - - return nil, fmt.Errorf("no configured destination for %v/%v", keyspace, shard) -} - -// withRetry gets available connections and executes the action. If there are retryable errors, -// it retries retryCount times before failing. It does not retry if the connection is in -// the middle of a transaction. While returning the error check if it maybe a result of -// a resharding event, and set the re-resolve bit and let the upper layers -// re-resolve and retry. 
-func (lg *l2VTGateGateway) withRetry(ctx context.Context, target *querypb.Target, conn queryservice.QueryService, name string, inTransaction bool, inner func(context.Context, *querypb.Target, queryservice.QueryService) (error, bool)) error { - l2conn, err := lg.getConn(target.Keyspace, target.Shard) - if err != nil { - return fmt.Errorf("no configured destination for %v/%v: %v", target.Keyspace, target.Shard, err) - } - - for i := 0; i < lg.retryCount+1; i++ { - startTime := time.Now() - var canRetry bool - err, canRetry = inner(ctx, target, l2conn.conn) - lg.updateStats(l2conn, target.TabletType, startTime, err) - if canRetry { - continue - } - break - } - return NewShardError(err, target, nil, inTransaction) -} - -func (lg *l2VTGateGateway) updateStats(conn *l2VTGateConn, tabletType topodatapb.TabletType, startTime time.Time, err error) { - elapsed := time.Now().Sub(startTime) - aggr := lg.getStatsAggregator(conn, tabletType) - aggr.UpdateQueryInfo("", tabletType, elapsed, err != nil) -} - -func (lg *l2VTGateGateway) getStatsAggregator(conn *l2VTGateConn, tabletType topodatapb.TabletType) *TabletStatusAggregator { - key := fmt.Sprintf("%v:%v", conn.addr, topoproto.TabletTypeLString(tabletType)) - - // get existing aggregator - lg.mu.RLock() - aggr, ok := lg.statusAggregators[key] - lg.mu.RUnlock() - if ok { - return aggr - } - // create a new one, but check again before the creation - lg.mu.Lock() - defer lg.mu.Unlock() - aggr, ok = lg.statusAggregators[key] - if ok { - return aggr - } - aggr = NewTabletStatusAggregator(conn.keyspace, conn.shard, tabletType, key) - lg.statusAggregators[key] = aggr - return aggr -} diff --git a/go/vt/vtgate/gateway/shard_error.go b/go/vt/vtgate/gateway/shard_error.go index 688f7781bb7..fc0d50e3e13 100644 --- a/go/vt/vtgate/gateway/shard_error.go +++ b/go/vt/vtgate/gateway/shard_error.go @@ -33,5 +33,8 @@ func NewShardError(in error, target *querypb.Target, tablet *topodatapb.Tablet, if tablet != nil { return 
vterrors.Errorf(vterrors.Code(in), "target: %s.%s.%s, used tablet: %s, %v", target.Keyspace, target.Shard, topoproto.TabletTypeLString(target.TabletType), topotools.TabletIdent(tablet), in) } - return vterrors.Errorf(vterrors.Code(in), "target: %s.%s.%s, %v", target.Keyspace, target.Shard, topoproto.TabletTypeLString(target.TabletType), in) + if target != nil { + return vterrors.Errorf(vterrors.Code(in), "target: %s.%s.%s, %v", target.Keyspace, target.Shard, topoproto.TabletTypeLString(target.TabletType), in) + } + return vterrors.Errorf(vterrors.Code(in), "%v", in) } diff --git a/go/vt/vtgate/gatewaytest/grpc_discovery_test.go b/go/vt/vtgate/gatewaytest/grpc_discovery_test.go index 918986681f8..d50443c3836 100644 --- a/go/vt/vtgate/gatewaytest/grpc_discovery_test.go +++ b/go/vt/vtgate/gatewaytest/grpc_discovery_test.go @@ -18,7 +18,6 @@ package gatewaytest import ( "flag" - "fmt" "net" "testing" "time" @@ -29,8 +28,8 @@ import ( "github.com/youtube/vitess/go/vt/discovery" "github.com/youtube/vitess/go/vt/srvtopo" + "github.com/youtube/vitess/go/vt/vtgate" "github.com/youtube/vitess/go/vt/vtgate/gateway" - "github.com/youtube/vitess/go/vt/vtgate/l2vtgate" "github.com/youtube/vitess/go/vt/vttablet/grpcqueryservice" "github.com/youtube/vitess/go/vt/vttablet/tabletconntest" @@ -92,11 +91,12 @@ func TestGRPCDiscovery(t *testing.T) { TestSuite(t, "discovery-grpc", dg, service) } -// TestL2VTGateDiscovery tests the l2vtgate gateway with a gRPC +// TestL2VTGateDiscovery tests the hybrid gateway with a gRPC // connection from the gateway to a l2vtgate in-process object. func TestL2VTGateDiscovery(t *testing.T) { flag.Set("tablet_protocol", "grpc") flag.Set("gateway_implementation", "discoverygateway") + flag.Set("enable_forwarding", "true") // Fake services for the tablet, topo server. service, ts, cell := CreateFakeServers(t) @@ -120,7 +120,7 @@ func TestL2VTGateDiscovery(t *testing.T) { // Wait for the right tablets to be present. 
hc := discovery.NewHealthCheck(10*time.Second, 2*time.Minute) rs := srvtopo.NewResilientServer(ts, "TestL2VTGateDiscovery") - l2vtgate := l2vtgate.Init(hc, ts, rs, "", cell, 2, nil) + l2vtgate := vtgate.Init(context.Background(), hc, ts, rs, cell, 2, nil) hc.AddTablet(&topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ Cell: cell, @@ -149,16 +149,18 @@ func TestL2VTGateDiscovery(t *testing.T) { // L2VTGate: create a gRPC server and listen on the port. server = grpc.NewServer() - grpcqueryservice.Register(server, l2vtgate) + grpcqueryservice.Register(server, l2vtgate.L2VTGate()) go server.Serve(listener) defer server.Stop() - // VTGate: create the l2vtgate gateway - flag.Set("gateway_implementation", "l2vtgategateway") - flag.Set("l2vtgategateway_addrs", fmt.Sprintf("%v|%v|%v", listener.Addr().String(), tabletconntest.TestTarget.Keyspace, tabletconntest.TestTarget.Shard)) - lg := gateway.GetCreator()(nil, ts, nil, "", 2) - defer lg.Close(ctx) + // VTGate: create the HybridGateway, with no local gateway, + // and just the remote address in the l2vtgate pool. + hg, err := gateway.NewHybridGateway(nil, []string{listener.Addr().String()}, 2) + if err != nil { + t.Fatalf("gateway.NewHybridGateway() failed: %v", err) + } + defer hg.Close(ctx) // and run the test suite. - TestSuite(t, "l2vtgate-grpc", lg, service) + TestSuite(t, "l2vtgate-grpc", hg, service) } diff --git a/go/vt/vtgate/l2vtgate/l2vtgate.go b/go/vt/vtgate/l2vtgate.go similarity index 63% rename from go/vt/vtgate/l2vtgate/l2vtgate.go rename to go/vt/vtgate/l2vtgate.go index d4348b2fe41..377a5a1af82 100644 --- a/go/vt/vtgate/l2vtgate/l2vtgate.go +++ b/go/vt/vtgate/l2vtgate.go @@ -14,9 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package l2vtgate provides the core functionnality of a second-layer vtgate -// to route queries from an original vtgate to a subset of tablets. 
-package l2vtgate +package vtgate import ( "time" @@ -25,17 +23,13 @@ import ( "golang.org/x/net/context" "github.com/youtube/vitess/go/stats" - "github.com/youtube/vitess/go/vt/discovery" "github.com/youtube/vitess/go/vt/servenv" - "github.com/youtube/vitess/go/vt/srvtopo" - "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/vterrors" "github.com/youtube/vitess/go/vt/vtgate/gateway" "github.com/youtube/vitess/go/vt/vttablet/queryservice" querypb "github.com/youtube/vitess/go/vt/proto/query" - topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) @@ -47,9 +41,9 @@ var ( // the underlying gateway. type L2VTGate struct { queryservice.QueryService - timings *stats.MultiTimings - tabletCallErrorCount *stats.MultiCounters - gateway gateway.Gateway + timings *stats.MultiTimings + errorCounts *stats.MultiCounters + gateway gateway.Gateway } // RegisterL2VTGate defines the type of registration mechanism. @@ -58,26 +52,16 @@ type RegisterL2VTGate func(queryservice.QueryService) // RegisterL2VTGates stores register funcs for L2VTGate server. var RegisterL2VTGates []RegisterL2VTGate -// Init creates the single L2VTGate with the provided parameters. -func Init(hc discovery.HealthCheck, topoServer *topo.Server, serv srvtopo.Server, statsName, cell string, retryCount int, tabletTypesToWait []topodatapb.TabletType) *L2VTGate { +// initL2VTGate creates the single L2VTGate with the provided parameters. 
+func initL2VTGate(gw gateway.Gateway) *L2VTGate { if l2VTGate != nil { log.Fatalf("L2VTGate already initialized") } - tabletCallErrorCountStatsName := "" - if statsName != "" { - tabletCallErrorCountStatsName = statsName + "ErrorCount" - } - - gw := gateway.GetCreator()(hc, topoServer, serv, cell, retryCount) - if err := gateway.WaitForTablets(gw, tabletTypesToWait); err != nil { - log.Fatalf("gateway.WaitForTablets failed: %v", err) - } - l2VTGate = &L2VTGate{ - timings: stats.NewMultiTimings(statsName, []string{"Operation", "Keyspace", "ShardName", "DbType"}), - tabletCallErrorCount: stats.NewMultiCounters(tabletCallErrorCountStatsName, []string{"Operation", "Keyspace", "ShardName", "DbType"}), - gateway: gw, + timings: stats.NewMultiTimings("QueryServiceCall", []string{"Operation", "Keyspace", "ShardName", "DbType"}), + errorCounts: stats.NewMultiCounters("QueryServiceCallErrorCount", []string{"Operation", "Keyspace", "ShardName", "DbType"}), + gateway: gw, } l2VTGate.QueryService = queryservice.Wrap( gw, @@ -98,11 +82,6 @@ func Init(hc discovery.HealthCheck, topoServer *topo.Server, serv srvtopo.Server return l2VTGate } -// Gateway returns this l2vtgate Gateway object (for tests mainly). -func (l *L2VTGate) Gateway() gateway.Gateway { - return l.gateway -} - func (l *L2VTGate) startAction(name string, target *querypb.Target) (time.Time, []string) { statsKey := []string{name, target.Keyspace, target.Shard, topoproto.TabletTypeLString(target.TabletType)} startTime := time.Now() @@ -116,13 +95,8 @@ func (l *L2VTGate) endAction(startTime time.Time, statsKey []string, err *error) // client queries and are not VTGate's fault. ec := vterrors.Code(*err) if ec != vtrpcpb.Code_ALREADY_EXISTS && ec != vtrpcpb.Code_INVALID_ARGUMENT { - l.tabletCallErrorCount.Add(statsKey, 1) + l.errorCounts.Add(statsKey, 1) } } l.timings.Record(statsKey, startTime) } - -// GetGatewayCacheStatus returns a displayable version of the Gateway cache. 
-func (l *L2VTGate) GetGatewayCacheStatus() gateway.TabletCacheStatusList { - return l.gateway.CacheStatus() -} diff --git a/go/vt/vtgate/planbuilder/dml.go b/go/vt/vtgate/planbuilder/dml.go index 3e33533d435..59b8217cad9 100644 --- a/go/vt/vtgate/planbuilder/dml.go +++ b/go/vt/vtgate/planbuilder/dml.go @@ -56,7 +56,7 @@ func buildUpdatePlan(upd *sqlparser.Update, vschema VSchema) (*engine.Route, err er.Keyspace = rb.ERoute.Keyspace if !er.Keyspace.Sharded { // We only validate non-table subexpressions because the previous analysis has already validated them. - if !validateSubquerySamePlan(rb.ERoute, vschema, upd.Exprs, upd.Where, upd.OrderBy, upd.Limit) { + if !validateSubquerySamePlan(rb.ERoute, rb, vschema, upd.Exprs, upd.Where, upd.OrderBy, upd.Limit) { return nil, errors.New("unsupported: sharded subqueries in DML") } er.Opcode = engine.UpdateUnsharded @@ -105,37 +105,46 @@ func generateQuery(statement sqlparser.Statement) string { func buildChangedVindexesValues(route *engine.Route, update *sqlparser.Update, colVindexes []*vindexes.ColumnVindex) (map[string][]sqltypes.PlanValue, error) { changedVindexes := make(map[string][]sqltypes.PlanValue) for i, vindex := range colVindexes { - for _, assignment := range update.Exprs { - for _, vcol := range vindex.Columns { - if vcol.Equal(assignment.Name.Name) { - pv, err := extractValueFromUpdate(assignment, vcol) - if err != nil { - return changedVindexes, err - } - changedVindexes[vindex.Name] = append(changedVindexes[vindex.Name], pv) + var vindexValues []sqltypes.PlanValue + for _, vcol := range vindex.Columns { + // Searching in order of columns in colvindex. 
+ found := false + for _, assignment := range update.Exprs { + if !vcol.Equal(assignment.Name.Name) { + continue } + if found { + return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "column has duplicate set values: '%v'", assignment.Name.Name) + } + found = true + pv, err := extractValueFromUpdate(assignment, vcol) + if err != nil { + return nil, err + } + vindexValues = append(vindexValues, pv) } } - if len(changedVindexes[vindex.Name]) == 0 { + if len(vindexValues) == 0 { // Vindex not changing, continue continue } - if len(changedVindexes[vindex.Name]) != len(vindex.Columns) { - return changedVindexes, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: update does not have values for all the columns in vindex (%s)", vindex.Name) + if len(vindexValues) != len(vindex.Columns) { + return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: update does not have values for all the columns in vindex (%s)", vindex.Name) } if update.Limit != nil && len(update.OrderBy) == 0 { - return changedVindexes, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: Need to provide order by clause when using limit. Invalid update on vindex: %v", vindex.Name) + return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: Need to provide order by clause when using limit. Invalid update on vindex: %v", vindex.Name) } if i == 0 { - return changedVindexes, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: You can't update primary vindex columns. Invalid update on vindex: %v", vindex.Name) + return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: You can't update primary vindex columns. Invalid update on vindex: %v", vindex.Name) } if _, ok := vindex.Vindex.(vindexes.Lookup); !ok { - return changedVindexes, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: You can only update lookup vindexes. 
Invalid update on vindex: %v", vindex.Name) + return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: You can only update lookup vindexes. Invalid update on vindex: %v", vindex.Name) } if !vindex.Owned { - return changedVindexes, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: You can only update owned vindexes. Invalid update on vindex: %v", vindex.Name) + return nil, vterrors.Errorf(vtrpcpb.Code_UNIMPLEMENTED, "unsupported: You can only update owned vindexes. Invalid update on vindex: %v", vindex.Name) } + changedVindexes[vindex.Name] = vindexValues } return changedVindexes, nil @@ -157,7 +166,7 @@ func buildDeletePlan(del *sqlparser.Delete, vschema VSchema) (*engine.Route, err er.Keyspace = rb.ERoute.Keyspace if !er.Keyspace.Sharded { // We only validate non-table subexpressions because the previous analysis has already validated them. - if !validateSubquerySamePlan(rb.ERoute, vschema, del.Targets, del.Where, del.OrderBy, del.Limit) { + if !validateSubquerySamePlan(rb.ERoute, rb, vschema, del.Targets, del.Where, del.OrderBy, del.Limit) { return nil, errors.New("unsupported: sharded subqueries in DML") } er.Opcode = engine.DeleteUnsharded @@ -178,10 +187,22 @@ func buildDeletePlan(del *sqlparser.Delete, vschema VSchema) (*engine.Route, err return nil, err } err = getDMLRouting(del.Where, er) + // We couldn't generate a route for a single shard + // Execute a delete sharded if err != nil { - return nil, err + er.Opcode = engine.DeleteSharded + } else { + er.Opcode = engine.DeleteEqual + } + + if er.Opcode == engine.DeleteSharded { + if len(er.Table.Owned) != 0 { + return er, errors.New("unsupported: multi shard delete on a table with owned lookup vindexes") + } + if del.Limit != nil { + return er, errors.New("unsupported: multi shard delete with limit") + } } - er.Opcode = engine.DeleteEqual er.Subquery = generateDeleteSubquery(del, er.Table) return er, nil } diff --git a/go/vt/vtgate/planbuilder/expr.go b/go/vt/vtgate/planbuilder/expr.go 
index 1e6151ee355..e376e8f3e3b 100644 --- a/go/vt/vtgate/planbuilder/expr.go +++ b/go/vt/vtgate/planbuilder/expr.go @@ -151,7 +151,7 @@ func hasSubquery(node sqlparser.SQLNode) bool { return has } -func validateSubquerySamePlan(outer *engine.Route, vschema VSchema, nodes ...sqlparser.SQLNode) bool { +func validateSubquerySamePlan(outerRoute *engine.Route, bldr builder, vschema VSchema, nodes ...sqlparser.SQLNode) bool { samePlan := true for _, node := range nodes { @@ -165,7 +165,7 @@ func validateSubquerySamePlan(outer *engine.Route, vschema VSchema, nodes ...sql if !inSubQuery { return true, nil } - bldr, err := processSelect(nodeType, vschema, nil) + bldr, err := processSelect(nodeType, vschema, bldr) if err != nil { samePlan = false return false, err @@ -175,7 +175,7 @@ func validateSubquerySamePlan(outer *engine.Route, vschema VSchema, nodes ...sql samePlan = false return false, errors.New("dummy") } - if innerRoute.ERoute.Keyspace.Name != outer.Keyspace.Name { + if innerRoute.ERoute.Keyspace.Name != outerRoute.Keyspace.Name { samePlan = false return false, errors.New("dummy") } @@ -193,7 +193,7 @@ func validateSubquerySamePlan(outer *engine.Route, vschema VSchema, nodes ...sql samePlan = false return false, errors.New("dummy") } - if innerRoute.ERoute.Keyspace.Name != outer.Keyspace.Name { + if innerRoute.ERoute.Keyspace.Name != outerRoute.Keyspace.Name { samePlan = false return false, errors.New("dummy") } diff --git a/go/vt/vtgate/planbuilder/insert.go b/go/vt/vtgate/planbuilder/insert.go index 5707c9c2925..e8c15eba32b 100644 --- a/go/vt/vtgate/planbuilder/insert.go +++ b/go/vt/vtgate/planbuilder/insert.go @@ -48,7 +48,7 @@ func buildInsertUnshardedPlan(ins *sqlparser.Insert, table *vindexes.Table, vsch Table: table, Keyspace: table.Keyspace, } - if !validateSubquerySamePlan(eRoute, vschema, ins) { + if !validateSubquerySamePlan(eRoute, nil, vschema, ins) { return nil, errors.New("unsupported: sharded subquery in insert values") } var rows sqlparser.Values 
diff --git a/go/vt/vtgate/planbuilder/plan_test.go b/go/vt/vtgate/planbuilder/plan_test.go index e4b29dca02b..7893a2582be 100644 --- a/go/vt/vtgate/planbuilder/plan_test.go +++ b/go/vt/vtgate/planbuilder/plan_test.go @@ -48,6 +48,8 @@ func newHashIndex(name string, _ map[string]string) (vindexes.Vindex, error) { return &hashIndex{name: name}, nil } +var _ vindexes.Unique = (*hashIndex)(nil) + // lookupIndex satisfies Lookup, Unique. type lookupIndex struct{ name string } @@ -59,11 +61,17 @@ func (*lookupIndex) Verify(vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool func (*lookupIndex) Map(vindexes.VCursor, []sqltypes.Value) ([][]byte, error) { return nil, nil } func (*lookupIndex) Create(vindexes.VCursor, [][]sqltypes.Value, [][]byte, bool) error { return nil } func (*lookupIndex) Delete(vindexes.VCursor, [][]sqltypes.Value, []byte) error { return nil } +func (*lookupIndex) Update(vindexes.VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error { + return nil +} func newLookupIndex(name string, _ map[string]string) (vindexes.Vindex, error) { return &lookupIndex{name: name}, nil } +var _ vindexes.Unique = (*lookupIndex)(nil) +var _ vindexes.Lookup = (*lookupIndex)(nil) + // multiIndex satisfies Lookup, NonUnique. 
type multiIndex struct{ name string } @@ -72,14 +80,20 @@ func (*multiIndex) Cost() int { return 3 } func (*multiIndex) Verify(vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { return []bool{}, nil } -func (*multiIndex) Map(vindexes.VCursor, []sqltypes.Value) ([][][]byte, error) { return nil, nil } +func (*multiIndex) Map(vindexes.VCursor, []sqltypes.Value) ([]vindexes.Ksids, error) { return nil, nil } func (*multiIndex) Create(vindexes.VCursor, [][]sqltypes.Value, [][]byte, bool) error { return nil } func (*multiIndex) Delete(vindexes.VCursor, [][]sqltypes.Value, []byte) error { return nil } +func (*multiIndex) Update(vindexes.VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error { + return nil +} func newMultiIndex(name string, _ map[string]string) (vindexes.Vindex, error) { return &multiIndex{name: name}, nil } +var _ vindexes.NonUnique = (*multiIndex)(nil) +var _ vindexes.Lookup = (*multiIndex)(nil) + // costlyIndex satisfies Lookup, NonUnique. type costlyIndex struct{ name string } @@ -88,14 +102,20 @@ func (*costlyIndex) Cost() int { return 10 } func (*costlyIndex) Verify(vindexes.VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { return []bool{}, nil } -func (*costlyIndex) Map(vindexes.VCursor, []sqltypes.Value) ([][][]byte, error) { return nil, nil } +func (*costlyIndex) Map(vindexes.VCursor, []sqltypes.Value) ([]vindexes.Ksids, error) { return nil, nil } func (*costlyIndex) Create(vindexes.VCursor, [][]sqltypes.Value, [][]byte, bool) error { return nil } func (*costlyIndex) Delete(vindexes.VCursor, [][]sqltypes.Value, []byte) error { return nil } +func (*costlyIndex) Update(vindexes.VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error { + return nil +} func newCostlyIndex(name string, _ map[string]string) (vindexes.Vindex, error) { return &costlyIndex{name: name}, nil } +var _ vindexes.NonUnique = (*costlyIndex)(nil) +var _ vindexes.Lookup = (*costlyIndex)(nil) + func init() { vindexes.Register("hash_test", newHashIndex) 
vindexes.Register("lookup_test", newLookupIndex) diff --git a/go/vt/vtgate/planbuilder/vindex_func.go b/go/vt/vtgate/planbuilder/vindex_func.go index 3d03ad269fa..9aaa6fd3b00 100644 --- a/go/vt/vtgate/planbuilder/vindex_func.go +++ b/go/vt/vtgate/planbuilder/vindex_func.go @@ -172,6 +172,10 @@ func (vf *vindexFunc) PushSelect(expr *sqlparser.AliasedExpr, _ columnOriginator vf.eVindexFunc.Cols = append(vf.eVindexFunc.Cols, 0) case col.Name.EqualString("keyspace_id"): vf.eVindexFunc.Cols = append(vf.eVindexFunc.Cols, 1) + case col.Name.EqualString("range_start"): + vf.eVindexFunc.Cols = append(vf.eVindexFunc.Cols, 2) + case col.Name.EqualString("range_end"): + vf.eVindexFunc.Cols = append(vf.eVindexFunc.Cols, 3) default: return nil, 0, fmt.Errorf("unrecognized column %s for vindex: %s", col.Name, vf.eVindexFunc.Vindex) } diff --git a/go/vt/vtgate/resolver.go b/go/vt/vtgate/resolver.go index e00f5fd207a..09876b09206 100644 --- a/go/vt/vtgate/resolver.go +++ b/go/vt/vtgate/resolver.go @@ -46,17 +46,19 @@ var ( // Resolver is the layer to resolve KeyspaceIds and KeyRanges // to shards. It will try to re-resolve shards if ScatterConn // returns retryable error, which may imply horizontal or vertical -// resharding happened. +// resharding happened. It is implemented using a srvtopo.Resolver. type Resolver struct { scatterConn *ScatterConn + resolver *srvtopo.Resolver toposerv srvtopo.Server cell string } // NewResolver creates a new Resolver. 
-func NewResolver(serv srvtopo.Server, cell string, sc *ScatterConn) *Resolver { +func NewResolver(resolver *srvtopo.Resolver, serv srvtopo.Server, cell string, sc *ScatterConn) *Resolver { return &Resolver{ scatterConn: sc, + resolver: resolver, toposerv: serv, cell: cell, } @@ -75,12 +77,10 @@ func (res *Resolver) ExecuteKeyspaceIds(ctx context.Context, sql string, bindVar if sqlparser.IsDML(sql) && len(keyspaceIds) > 1 { return nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "DML should not span multiple keyspace_ids") } - mapToShards := func(k string) (string, []string, error) { - return srvtopo.MapKeyspaceIdsToShards( + mapToShards := func() ([]*srvtopo.ResolvedShard, error) { + return res.resolver.ResolveKeyspaceIds( ctx, - res.toposerv, - res.cell, - k, + keyspace, tabletType, keyspaceIds) } @@ -90,12 +90,10 @@ func (res *Resolver) ExecuteKeyspaceIds(ctx context.Context, sql string, bindVar // ExecuteKeyRanges executes a non-streaming query based on KeyRanges. // It retries query if new keyspace/shards are re-resolved after a retryable error. 
func (res *Resolver) ExecuteKeyRanges(ctx context.Context, sql string, bindVariables map[string]*querypb.BindVariable, keyspace string, keyRanges []*topodatapb.KeyRange, tabletType topodatapb.TabletType, session *vtgatepb.Session, notInTransaction bool, options *querypb.ExecuteOptions) (*sqltypes.Result, error) { - mapToShards := func(k string) (string, []string, error) { - return srvtopo.MapKeyRangesToShards( + mapToShards := func() ([]*srvtopo.ResolvedShard, error) { + return res.resolver.ResolveKeyRanges( ctx, - res.toposerv, - res.cell, - k, + keyspace, tabletType, keyRanges) } @@ -111,47 +109,37 @@ func (res *Resolver) Execute( keyspace string, tabletType topodatapb.TabletType, session *vtgatepb.Session, - mapToShards func(string) (string, []string, error), + mapToShards func() ([]*srvtopo.ResolvedShard, error), notInTransaction bool, options *querypb.ExecuteOptions, logStats *LogStats, ) (*sqltypes.Result, error) { - keyspace, shards, err := mapToShards(keyspace) + rss, err := mapToShards() if err != nil { return nil, err } if logStats != nil { - logStats.ShardQueries = uint32(len(shards)) + logStats.ShardQueries = uint32(len(rss)) } for { - qr, err := res.scatterConn.Execute( + qr, err := res.scatterConn.Execute2( ctx, sql, bindVars, - keyspace, - shards, + rss, tabletType, NewSafeSession(session), notInTransaction, options) if isRetryableError(err) { - resharding := false - newKeyspace, newShards, err := mapToShards(keyspace) + newRss, err := mapToShards() if err != nil { return nil, err } - // check keyspace change for vertical resharding - if newKeyspace != keyspace { - keyspace = newKeyspace - resharding = true - } - // check shards change for horizontal resharding - if !StrsEquals(newShards, shards) { - shards = newShards - resharding = true - } - // retry if resharding happened - if resharding { + if !srvtopo.ResolvedShardsEqual(rss, newRss) { + // If the mapping to underlying shards changed, + // we might be resharding. Try again. 
+ rss = newRss continue } } @@ -176,22 +164,19 @@ func (res *Resolver) ExecuteEntityIds( notInTransaction bool, options *querypb.ExecuteOptions, ) (*sqltypes.Result, error) { - newKeyspace, shardIDMap, err := srvtopo.MapEntityIdsToShards( + rss, values, err := res.resolver.ResolveEntityIds( ctx, - res.toposerv, - res.cell, keyspace, entityKeyspaceIDs, tabletType) if err != nil { return nil, err } - keyspace = newKeyspace - shards, sqls, bindVars := buildEntityIds(shardIDMap, sql, entityColumnName, bindVariables) for { + sqls, bindVars := buildEntityIds(values, sql, entityColumnName, bindVariables) qr, err := res.scatterConn.ExecuteEntityIds( ctx, - shards, + rss, sqls, bindVars, keyspace, @@ -200,32 +185,18 @@ func (res *Resolver) ExecuteEntityIds( notInTransaction, options) if isRetryableError(err) { - resharding := false - newKeyspace, newShardIDMap, err := srvtopo.MapEntityIdsToShards( + newRss, newValues, err := res.resolver.ResolveEntityIds( ctx, - res.toposerv, - res.cell, keyspace, entityKeyspaceIDs, tabletType) if err != nil { return nil, err } - // check keyspace change for vertical resharding - if newKeyspace != keyspace { - keyspace = newKeyspace - resharding = true - } - // check shards change for horizontal resharding - newShards, newSqls, newBindVars := buildEntityIds(newShardIDMap, sql, entityColumnName, bindVariables) - if !StrsEquals(newShards, shards) { - shards = newShards - sqls = newSqls - bindVars = newBindVars - resharding = true - } - // retry if resharding happened - if resharding { + if !srvtopo.ResolvedShardsEqual(rss, newRss) || !srvtopo.ValuesEqual(values, newValues) { + // Retry if resharding happened. + rss = newRss + values = newValues continue } } @@ -236,37 +207,6 @@ func (res *Resolver) ExecuteEntityIds( } } -// boundKeyspaceIDQueriesToBoundShardQueries is a helper used by -// ExecuteBatchKeyspaceIds. 
-func boundKeyspaceIDQueriesToBoundShardQueries(ctx context.Context, topoServ srvtopo.Server, cell string, tabletType topodatapb.TabletType, idQueries []*vtgatepb.BoundKeyspaceIdQuery) ([]*vtgatepb.BoundShardQuery, error) { - shardQueries := make([]*vtgatepb.BoundShardQuery, len(idQueries)) - for i, idQuery := range idQueries { - keyspace, shards, err := srvtopo.MapKeyspaceIdsToShards(ctx, topoServ, cell, idQuery.Keyspace, tabletType, idQuery.KeyspaceIds) - if err != nil { - return nil, err - } - shardQueries[i] = &vtgatepb.BoundShardQuery{ - Query: idQuery.Query, - Keyspace: keyspace, - Shards: shards, - } - } - return shardQueries, nil -} - -// ExecuteBatchKeyspaceIds executes a group of queries based on KeyspaceIds. -// It retries query if new keyspace/shards are re-resolved after a retryable error. -func (res *Resolver) ExecuteBatchKeyspaceIds(ctx context.Context, queries []*vtgatepb.BoundKeyspaceIdQuery, tabletType topodatapb.TabletType, asTransaction bool, session *vtgatepb.Session, options *querypb.ExecuteOptions) ([]sqltypes.Result, error) { - buildBatchRequest := func() (*scatterBatchRequest, error) { - shardQueries, err := boundKeyspaceIDQueriesToBoundShardQueries(ctx, res.toposerv, res.cell, tabletType, queries) - if err != nil { - return nil, err - } - return boundShardQueriesToScatterBatchRequest(shardQueries) - } - return res.ExecuteBatch(ctx, tabletType, asTransaction, session, options, buildBatchRequest) -} - // ExecuteBatch executes a group of queries based on shards resolved by given func. // It retries query if new keyspace/shards are re-resolved after a retryable error. func (res *Resolver) ExecuteBatch( @@ -318,11 +258,9 @@ func (res *Resolver) ExecuteBatch( // response which is needed for checkpointing. // The api supports supplying multiple KeyspaceIds to make it future proof. 
func (res *Resolver) StreamExecuteKeyspaceIds(ctx context.Context, sql string, bindVariables map[string]*querypb.BindVariable, keyspace string, keyspaceIds [][]byte, tabletType topodatapb.TabletType, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error { - mapToShards := func(k string) (string, []string, error) { - return srvtopo.MapKeyspaceIdsToShards( + mapToShards := func(k string) ([]*srvtopo.ResolvedShard, error) { + return res.resolver.ResolveKeyspaceIds( ctx, - res.toposerv, - res.cell, k, tabletType, keyspaceIds) @@ -337,11 +275,9 @@ func (res *Resolver) StreamExecuteKeyspaceIds(ctx context.Context, sql string, b // response which is needed for checkpointing. // The api supports supplying multiple keyranges to make it future proof. func (res *Resolver) StreamExecuteKeyRanges(ctx context.Context, sql string, bindVariables map[string]*querypb.BindVariable, keyspace string, keyRanges []*topodatapb.KeyRange, tabletType topodatapb.TabletType, options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error) error { - mapToShards := func(k string) (string, []string, error) { - return srvtopo.MapKeyRangesToShards( + mapToShards := func(k string) ([]*srvtopo.ResolvedShard, error) { + return res.resolver.ResolveKeyRanges( ctx, - res.toposerv, - res.cell, k, tabletType, keyRanges) @@ -359,11 +295,11 @@ func (res *Resolver) streamExecute( bindVars map[string]*querypb.BindVariable, keyspace string, tabletType topodatapb.TabletType, - mapToShards func(string) (string, []string, error), + mapToShards func(string) ([]*srvtopo.ResolvedShard, error), options *querypb.ExecuteOptions, callback func(*sqltypes.Result) error, ) error { - keyspace, shards, err := mapToShards(keyspace) + rss, err := mapToShards(keyspace) if err != nil { return err } @@ -371,8 +307,7 @@ func (res *Resolver) streamExecute( ctx, sql, bindVars, - keyspace, - shards, + rss, tabletType, options, callback) @@ -381,70 +316,64 @@ func (res *Resolver) streamExecute( // 
MessageStream streams messages. func (res *Resolver) MessageStream(ctx context.Context, keyspace string, shard string, keyRange *topodatapb.KeyRange, name string, callback func(*sqltypes.Result) error) error { - var shards []string + var rss []*srvtopo.ResolvedShard var err error if shard != "" { - // If we pass in a shard, resolve the keyspace following redirects. - keyspace, _, _, err = srvtopo.GetKeyspaceShards(ctx, res.toposerv, res.cell, keyspace, topodatapb.TabletType_MASTER) - shards = []string{shard} + // If we pass in a shard, resolve the keyspace/shard + // following redirects. + rss, err = res.resolver.ResolveShards(ctx, keyspace, []string{shard}, topodatapb.TabletType_MASTER) } else { // If we pass in a KeyRange, resolve it to the proper shards. // Note we support multiple shards here, we will just aggregate // the message streams. - keyspace, shards, err = srvtopo.MapExactShards(ctx, res.toposerv, res.cell, keyspace, topodatapb.TabletType_MASTER, keyRange) + rss, err = res.resolver.ResolveExactShards(ctx, keyspace, topodatapb.TabletType_MASTER, keyRange) } if err != nil { return err } - return res.scatterConn.MessageStream(ctx, keyspace, shards, name, callback) + return res.scatterConn.MessageStream(ctx, rss, name, callback) } // MessageAckKeyspaceIds routes message acks based on the associated keyspace ids. 
func (res *Resolver) MessageAckKeyspaceIds(ctx context.Context, keyspace, name string, idKeyspaceIDs []*vtgatepb.IdKeyspaceId) (int64, error) { - newKeyspace, _, allShards, err := srvtopo.GetKeyspaceShards(ctx, res.toposerv, res.cell, keyspace, topodatapb.TabletType_MASTER) + ids := make([]*querypb.Value, len(idKeyspaceIDs)) + ksids := make([][]byte, len(idKeyspaceIDs)) + for i, iki := range idKeyspaceIDs { + ids[i] = iki.Id + ksids[i] = iki.KeyspaceId + } + + rss, values, err := res.resolver.ResolveKeyspaceIdsValues(ctx, keyspace, ids, ksids, topodatapb.TabletType_MASTER) if err != nil { return 0, err } - shardIDs := make(map[string][]*querypb.Value) - for _, idKeyspaceID := range idKeyspaceIDs { - shard, err := srvtopo.GetShardForKeyspaceID(allShards, idKeyspaceID.KeyspaceId) - if err != nil { - return 0, err - } - shardIDs[shard] = append(shardIDs[shard], idKeyspaceID.Id) - } - return res.scatterConn.MessageAck(ctx, newKeyspace, shardIDs, name) + return res.scatterConn.MessageAck(ctx, rss, values, name) } // UpdateStream streams the events. // TODO(alainjobart): Implement the multi-shards merge code. func (res *Resolver) UpdateStream(ctx context.Context, keyspace string, shard string, keyRange *topodatapb.KeyRange, tabletType topodatapb.TabletType, timestamp int64, event *querypb.EventToken, callback func(*querypb.StreamEvent, int64) error) error { + var rs *srvtopo.ResolvedShard if shard != "" { - // If we pass in a shard, resolve the keyspace following redirects. - var err error - keyspace, _, _, err = srvtopo.GetKeyspaceShards(ctx, res.toposerv, res.cell, keyspace, tabletType) + // If we pass in a shard, resolve the keyspace/shard + // following redirects. + rss, err := res.resolver.ResolveShards(ctx, keyspace, []string{shard}, tabletType) if err != nil { return err } + rs = rss[0] } else { - // If we pass in a KeyRange, resolve it to one shard only for now. 
- var shards []string - var err error - keyspace, shards, err = srvtopo.MapExactShards( - ctx, - res.toposerv, - res.cell, - keyspace, - tabletType, - keyRange) + // If we pass in a KeyRange, resolve it to one shard + // only for now. + rss, err := res.resolver.ResolveExactShards(ctx, keyspace, tabletType, keyRange) if err != nil { return err } - if len(shards) != 1 { - return fmt.Errorf("UpdateStream only supports exactly one shard per keyrange at the moment, but provided keyrange %v maps to %v", keyRange, shards) + if len(rss) != 1 { + return fmt.Errorf("UpdateStream only supports exactly one shard per keyrange at the moment, but provided keyrange %v maps to %v shards", keyRange, len(rss)) } - shard = shards[0] + rs = rss[0] } // Just send it to ScatterConn. With just one connection, the @@ -455,12 +384,7 @@ func (res *Resolver) UpdateStream(ctx context.Context, keyspace string, shard st position = event.Position timestamp = 0 } - target := &querypb.Target{ - Keyspace: keyspace, - Shard: shard, - TabletType: tabletType, - } - return res.scatterConn.UpdateStream(ctx, target, timestamp, position, func(se *querypb.StreamEvent) error { + return res.scatterConn.UpdateStream(ctx, rs, timestamp, position, func(se *querypb.StreamEvent) error { var timestamp int64 if se.EventToken != nil { timestamp = se.EventToken.Timestamp @@ -490,32 +414,29 @@ func StrsEquals(a, b []string) bool { return true } -func buildEntityIds(shardIDMap map[string][]*querypb.Value, qSQL, entityColName string, qBindVars map[string]*querypb.BindVariable) ([]string, map[string]string, map[string]map[string]*querypb.BindVariable) { - shards := make([]string, len(shardIDMap)) - shardsIdx := 0 - sqls := make(map[string]string) - bindVars := make(map[string]map[string]*querypb.BindVariable) - for shard, values := range shardIDMap { +// buildEntityIds populates SQL and BindVariables. 
+func buildEntityIds(values [][]*querypb.Value, qSQL, entityColName string, qBindVars map[string]*querypb.BindVariable) ([]string, []map[string]*querypb.BindVariable) { + sqls := make([]string, len(values)) + bindVars := make([]map[string]*querypb.BindVariable, len(values)) + for i, val := range values { var b bytes.Buffer b.Write([]byte(entityColName)) - bindVar := make(map[string]*querypb.BindVariable) + bindVariables := make(map[string]*querypb.BindVariable) for k, v := range qBindVars { - bindVar[k] = v + bindVariables[k] = v } bvName := fmt.Sprintf("%v_entity_ids", entityColName) - bindVar[bvName] = &querypb.BindVariable{ + bindVariables[bvName] = &querypb.BindVariable{ Type: querypb.Type_TUPLE, - Values: values, + Values: val, } b.Write(inOperator) b.Write(sqlListIdentifier) b.Write([]byte(bvName)) - bindVars[shard] = bindVar - sqls[shard] = insertSQLClause(qSQL, b.String()) - shards[shardsIdx] = shard - shardsIdx++ + sqls[i] = insertSQLClause(qSQL, b.String()) + bindVars[i] = bindVariables } - return shards, sqls, bindVars + return sqls, bindVars } func insertSQLClause(querySQL, clause string) string { diff --git a/go/vt/vtgate/resolver_test.go b/go/vt/vtgate/resolver_test.go index b69760f369d..77015457c04 100644 --- a/go/vt/vtgate/resolver_test.go +++ b/go/vt/vtgate/resolver_test.go @@ -19,7 +19,6 @@ limitations under the License. 
package vtgate import ( - "encoding/json" "fmt" "reflect" "sort" @@ -95,22 +94,25 @@ func TestResolverExecuteEntityIds(t *testing.T) { func TestResolverExecuteBatchKeyspaceIds(t *testing.T) { testResolverGeneric(t, "TestResolverExecuteBatchKeyspaceIds", func(res *Resolver) (*sqltypes.Result, error) { - qrs, err := res.ExecuteBatchKeyspaceIds(context.Background(), - []*vtgatepb.BoundKeyspaceIdQuery{{ - Query: &querypb.BoundQuery{ - Sql: "query", - BindVariables: nil, - }, - Keyspace: "TestResolverExecuteBatchKeyspaceIds", - KeyspaceIds: [][]byte{ - {0x10}, - {0x25}, - }, - }}, + queries := []*vtgatepb.BoundKeyspaceIdQuery{{ + Query: &querypb.BoundQuery{ + Sql: "query", + BindVariables: nil, + }, + Keyspace: "TestResolverExecuteBatchKeyspaceIds", + KeyspaceIds: [][]byte{ + {0x10}, + {0x25}, + }, + }} + qrs, err := res.ExecuteBatch(context.Background(), topodatapb.TabletType_MASTER, false, nil, - nil) + nil, + func() (*scatterBatchRequest, error) { + return boundKeyspaceIDQueriesToScatterBatchRequest(context.Background(), res.resolver, queries, topodatapb.TabletType_MASTER) + }) if err != nil { return nil, err } @@ -215,10 +217,10 @@ func testResolverGeneric(t *testing.T, name string, action func(res *Resolver) ( sbc0.MustFailCodes[vtrpcpb.Code_INVALID_ARGUMENT] = 1 sbc1.MustFailCodes[vtrpcpb.Code_INTERNAL] = 1 _, err = action(res) - want1 := fmt.Sprintf("target: %s.-20.master, used tablet: aa-0 (-20), INVALID_ARGUMENT error", name) - want2 := fmt.Sprintf("target: %s.20-40.master, used tablet: aa-0 (20-40), INTERNAL error", name) - want := []string{want1, want2} - sort.Strings(want) + want := []string{ + fmt.Sprintf("target: %s.-20.master, used tablet: aa-0 (-20), INVALID_ARGUMENT error", name), + fmt.Sprintf("target: %s.20-40.master, used tablet: aa-0 (20-40), INTERNAL error", name), + } if err == nil { t.Errorf("want\n%v\ngot\n%v", want, err) } else { @@ -248,10 +250,10 @@ func testResolverGeneric(t *testing.T, name string, action func(res *Resolver) ( 
sbc0.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 sbc1.MustFailCodes[vtrpcpb.Code_FAILED_PRECONDITION] = 1 _, err = action(res) - want1 = fmt.Sprintf("target: %s.-20.master, used tablet: aa-0 (-20), FAILED_PRECONDITION error", name) - want2 = fmt.Sprintf("target: %s.20-40.master, used tablet: aa-0 (20-40), FAILED_PRECONDITION error", name) - want = []string{want1, want2} - sort.Strings(want) + want = []string{ + fmt.Sprintf("target: %s.-20.master, used tablet: aa-0 (-20), FAILED_PRECONDITION error", name), + fmt.Sprintf("target: %s.20-40.master, used tablet: aa-0 (20-40), FAILED_PRECONDITION error", name), + } if err == nil { t.Errorf("want\n%v\ngot\n%v", want, err) } else { @@ -524,38 +526,37 @@ func TestResolverInsertSqlClause(t *testing.T) { } func TestResolverBuildEntityIds(t *testing.T) { - shardMap := map[string][]*querypb.Value{ - "-20": {{ - Type: querypb.Type_VARCHAR, - Value: []byte("0"), - }, { - Type: querypb.Type_INT64, - Value: []byte("1"), - }}, - "20-40": {{ - Type: querypb.Type_VARCHAR, - Value: []byte("2"), - }}, + values := [][]*querypb.Value{ + { + { + Type: querypb.Type_VARCHAR, + Value: []byte("0"), + }, + { + Type: querypb.Type_INT64, + Value: []byte("1"), + }, + }, + { + { + Type: querypb.Type_VARCHAR, + Value: []byte("2"), + }, + }, } sql := "select a from table where id=:id" entityColName := "uid" bindVar := map[string]*querypb.BindVariable{ "id": sqltypes.Int64BindVariable(10), } - shards, sqls, bindVars := buildEntityIds(shardMap, sql, entityColName, bindVar) - wantShards := []string{"-20", "20-40"} - wantSqls := map[string]string{ - "-20": "select a from table where id=:id and uid in ::uid_entity_ids", - "20-40": "select a from table where id=:id and uid in ::uid_entity_ids", - } - wantBindVars := map[string]map[string]*querypb.BindVariable{ - "-20": {"id": sqltypes.Int64BindVariable(10), "uid_entity_ids": sqltypes.TestBindVariable([]interface{}{"0", 1})}, - "20-40": {"id": sqltypes.Int64BindVariable(10), "uid_entity_ids": 
sqltypes.TestBindVariable([]interface{}{"2"})}, + sqls, bindVars := buildEntityIds(values, sql, entityColName, bindVar) + wantSqls := []string{ + "select a from table where id=:id and uid in ::uid_entity_ids", + "select a from table where id=:id and uid in ::uid_entity_ids", } - sort.Strings(wantShards) - sort.Strings(shards) - if !reflect.DeepEqual(wantShards, shards) { - t.Errorf("want %+v, got %+v", wantShards, shards) + wantBindVars := []map[string]*querypb.BindVariable{ + {"id": sqltypes.Int64BindVariable(10), "uid_entity_ids": sqltypes.TestBindVariable([]interface{}{"0", 1})}, + {"id": sqltypes.Int64BindVariable(10), "uid_entity_ids": sqltypes.TestBindVariable([]interface{}{"2"})}, } if !reflect.DeepEqual(wantSqls, sqls) { t.Errorf("want %+v, got %+v", wantSqls, sqls) @@ -608,7 +609,7 @@ func TestResolverExecBatchReresolve(t *testing.T) { Keyspace: keyspace, Shards: []string{"0"}, }} - return boundShardQueriesToScatterBatchRequest(queries) + return boundShardQueriesToScatterBatchRequest(context.Background(), res.resolver, queries, topodatapb.TabletType_MASTER) } _, err := res.ExecuteBatch(context.Background(), topodatapb.TabletType_MASTER, false, nil, nil, buildBatchRequest) @@ -645,7 +646,7 @@ func TestResolverExecBatchAsTransaction(t *testing.T) { Keyspace: keyspace, Shards: []string{"0"}, }} - return boundShardQueriesToScatterBatchRequest(queries) + return boundShardQueriesToScatterBatchRequest(context.Background(), res.resolver, queries, topodatapb.TabletType_MASTER) } _, err := res.ExecuteBatch(context.Background(), topodatapb.TabletType_MASTER, true, nil, nil, buildBatchRequest) @@ -665,76 +666,6 @@ func TestResolverExecBatchAsTransaction(t *testing.T) { func newTestResolver(hc discovery.HealthCheck, serv srvtopo.Server, cell string) *Resolver { sc := newTestScatterConn(hc, serv, cell) - return NewResolver(serv, cell, sc) -} - -func TestBoundKeyspaceIdQueriesToBoundShardQueries(t *testing.T) { - ts := new(sandboxTopo) - kid10 := []byte{0x10} - kid25 := 
[]byte{0x25} - var testCases = []struct { - idQueries []*vtgatepb.BoundKeyspaceIdQuery - shardQueries []*vtgatepb.BoundShardQuery - }{ - { - idQueries: []*vtgatepb.BoundKeyspaceIdQuery{ - { - Query: &querypb.BoundQuery{ - Sql: "q1", - BindVariables: map[string]*querypb.BindVariable{ - "q1var": sqltypes.Int64BindVariable(1), - }, - }, - Keyspace: KsTestSharded, - KeyspaceIds: [][]byte{kid10, kid25}, - }, { - Query: &querypb.BoundQuery{ - Sql: "q2", - BindVariables: map[string]*querypb.BindVariable{ - "q2var": sqltypes.Int64BindVariable(2), - }, - }, - Keyspace: KsTestSharded, - KeyspaceIds: [][]byte{kid25, kid25}, - }, - }, - shardQueries: []*vtgatepb.BoundShardQuery{ - { - Query: &querypb.BoundQuery{ - Sql: "q1", - BindVariables: map[string]*querypb.BindVariable{ - "q1var": sqltypes.Int64BindVariable(1), - }, - }, - Keyspace: KsTestSharded, - Shards: []string{"-20", "20-40"}, - }, { - Query: &querypb.BoundQuery{ - Sql: "q2", - BindVariables: map[string]*querypb.BindVariable{ - "q2var": sqltypes.Int64BindVariable(2), - }, - }, - Keyspace: KsTestSharded, - Shards: []string{"20-40"}, - }, - }, - }, - } - - for _, testCase := range testCases { - shardQueries, err := boundKeyspaceIDQueriesToBoundShardQueries(context.Background(), ts, "", topodatapb.TabletType_MASTER, testCase.idQueries) - if err != nil { - t.Error(err) - } - // Sort shards, because they're random otherwise. 
- for _, shardQuery := range shardQueries { - sort.Strings(shardQuery.Shards) - } - got, _ := json.Marshal(shardQueries) - want, _ := json.Marshal(testCase.shardQueries) - if string(got) != string(want) { - t.Errorf("idQueries: %#v\nResponse: %s\nExpecting: %s", testCase.idQueries, string(got), string(want)) - } - } + srvResolver := srvtopo.NewResolver(serv, sc.gateway, cell) + return NewResolver(srvResolver, serv, cell, sc) } diff --git a/go/vt/vtgate/scatter_conn.go b/go/vt/vtgate/scatter_conn.go index 167102652d8..b2e5c4d776e 100644 --- a/go/vt/vtgate/scatter_conn.go +++ b/go/vt/vtgate/scatter_conn.go @@ -29,6 +29,7 @@ import ( "github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/vt/concurrency" "github.com/youtube/vitess/go/vt/discovery" + "github.com/youtube/vitess/go/vt/srvtopo" "github.com/youtube/vitess/go/vt/topo/topoproto" "github.com/youtube/vitess/go/vt/vterrors" "github.com/youtube/vitess/go/vt/vtgate/gateway" @@ -61,6 +62,14 @@ type ScatterConn struct { // consolidating the results and errors for the caller. type shardActionFunc func(target *querypb.Target) error +// shardActionFunc2 defines the contract for a shard action +// outside of a transaction. Every such function executes the +// necessary action on a shard, sends the results to sResults, and +// return an error if any. multiGo is capable of executing +// multiple shardActionFunc actions in parallel and +// consolidating the results and errors for the caller. +type shardActionFunc2 func(rs *srvtopo.ResolvedShard, i int) error + // shardActionTransactionFunc defines the contract for a shard action // that may be in a transaction. Every such function executes the // necessary action on a shard (with an optional Begin call), aggregates @@ -70,6 +79,15 @@ type shardActionFunc func(target *querypb.Target) error // the results and errors for the caller. 
type shardActionTransactionFunc func(target *querypb.Target, shouldBegin bool, transactionID int64) (int64, error) +// shardActionTransactionFunc2 defines the contract for a shard action +// that may be in a transaction. Every such function executes the +// necessary action on a shard (with an optional Begin call), aggregates +// the results, and return an error if any. +// multiGoTransaction is capable of executing multiple +// shardActionTransactionFunc2 actions in parallel and consolidating +// the results and errors for the caller. +type shardActionTransactionFunc2 func(rs *srvtopo.ResolvedShard, i int, shouldBegin bool, transactionID int64) (int64, error) + // NewScatterConn creates a new ScatterConn. func NewScatterConn(statsName string, txConn *TxConn, gw gateway.Gateway, hc discovery.HealthCheck) *ScatterConn { tabletCallErrorCountStatsName := "" @@ -157,6 +175,53 @@ func (stc *ScatterConn) Execute( return qr, err } +// Execute2 executes a non-streaming query on the specified shards. 
+func (stc *ScatterConn) Execute2( + ctx context.Context, + query string, + bindVars map[string]*querypb.BindVariable, + rss []*srvtopo.ResolvedShard, + tabletType topodatapb.TabletType, + session *SafeSession, + notInTransaction bool, + options *querypb.ExecuteOptions, +) (*sqltypes.Result, error) { + + // mu protects qr + var mu sync.Mutex + qr := new(sqltypes.Result) + + err := stc.multiGoTransaction2( + ctx, + "Execute", + rss, + tabletType, + session, + notInTransaction, + func(rs *srvtopo.ResolvedShard, i int, shouldBegin bool, transactionID int64) (int64, error) { + var innerqr *sqltypes.Result + if shouldBegin { + var err error + innerqr, transactionID, err = rs.QueryService.BeginExecute(ctx, rs.Target, query, bindVars, options) + if err != nil { + return transactionID, err + } + } else { + var err error + innerqr, err = rs.QueryService.Execute(ctx, rs.Target, query, bindVars, transactionID, options) + if err != nil { + return transactionID, err + } + } + + mu.Lock() + defer mu.Unlock() + qr.AppendResult(innerqr) + return transactionID, nil + }) + return qr, err +} + // ExecuteMultiShard is like Execute, // but each shard gets its own Sql Queries and BindVariables. func (stc *ScatterConn) ExecuteMultiShard( @@ -234,9 +299,9 @@ func (stc *ScatterConn) executeAutocommit(ctx context.Context, target *querypb.T // ExecuteEntityIds executes queries that are shard specific. 
func (stc *ScatterConn) ExecuteEntityIds( ctx context.Context, - shards []string, - sqls map[string]string, - bindVars map[string]map[string]*querypb.BindVariable, + rss []*srvtopo.ResolvedShard, + sqls []string, + bindVars []map[string]*querypb.BindVariable, keyspace string, tabletType topodatapb.TabletType, session *SafeSession, @@ -248,27 +313,25 @@ func (stc *ScatterConn) ExecuteEntityIds( var mu sync.Mutex qr := new(sqltypes.Result) - err := stc.multiGoTransaction( + err := stc.multiGoTransaction2( ctx, "ExecuteEntityIds", - keyspace, - shards, + rss, tabletType, session, notInTransaction, - func(target *querypb.Target, shouldBegin bool, transactionID int64) (int64, error) { - sql := sqls[target.Shard] + func(rs *srvtopo.ResolvedShard, i int, shouldBegin bool, transactionID int64) (int64, error) { var innerqr *sqltypes.Result if shouldBegin { var err error - innerqr, transactionID, err = stc.gateway.BeginExecute(ctx, target, sql, bindVars[target.Shard], options) + innerqr, transactionID, err = rs.QueryService.BeginExecute(ctx, rs.Target, sqls[i], bindVars[i], options) if err != nil { return transactionID, err } } else { var err error - innerqr, err = stc.gateway.Execute(ctx, target, sql, bindVars[target.Shard], transactionID, options) + innerqr, err = rs.QueryService.Execute(ctx, rs.Target, sqls[i], bindVars[i], transactionID, options) if err != nil { return transactionID, err } @@ -289,34 +352,71 @@ func (stc *ScatterConn) ExecuteEntityIds( // list. In each request variable, the resultIndexes specifies the position // for each result from the shard. type scatterBatchRequest struct { - Length int - Requests map[string]*shardBatchRequest + // length is the total number of queries we have. + length int + // requests maps the 'keyspace:shard' key to the structure below. 
+ requests map[string]*shardBatchRequest } type shardBatchRequest struct { - Queries []*querypb.BoundQuery - Keyspace, Shard string - ResultIndexes []int + // rs is the ResolvedShard to send the queries to. + rs *srvtopo.ResolvedShard + // queries are the queries to send to that ResolvedShard. + queries []*querypb.BoundQuery + // resultIndexes describes the index of the query and its results + // into the full original query array. + resultIndexes []int +} + +func boundShardQueriesToScatterBatchRequest(ctx context.Context, resolver *srvtopo.Resolver, boundQueries []*vtgatepb.BoundShardQuery, tabletType topodatapb.TabletType) (*scatterBatchRequest, error) { + requests := &scatterBatchRequest{ + length: len(boundQueries), + requests: make(map[string]*shardBatchRequest), + } + for i, boundQuery := range boundQueries { + rss, err := resolver.ResolveShards(ctx, boundQuery.Keyspace, boundQuery.Shards, tabletType) + if err != nil { + return nil, err + } + + for _, rs := range rss { + key := rs.Target.Keyspace + ":" + rs.Target.Shard + request := requests.requests[key] + if request == nil { + request = &shardBatchRequest{ + rs: rs, + } + requests.requests[key] = request + } + request.queries = append(request.queries, boundQuery.Query) + request.resultIndexes = append(request.resultIndexes, i) + } + } + return requests, nil } -func boundShardQueriesToScatterBatchRequest(boundQueries []*vtgatepb.BoundShardQuery) (*scatterBatchRequest, error) { +func boundKeyspaceIDQueriesToScatterBatchRequest(ctx context.Context, resolver *srvtopo.Resolver, boundQueries []*vtgatepb.BoundKeyspaceIdQuery, tabletType topodatapb.TabletType) (*scatterBatchRequest, error) { requests := &scatterBatchRequest{ - Length: len(boundQueries), - Requests: make(map[string]*shardBatchRequest), + length: len(boundQueries), + requests: make(map[string]*shardBatchRequest), } for i, boundQuery := range boundQueries { - for shard := range unique(boundQuery.Shards) { - key := boundQuery.Keyspace + ":" + shard - 
request := requests.Requests[key] + rss, err := resolver.ResolveKeyspaceIds(ctx, boundQuery.Keyspace, tabletType, boundQuery.KeyspaceIds) + if err != nil { + return nil, err + } + + for _, rs := range rss { + key := rs.Target.Keyspace + ":" + rs.Target.Shard + request := requests.requests[key] if request == nil { request = &shardBatchRequest{ - Keyspace: boundQuery.Keyspace, - Shard: shard, + rs: rs, } - requests.Requests[key] = request + requests.requests[key] = request } - request.Queries = append(request.Queries, boundQuery.Query) - request.ResultIndexes = append(request.ResultIndexes, i) + request.queries = append(request.queries, boundQuery.Query) + request.resultIndexes = append(request.resultIndexes, i) } } return requests, nil @@ -332,31 +432,25 @@ func (stc *ScatterConn) ExecuteBatch( options *querypb.ExecuteOptions) (qrs []sqltypes.Result, err error) { allErrors := new(concurrency.AllErrorRecorder) - results := make([]sqltypes.Result, batchRequest.Length) + results := make([]sqltypes.Result, batchRequest.length) var resMutex sync.Mutex var wg sync.WaitGroup - for _, req := range batchRequest.Requests { + for _, req := range batchRequest.requests { wg.Add(1) go func(req *shardBatchRequest) { defer wg.Done() - target := &querypb.Target{ - Keyspace: req.Keyspace, - Shard: req.Shard, - TabletType: tabletType, - } - var err error - startTime, statsKey := stc.startAction("ExecuteBatch", target) + startTime, statsKey := stc.startAction("ExecuteBatch", req.rs.Target) defer stc.endAction(startTime, allErrors, statsKey, &err, session) - shouldBegin, transactionID := transactionInfo(target, session, false) + shouldBegin, transactionID := transactionInfo(req.rs.Target, session, false) var innerqrs []sqltypes.Result if shouldBegin { - innerqrs, transactionID, err = stc.gateway.BeginExecuteBatch(ctx, target, req.Queries, asTransaction, options) + innerqrs, transactionID, err = req.rs.QueryService.BeginExecuteBatch(ctx, req.rs.Target, req.queries, asTransaction, 
options) if transactionID != 0 { if appendErr := session.Append(&vtgatepb.Session_ShardSession{ - Target: target, + Target: req.rs.Target, TransactionId: transactionID, }, stc.txConn.mode); appendErr != nil { err = appendErr @@ -366,7 +460,7 @@ func (stc *ScatterConn) ExecuteBatch( return } } else { - innerqrs, err = stc.gateway.ExecuteBatch(ctx, target, req.Queries, asTransaction, transactionID, options) + innerqrs, err = req.rs.QueryService.ExecuteBatch(ctx, req.rs.Target, req.queries, asTransaction, transactionID, options) if err != nil { return } @@ -375,7 +469,7 @@ func (stc *ScatterConn) ExecuteBatch( resMutex.Lock() defer resMutex.Unlock() for i, result := range innerqrs { - results[req.ResultIndexes[i]].AppendResult(&result) + results[req.resultIndexes[i]].AppendResult(&result) } }(req) } @@ -414,8 +508,7 @@ func (stc *ScatterConn) StreamExecute( ctx context.Context, query string, bindVars map[string]*querypb.BindVariable, - keyspace string, - shards []string, + rss []*srvtopo.ResolvedShard, tabletType topodatapb.TabletType, options *querypb.ExecuteOptions, callback func(reply *sqltypes.Result) error, @@ -425,8 +518,8 @@ func (stc *ScatterConn) StreamExecute( var mu sync.Mutex fieldSent := false - allErrors := stc.multiGo(ctx, "StreamExecute", keyspace, shards, tabletType, func(target *querypb.Target) error { - return stc.gateway.StreamExecute(ctx, target, query, bindVars, options, func(qr *sqltypes.Result) error { + allErrors := stc.multiGo2(ctx, "StreamExecute", rss, tabletType, func(rs *srvtopo.ResolvedShard, i int) error { + return rs.QueryService.StreamExecute(ctx, rs.Target, query, bindVars, options, func(qr *sqltypes.Result) error { return stc.processOneStreamingResult(&mu, &fieldSent, qr, callback) }) }) @@ -491,7 +584,7 @@ func (tt *timeTracker) Record(target *querypb.Target) time.Time { } // MessageStream streams messages from the specified shards. 
-func (stc *ScatterConn) MessageStream(ctx context.Context, keyspace string, shards []string, name string, callback func(*sqltypes.Result) error) error { +func (stc *ScatterConn) MessageStream(ctx context.Context, rss []*srvtopo.ResolvedShard, name string, callback func(*sqltypes.Result) error) error { // The cancelable context is used for handling errors // from individual streams. ctx, cancel := context.WithCancel(ctx) @@ -501,13 +594,13 @@ func (stc *ScatterConn) MessageStream(ctx context.Context, keyspace string, shar var mu sync.Mutex fieldSent := false lastErrors := newTimeTracker() - allErrors := stc.multiGo(ctx, "MessageStream", keyspace, shards, topodatapb.TabletType_MASTER, func(target *querypb.Target) error { + allErrors := stc.multiGo2(ctx, "MessageStream", rss, topodatapb.TabletType_MASTER, func(rs *srvtopo.ResolvedShard, i int) error { // This loop handles the case where a reparent happens, which can cause // an individual stream to end. If we don't succeed on the retries for // messageStreamGracePeriod, we abort and return an error. for { - err := stc.gateway.MessageStream(ctx, target, name, func(qr *sqltypes.Result) error { - lastErrors.Reset(target) + err := rs.QueryService.MessageStream(ctx, rs.Target, name, func(qr *sqltypes.Result) error { + lastErrors.Reset(rs.Target) return stc.processOneStreamingResult(&mu, &fieldSent, qr, callback) }) // nil and EOF are equivalent. UNAVAILABLE can be returned by vttablet if it's demoted @@ -525,11 +618,11 @@ func (stc *ScatterConn) MessageStream(ctx context.Context, keyspace string, shar return nil default: } - firstErrorTimeStamp := lastErrors.Record(target) + firstErrorTimeStamp := lastErrors.Record(rs.Target) if time.Now().Sub(firstErrorTimeStamp) >= *messageStreamGracePeriod { // Cancel all streams and return an error. 
cancel() - return vterrors.Errorf(vtrpcpb.Code_DEADLINE_EXCEEDED, "message stream from %v has repeatedly failed for longer than %v", target, *messageStreamGracePeriod) + return vterrors.Errorf(vtrpcpb.Code_DEADLINE_EXCEEDED, "message stream from %v has repeatedly failed for longer than %v", rs.Target, *messageStreamGracePeriod) } // It's not been too long since our last good send. Wait and retry. @@ -544,15 +637,11 @@ func (stc *ScatterConn) MessageStream(ctx context.Context, keyspace string, shar } // MessageAck acks messages across multiple shards. -func (stc *ScatterConn) MessageAck(ctx context.Context, keyspace string, shardIDs map[string][]*querypb.Value, name string) (int64, error) { +func (stc *ScatterConn) MessageAck(ctx context.Context, rss []*srvtopo.ResolvedShard, values [][]*querypb.Value, name string) (int64, error) { var mu sync.Mutex var totalCount int64 - shards := make([]string, 0, len(shardIDs)) - for shard := range shardIDs { - shards = append(shards, shard) - } - allErrors := stc.multiGo(ctx, "MessageAck", keyspace, shards, topodatapb.TabletType_MASTER, func(target *querypb.Target) error { - count, err := stc.gateway.MessageAck(ctx, target, name, shardIDs[target.Shard]) + allErrors := stc.multiGo2(ctx, "MessageAck", rss, topodatapb.TabletType_MASTER, func(rs *srvtopo.ResolvedShard, i int) error { + count, err := rs.QueryService.MessageAck(ctx, rs.Target, name, values[i]) if err != nil { return err } @@ -564,10 +653,10 @@ func (stc *ScatterConn) MessageAck(ctx context.Context, keyspace string, shardID return totalCount, allErrors.AggrError(vterrors.Aggregate) } -// UpdateStream just sends the query to the gateway, +// UpdateStream just sends the query to the ResolvedShard, // and sends the results back. 
-func (stc *ScatterConn) UpdateStream(ctx context.Context, target *querypb.Target, timestamp int64, position string, callback func(*querypb.StreamEvent) error) error { - return stc.gateway.UpdateStream(ctx, target, position, timestamp, callback) +func (stc *ScatterConn) UpdateStream(ctx context.Context, rs *srvtopo.ResolvedShard, timestamp int64, position string, callback func(*querypb.StreamEvent) error) error { + return rs.QueryService.UpdateStream(ctx, rs.Target, position, timestamp, callback) } // SplitQuery scatters a SplitQuery request to the shards whose names are given in 'shards'. @@ -583,10 +672,9 @@ func (stc *ScatterConn) SplitQuery( perShardSplitCount int64, numRowsPerQueryPart int64, algorithm querypb.SplitQueryRequest_Algorithm, - shards []string, + rss []*srvtopo.ResolvedShard, querySplitToQueryPartFunc func( - querySplit *querypb.QuerySplit, shard string) (*vtgatepb.SplitQueryResponse_Part, error), - keyspace string) ([]*vtgatepb.SplitQueryResponse_Part, error) { + querySplit *querypb.QuerySplit, rs *srvtopo.ResolvedShard) (*vtgatepb.SplitQueryResponse_Part, error)) ([]*vtgatepb.SplitQueryResponse_Part, error) { tabletType := topodatapb.TabletType_RDONLY // allParts will collect the query-parts from all the shards. 
It's protected @@ -594,21 +682,20 @@ func (stc *ScatterConn) SplitQuery( var allParts []*vtgatepb.SplitQueryResponse_Part var allPartsMutex sync.Mutex - allErrors := stc.multiGo( + allErrors := stc.multiGo2( ctx, "SplitQuery", - keyspace, - shards, + rss, tabletType, - func(target *querypb.Target) error { + func(rs *srvtopo.ResolvedShard, i int) error { // Get all splits from this shard query := &querypb.BoundQuery{ Sql: sql, BindVariables: bindVariables, } - querySplits, err := stc.gateway.SplitQuery( + querySplits, err := rs.QueryService.SplitQuery( ctx, - target, + rs.Target, query, splitColumns, perShardSplitCount, @@ -619,7 +706,7 @@ func (stc *ScatterConn) SplitQuery( } parts := make([]*vtgatepb.SplitQueryResponse_Part, len(querySplits)) for i, querySplit := range querySplits { - parts[i], err = querySplitToQueryPartFunc(querySplit, target.Shard) + parts[i], err = querySplitToQueryPartFunc(querySplit, rs) if err != nil { return err } @@ -740,6 +827,46 @@ func (stc *ScatterConn) multiGo( return allErrors } +// multiGo2 performs the requested 'action' on the specified +// shards in parallel. This does not handle any transaction state. +// The action function must match the shardActionFunc2 signature. +func (stc *ScatterConn) multiGo2( + ctx context.Context, + name string, + rss []*srvtopo.ResolvedShard, + tabletType topodatapb.TabletType, + action shardActionFunc2, +) (allErrors *concurrency.AllErrorRecorder) { + allErrors = new(concurrency.AllErrorRecorder) + if len(rss) == 0 { + return allErrors + } + + oneShard := func(rs *srvtopo.ResolvedShard, i int) { + var err error + startTime, statsKey := stc.startAction(name, rs.Target) + defer stc.endAction(startTime, allErrors, statsKey, &err, nil) + err = action(rs, i) + } + + if len(rss) == 1 { + // only one shard, do it synchronously. 
+ oneShard(rss[0], 0) + return allErrors + } + + var wg sync.WaitGroup + for i, rs := range rss { + wg.Add(1) + go func(rs *srvtopo.ResolvedShard, i int) { + defer wg.Done() + oneShard(rs, i) + }(rs, i) + } + wg.Wait() + return allErrors +} + // multiGoTransaction performs the requested 'action' on the specified // shards in parallel. For each shard, if the requested // session is in a transaction, it opens a new transactions on the connection, @@ -812,6 +939,71 @@ end: return nil } +// multiGoTransaction2 performs the requested 'action' on the specified +// ResolvedShards in parallel. For each shard, if the requested +// session is in a transaction, it opens a new transactions on the connection, +// and updates the Session with the transaction id. If the session already +// contains a transaction id for the shard, it reuses it. +// The action function must match the shardActionTransactionFunc2 signature. +func (stc *ScatterConn) multiGoTransaction2( + ctx context.Context, + name string, + rss []*srvtopo.ResolvedShard, + tabletType topodatapb.TabletType, + session *SafeSession, + notInTransaction bool, + action shardActionTransactionFunc2, +) error { + if len(rss) == 0 { + return nil + } + + allErrors := new(concurrency.AllErrorRecorder) + oneShard := func(rs *srvtopo.ResolvedShard, i int) { + var err error + startTime, statsKey := stc.startAction(name, rs.Target) + defer stc.endAction(startTime, allErrors, statsKey, &err, session) + + shouldBegin, transactionID := transactionInfo(rs.Target, session, notInTransaction) + transactionID, err = action(rs, i, shouldBegin, transactionID) + if shouldBegin && transactionID != 0 { + if appendErr := session.Append(&vtgatepb.Session_ShardSession{ + Target: rs.Target, + TransactionId: transactionID, + }, stc.txConn.mode); appendErr != nil { + err = appendErr + } + } + } + + var wg sync.WaitGroup + if len(rss) == 1 { + // only one shard, do it synchronously. 
+ for i, rs := range rss { + oneShard(rs, i) + goto end + } + } + + for i, rs := range rss { + wg.Add(1) + go func(rs *srvtopo.ResolvedShard, i int) { + defer wg.Done() + oneShard(rs, i) + }(rs, i) + } + wg.Wait() + +end: + if session.MustRollback() { + stc.txConn.Rollback(ctx, session) + } + if allErrors.HasErrors() { + return allErrors.AggrError(vterrors.Aggregate) + } + return nil +} + // transactionInfo looks at the current session, and returns: // - shouldBegin: if we should call 'Begin' to get a transactionID // - transactionID: the transactionID to use, or 0 if not in a transaction. diff --git a/go/vt/vtgate/scatter_conn_test.go b/go/vt/vtgate/scatter_conn_test.go index a7cd74e44a2..111975dd1a0 100644 --- a/go/vt/vtgate/scatter_conn_test.go +++ b/go/vt/vtgate/scatter_conn_test.go @@ -17,7 +17,6 @@ limitations under the License. package vtgate import ( - "encoding/json" "fmt" "reflect" "strings" @@ -71,7 +70,8 @@ func TestScatterConnExecuteBatch(t *testing.T) { Keyspace: "TestScatterConnExecuteBatch", Shards: shards, }} - scatterRequest, err := boundShardQueriesToScatterBatchRequest(queries) + res := srvtopo.NewResolver(&sandboxTopo{}, sc.gateway, "aa") + scatterRequest, err := boundShardQueriesToScatterBatchRequest(context.Background(), res, queries, topodatapb.TabletType_REPLICA) if err != nil { return nil, err } @@ -85,8 +85,14 @@ func TestScatterConnExecuteBatch(t *testing.T) { func TestScatterConnStreamExecute(t *testing.T) { testScatterConnGeneric(t, "TestScatterConnStreamExecute", func(sc *ScatterConn, shards []string) (*sqltypes.Result, error) { + res := srvtopo.NewResolver(&sandboxTopo{}, sc.gateway, "aa") + rss, err := res.ResolveShards(context.Background(), "TestScatterConnStreamExecute", shards, topodatapb.TabletType_REPLICA) + if err != nil { + return nil, err + } + qr := new(sqltypes.Result) - err := sc.StreamExecute(context.Background(), "query", nil, "TestScatterConnStreamExecute", shards, topodatapb.TabletType_REPLICA, nil, func(r 
*sqltypes.Result) error { + err = sc.StreamExecute(context.Background(), "query", nil, rss, topodatapb.TabletType_REPLICA, nil, func(r *sqltypes.Result) error { qr.AppendResult(r) return nil }) @@ -283,7 +289,12 @@ func TestScatterConnStreamExecuteSendError(t *testing.T) { hc := discovery.NewFakeHealthCheck() sc := newTestScatterConn(hc, new(sandboxTopo), "aa") hc.AddTestTablet("aa", "0", 1, "TestScatterConnStreamExecuteSendError", "0", topodatapb.TabletType_REPLICA, true, 1, nil) - err := sc.StreamExecute(context.Background(), "query", nil, "TestScatterConnStreamExecuteSendError", []string{"0"}, topodatapb.TabletType_REPLICA, nil, func(*sqltypes.Result) error { + res := srvtopo.NewResolver(&sandboxTopo{}, sc.gateway, "aa") + rss, err := res.ResolveShards(context.Background(), "TestScatterConnStreamExecuteSendError", []string{"0"}, topodatapb.TabletType_REPLICA) + if err != nil { + t.Fatalf("ResolveShards failed: %v", err) + } + err = sc.StreamExecute(context.Background(), "query", nil, rss, topodatapb.TabletType_REPLICA, nil, func(*sqltypes.Result) error { return fmt.Errorf("send error") }) want := "send error" @@ -572,137 +583,3 @@ func newTestScatterConn(hc discovery.HealthCheck, serv srvtopo.Server, cell stri tc := NewTxConn(gw, vtgatepb.TransactionMode_TWOPC) return NewScatterConn("", tc, gw, hc) } - -func TestBoundShardQueriesToScatterBatchRequest(t *testing.T) { - var testCases = []struct { - boundQueries []*vtgatepb.BoundShardQuery - requests *scatterBatchRequest - }{ - { - boundQueries: []*vtgatepb.BoundShardQuery{ - { - Query: &querypb.BoundQuery{ - Sql: "q1", - BindVariables: map[string]*querypb.BindVariable{ - "q1var": sqltypes.Int64BindVariable(1), - }, - }, - Keyspace: "ks1", - Shards: []string{"0", "1"}, - }, { - Query: &querypb.BoundQuery{ - Sql: "q2", - BindVariables: map[string]*querypb.BindVariable{ - "q2var": sqltypes.Int64BindVariable(2), - }, - }, - Keyspace: "ks1", - Shards: []string{"1"}, - }, { - Query: &querypb.BoundQuery{ - Sql: "q3", - 
BindVariables: map[string]*querypb.BindVariable{ - "q3var": sqltypes.Int64BindVariable(3), - }, - }, - Keyspace: "ks2", - Shards: []string{"1"}, - }, - }, - requests: &scatterBatchRequest{ - Length: 3, - Requests: map[string]*shardBatchRequest{ - "ks1:0": { - Queries: []*querypb.BoundQuery{ - { - Sql: "q1", - BindVariables: map[string]*querypb.BindVariable{ - "q1var": sqltypes.Int64BindVariable(1), - }, - }, - }, - Keyspace: "ks1", - Shard: "0", - ResultIndexes: []int{0}, - }, - "ks1:1": { - Queries: []*querypb.BoundQuery{ - { - Sql: "q1", - BindVariables: map[string]*querypb.BindVariable{ - "q1var": sqltypes.Int64BindVariable(1), - }, - }, { - Sql: "q2", - BindVariables: map[string]*querypb.BindVariable{ - "q2var": sqltypes.Int64BindVariable(2), - }, - }, - }, - Keyspace: "ks1", - Shard: "1", - ResultIndexes: []int{0, 1}, - }, - "ks2:1": { - Queries: []*querypb.BoundQuery{ - { - Sql: "q3", - BindVariables: map[string]*querypb.BindVariable{ - "q3var": sqltypes.Int64BindVariable(3), - }, - }, - }, - Keyspace: "ks2", - Shard: "1", - ResultIndexes: []int{2}, - }, - }, - }, - }, - { - boundQueries: []*vtgatepb.BoundShardQuery{ - { - Query: &querypb.BoundQuery{ - Sql: "q1", - BindVariables: map[string]*querypb.BindVariable{ - "q1var": sqltypes.Int64BindVariable(1), - }, - }, - Keyspace: "ks1", - Shards: []string{"0", "0"}, - }, - }, - requests: &scatterBatchRequest{ - Length: 1, - Requests: map[string]*shardBatchRequest{ - "ks1:0": { - Queries: []*querypb.BoundQuery{ - { - Sql: "q1", - BindVariables: map[string]*querypb.BindVariable{ - "q1var": sqltypes.Int64BindVariable(1), - }, - }, - }, - Keyspace: "ks1", - Shard: "0", - ResultIndexes: []int{0}, - }, - }, - }, - }, - } - - for _, testCase := range testCases { - scatterRequest, err := boundShardQueriesToScatterBatchRequest(testCase.boundQueries) - if err != nil { - t.Errorf("boundShardQueriesToScatterBatchRequest failed: %v", err) - continue - } - if !reflect.DeepEqual(testCase.requests, scatterRequest) { - got, _ := 
json.Marshal(scatterRequest) - want, _ := json.Marshal(testCase.requests) - t.Errorf("Bound Query: %#v\nResponse: %s\nExpecting: %s", testCase.boundQueries, got, want) - } - } -} diff --git a/go/vt/vtgate/vcursor_impl.go b/go/vt/vtgate/vcursor_impl.go index a2e642e22f3..d8159da7833 100644 --- a/go/vt/vtgate/vcursor_impl.go +++ b/go/vt/vtgate/vcursor_impl.go @@ -100,7 +100,7 @@ func (vc *vcursorImpl) DefaultKeyspace() (*vindexes.Keyspace, error) { return ks.Keyspace, nil } -// Execute performs a V3 level execution of the query. It does not take any routing directives. +// Execute performs a V3 level execution of the query. func (vc *vcursorImpl) Execute(method string, query string, BindVars map[string]*querypb.BindVariable, isDML bool) (*sqltypes.Result, error) { qr, err := vc.executor.Execute(vc.ctx, method, vc.safeSession, query+vc.trailingComments, BindVars) if err == nil { @@ -109,6 +109,15 @@ func (vc *vcursorImpl) Execute(method string, query string, BindVars map[string] return qr, err } +// ExecuteAutocommit performs a V3 level execution of the query in a separate autocommit session. +func (vc *vcursorImpl) ExecuteAutocommit(method string, query string, BindVars map[string]*querypb.BindVariable, isDML bool) (*sqltypes.Result, error) { + qr, err := vc.executor.Execute(vc.ctx, method, NewAutocommitSession(vc.safeSession.Session), query+vc.trailingComments, BindVars) + if err == nil { + vc.hasPartialDML = true + } + return qr, err +} + // ExecuteMultiShard executes different queries on different shards and returns the combined result. 
func (vc *vcursorImpl) ExecuteMultiShard(keyspace string, shardQueries map[string]*querypb.BoundQuery, isDML, canAutocommit bool) (*sqltypes.Result, error) { atomic.AddUint32(&vc.logStats.ShardQueries, uint32(len(shardQueries))) @@ -157,6 +166,21 @@ func (vc *vcursorImpl) GetShardForKeyspaceID(allShards []*topodatapb.ShardRefere return srvtopo.GetShardForKeyspaceID(allShards, keyspaceID) } +func (vc *vcursorImpl) GetShardsForKsids(allShards []*topodatapb.ShardReference, ksids vindexes.Ksids) ([]string, error) { + if ksids.Range != nil { + return srvtopo.GetShardsForKeyRange(allShards, ksids.Range), nil + } + var shards []string + for _, ksid := range ksids.IDs { + shard, err := srvtopo.GetShardForKeyspaceID(allShards, ksid) + if err != nil { + return nil, err + } + shards = append(shards, shard) + } + return shards, nil +} + func commentedShardQueries(shardQueries map[string]*querypb.BoundQuery, trailingComments string) map[string]*querypb.BoundQuery { if trailingComments == "" { return shardQueries diff --git a/go/vt/vtgate/vindexes/lookup.go b/go/vt/vtgate/vindexes/lookup.go index 549245bfdd2..86f9622720a 100644 --- a/go/vt/vtgate/vindexes/lookup.go +++ b/go/vt/vtgate/vindexes/lookup.go @@ -18,9 +18,11 @@ package vindexes import ( "encoding/json" + "errors" "fmt" "github.com/youtube/vitess/go/sqltypes" + "github.com/youtube/vitess/go/vt/proto/topodata" ) var ( @@ -38,8 +40,9 @@ func init() { // LookupNonUnique defines a vindex that uses a lookup table and create a mapping between from ids and KeyspaceId. // It's NonUnique and a Lookup. type LookupNonUnique struct { - name string - lkp lookupInternal + name string + writeOnly bool + lkp lookupInternal } // String returns the name of the vindex. @@ -53,24 +56,42 @@ func (ln *LookupNonUnique) Cost() int { } // Map returns the corresponding KeyspaceId values for the given ids. 
-func (ln *LookupNonUnique) Map(vcursor VCursor, ids []sqltypes.Value) ([][][]byte, error) { - out := make([][][]byte, 0, len(ids)) +func (ln *LookupNonUnique) Map(vcursor VCursor, ids []sqltypes.Value) ([]Ksids, error) { + out := make([]Ksids, 0, len(ids)) + if ln.writeOnly { + for range ids { + out = append(out, Ksids{Range: &topodata.KeyRange{}}) + } + return out, nil + } + results, err := ln.lkp.Lookup(vcursor, ids) if err != nil { return nil, err } for _, result := range results { + if len(result.Rows) == 0 { + out = append(out, Ksids{}) + continue + } ksids := make([][]byte, 0, len(result.Rows)) for _, row := range result.Rows { ksids = append(ksids, row[0].ToBytes()) } - out = append(out, ksids) + out = append(out, Ksids{IDs: ksids}) } return out, nil } // Verify returns true if ids maps to ksids. func (ln *LookupNonUnique) Verify(vcursor VCursor, ids []sqltypes.Value, ksids [][]byte) ([]bool, error) { + if ln.writeOnly { + out := make([]bool, len(ids)) + for i := range ids { + out[i] = true + } + return out, nil + } return ln.lkp.Verify(vcursor, ids, ksidsToValues(ksids)) } @@ -84,15 +105,41 @@ func (ln *LookupNonUnique) Delete(vcursor VCursor, rowsColValues [][]sqltypes.Va return ln.lkp.Delete(vcursor, rowsColValues, sqltypes.MakeTrusted(sqltypes.VarBinary, ksid)) } +// Update updates the entry in the vindex table. +func (ln *LookupNonUnique) Update(vcursor VCursor, oldValues []sqltypes.Value, ksid []byte, newValues []sqltypes.Value) error { + return ln.lkp.Update(vcursor, oldValues, sqltypes.MakeTrusted(sqltypes.VarBinary, ksid), newValues) +} + // MarshalJSON returns a JSON representation of LookupHash. func (ln *LookupNonUnique) MarshalJSON() ([]byte, error) { return json.Marshal(ln.lkp) } // NewLookup creates a LookupNonUnique vindex. +// The supplied map has the following required fields: +// table: name of the backing table. It can be qualified by the keyspace. +// from: list of columns in the table that have the 'from' values of the lookup vindex. 
+// to: The 'to' column name of the table. +// +// The following fields are optional: +// autocommit: setting this to "true" will cause inserts to upsert and deletes to be ignored. +// write_only: in this mode, Map functions return the full keyrange causing a full scatter. func NewLookup(name string, m map[string]string) (Vindex, error) { lookup := &LookupNonUnique{name: name} - lookup.lkp.Init(m) + + autocommit, err := boolFromMap(m, "autocommit") + if err != nil { + return nil, err + } + lookup.writeOnly, err = boolFromMap(m, "write_only") + if err != nil { + return nil, err + } + + // if autocommit is on for non-unique lookup, upsert should also be on. + if err := lookup.lkp.Init(m, autocommit, autocommit /* upsert */); err != nil { + return nil, err + } return lookup, nil } @@ -115,9 +162,32 @@ type LookupUnique struct { } // NewLookupUnique creates a LookupUnique vindex. +// The supplied map has the following required fields: +// table: name of the backing table. It can be qualified by the keyspace. +// from: list of columns in the table that have the 'from' values of the lookup vindex. +// to: The 'to' column name of the table. +// +// The following fields are optional: +// autocommit: setting this to "true" will cause deletes to be ignored. func NewLookupUnique(name string, m map[string]string) (Vindex, error) { lu := &LookupUnique{name: name} - lu.lkp.Init(m) + + autocommit, err := boolFromMap(m, "autocommit") + if err != nil { + return nil, err + } + scatter, err := boolFromMap(m, "write_only") + if err != nil { + return nil, err + } + if scatter { + return nil, errors.New("write_only cannot be true for a unique lookup vindex") + } + + // Don't allow upserts for unique vindexes. 
+ if err := lu.lkp.Init(m, autocommit, false /* upsert */); err != nil { + return nil, err + } return lu, nil } @@ -161,6 +231,11 @@ func (lu *LookupUnique) Create(vcursor VCursor, rowsColValues [][]sqltypes.Value return lu.lkp.Create(vcursor, rowsColValues, ksidsToValues(ksids), ignoreMode) } +// Update updates the entry in the vindex table. +func (lu *LookupUnique) Update(vcursor VCursor, oldValues []sqltypes.Value, ksid []byte, newValues []sqltypes.Value) error { + return lu.lkp.Update(vcursor, oldValues, sqltypes.MakeTrusted(sqltypes.VarBinary, ksid), newValues) +} + // Delete deletes the entry from the vindex table. func (lu *LookupUnique) Delete(vcursor VCursor, rowsColValues [][]sqltypes.Value, ksid []byte) error { return lu.lkp.Delete(vcursor, rowsColValues, sqltypes.MakeTrusted(sqltypes.VarBinary, ksid)) diff --git a/go/vt/vtgate/vindexes/lookup_hash.go b/go/vt/vtgate/vindexes/lookup_hash.go index ea567d7056d..26797c5eea3 100644 --- a/go/vt/vtgate/vindexes/lookup_hash.go +++ b/go/vt/vtgate/vindexes/lookup_hash.go @@ -18,9 +18,11 @@ package vindexes import ( "encoding/json" + "errors" "fmt" "github.com/youtube/vitess/go/sqltypes" + "github.com/youtube/vitess/go/vt/proto/topodata" ) var ( @@ -42,14 +44,36 @@ func init() { // NonUnique and a Lookup. // Warning: This Vindex is being depcreated in favor of Lookup type LookupHash struct { - name string - lkp lookupInternal + name string + writeOnly bool + lkp lookupInternal } // NewLookupHash creates a LookupHash vindex. +// The supplied map has the following required fields: +// table: name of the backing table. It can be qualified by the keyspace. +// from: list of columns in the table that have the 'from' values of the lookup vindex. +// to: The 'to' column name of the table. +// +// The following fields are optional: +// autocommit: setting this to "true" will cause inserts to upsert and deletes to be ignored. +// write_only: in this mode, Map functions return the full keyrange causing a full scatter. 
func NewLookupHash(name string, m map[string]string) (Vindex, error) { lh := &LookupHash{name: name} - lh.lkp.Init(m) + + autocommit, err := boolFromMap(m, "autocommit") + if err != nil { + return nil, err + } + lh.writeOnly, err = boolFromMap(m, "write_only") + if err != nil { + return nil, err + } + + // if autocommit is on for non-unique lookup, upsert should also be on. + if err := lh.lkp.Init(m, autocommit, autocommit /* upsert */); err != nil { + return nil, err + } return lh, nil } @@ -64,13 +88,24 @@ func (lh *LookupHash) Cost() int { } // Map returns the corresponding KeyspaceId values for the given ids. -func (lh *LookupHash) Map(vcursor VCursor, ids []sqltypes.Value) ([][][]byte, error) { - out := make([][][]byte, 0, len(ids)) +func (lh *LookupHash) Map(vcursor VCursor, ids []sqltypes.Value) ([]Ksids, error) { + out := make([]Ksids, 0, len(ids)) + if lh.writeOnly { + for range ids { + out = append(out, Ksids{Range: &topodata.KeyRange{}}) + } + return out, nil + } + results, err := lh.lkp.Lookup(vcursor, ids) if err != nil { return nil, err } for _, result := range results { + if len(result.Rows) == 0 { + out = append(out, Ksids{}) + continue + } ksids := make([][]byte, 0, len(result.Rows)) for _, row := range result.Rows { num, err := sqltypes.ToUint64(row[0]) @@ -81,13 +116,20 @@ func (lh *LookupHash) Map(vcursor VCursor, ids []sqltypes.Value) ([][][]byte, er } ksids = append(ksids, vhash(num)) } - out = append(out, ksids) + out = append(out, Ksids{IDs: ksids}) } return out, nil } // Verify returns true if ids maps to ksids. 
func (lh *LookupHash) Verify(vcursor VCursor, ids []sqltypes.Value, ksids [][]byte) ([]bool, error) { + if lh.writeOnly { + out := make([]bool, len(ids)) + for i := range ids { + out[i] = true + } + return out, nil + } values, err := unhashList(ksids) if err != nil { return nil, fmt.Errorf("lookup.Verify.vunhash: %v", err) @@ -104,6 +146,15 @@ func (lh *LookupHash) Create(vcursor VCursor, rowsColValues [][]sqltypes.Value, return lh.lkp.Create(vcursor, rowsColValues, values, ignoreMode) } +// Update updates the entry in the vindex table. +func (lh *LookupHash) Update(vcursor VCursor, oldValues []sqltypes.Value, ksid []byte, newValues []sqltypes.Value) error { + v, err := vunhash(ksid) + if err != nil { + return fmt.Errorf("lookup.Update.vunhash: %v", err) + } + return lh.lkp.Update(vcursor, oldValues, sqltypes.NewUint64(v), newValues) +} + // Delete deletes the entry from the vindex table. func (lh *LookupHash) Delete(vcursor VCursor, rowsColValues [][]sqltypes.Value, ksid []byte) error { v, err := vunhash(ksid) @@ -143,9 +194,32 @@ type LookupHashUnique struct { } // NewLookupHashUnique creates a LookupHashUnique vindex. +// The supplied map has the following required fields: +// table: name of the backing table. It can be qualified by the keyspace. +// from: list of columns in the table that have the 'from' values of the lookup vindex. +// to: The 'to' column name of the table. +// +// The following fields are optional: +// autocommit: setting this to "true" will cause deletes to be ignored. func NewLookupHashUnique(name string, m map[string]string) (Vindex, error) { lhu := &LookupHashUnique{name: name} - lhu.lkp.Init(m) + + autocommit, err := boolFromMap(m, "autocommit") + if err != nil { + return nil, err + } + scatter, err := boolFromMap(m, "write_only") + if err != nil { + return nil, err + } + if scatter { + return nil, errors.New("write_only cannot be true for a unique lookup vindex") + } + + // Don't allow upserts for unique vindexes. 
+ if err := lhu.lkp.Init(m, autocommit, false /* upsert */); err != nil { + return nil, err + } return lhu, nil } @@ -211,6 +285,15 @@ func (lhu *LookupHashUnique) Delete(vcursor VCursor, rowsColValues [][]sqltypes. return lhu.lkp.Delete(vcursor, rowsColValues, sqltypes.NewUint64(v)) } +// Update updates the entry in the vindex table. +func (lhu *LookupHashUnique) Update(vcursor VCursor, oldValues []sqltypes.Value, ksid []byte, newValues []sqltypes.Value) error { + v, err := vunhash(ksid) + if err != nil { + return fmt.Errorf("lookup.Update.vunhash: %v", err) + } + return lhu.lkp.Update(vcursor, oldValues, sqltypes.NewUint64(v), newValues) +} + // MarshalJSON returns a JSON representation of LookupHashUnique. func (lhu *LookupHashUnique) MarshalJSON() ([]byte, error) { return json.Marshal(lhu.lkp) diff --git a/go/vt/vtgate/vindexes/lookup_hash_test.go b/go/vt/vtgate/vindexes/lookup_hash_test.go index 56c24cfaed3..0d43034c648 100644 --- a/go/vt/vtgate/vindexes/lookup_hash_test.go +++ b/go/vt/vtgate/vindexes/lookup_hash_test.go @@ -22,25 +22,36 @@ import ( "testing" "github.com/youtube/vitess/go/sqltypes" + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) -var lookuphash Vindex -var lookuphashunique Vindex +func TestLookupHashNew(t *testing.T) { + l := createLookup(t, "lookup_hash", false) + if want, got := l.(*LookupHash).writeOnly, false; got != want { + t.Errorf("Create(lookup, false): %v, want %v", got, want) + } -func init() { - lh, err := CreateVindex("lookup_hash", "lookup_hash", map[string]string{"table": "t", "from": "fromc", "to": "toc"}) - if err != nil { - panic(err) + l = createLookup(t, "lookup_hash", true) + if want, got := l.(*LookupHash).writeOnly, true; got != want { + t.Errorf("Create(lookup, false): %v, want %v", got, want) } - lu, err := CreateVindex("lookup_hash_unique", "lookup_hash_unique", map[string]string{"table": "t", "from": "fromc", "to": "toc"}) - if err != nil { - panic(err) + + l, err := CreateVindex("lookup_hash", 
"lookup_hash", map[string]string{ + "table": "t", + "from": "fromc", + "to": "toc", + "write_only": "invalid", + }) + want := "write_only value must be 'true' or 'false': 'invalid'" + if err == nil || err.Error() != want { + t.Errorf("Create(bad_scatter): %v, want %s", err, want) } - lookuphash = lh - lookuphashunique = lu } func TestLookupHashCost(t *testing.T) { + lookuphash := createLookup(t, "lookup_hash", false) + lookuphashunique := createLookup(t, "lookup_hash_unique", false) + if lookuphash.Cost() != 20 { t.Errorf("Cost(): %d, want 20", lookuphash.Cost()) } @@ -50,6 +61,9 @@ func TestLookupHashCost(t *testing.T) { } func TestLookupHashString(t *testing.T) { + lookuphash := createLookup(t, "lookup_hash", false) + lookuphashunique := createLookup(t, "lookup_hash_unique", false) + if strings.Compare("lookup_hash", lookuphash.String()) != 0 { t.Errorf("String(): %s, want lookup_hash", lookuphash.String()) } @@ -59,17 +73,23 @@ func TestLookupHashString(t *testing.T) { } func TestLookupHashMap(t *testing.T) { + lookuphash := createLookup(t, "lookup_hash", false) vc := &vcursor{numRows: 2} + got, err := lookuphash.(NonUnique).Map(vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) if err != nil { t.Error(err) } - want := [][][]byte{{ - []byte("\x16k@\xb4J\xbaK\xd6"), - []byte("\x06\xe7\xea\"Î’p\x8f"), + want := []Ksids{{ + IDs: [][]byte{ + []byte("\x16k@\xb4J\xbaK\xd6"), + []byte("\x06\xe7\xea\"Î’p\x8f"), + }, }, { - []byte("\x16k@\xb4J\xbaK\xd6"), - []byte("\x06\xe7\xea\"Î’p\x8f"), + IDs: [][]byte{ + []byte("\x16k@\xb4J\xbaK\xd6"), + []byte("\x06\xe7\xea\"Î’p\x8f"), + }, }} if !reflect.DeepEqual(got, want) { t.Errorf("Map(): %#v, want %+v", got, want) @@ -84,7 +104,7 @@ func TestLookupHashMap(t *testing.T) { if err != nil { t.Error(err) } - want = [][][]byte{{}} + want = []Ksids{{IDs: [][]byte{}}} if !reflect.DeepEqual(got, want) { t.Errorf("Map(): %#v, want %#v", got, want) } @@ -99,8 +119,39 @@ func TestLookupHashMap(t *testing.T) { vc.mustFail = 
false } +func TestLookupHashMapAbsent(t *testing.T) { + lookuphash := createLookup(t, "lookup_hash", false) + vc := &vcursor{numRows: 0} + + got, err := lookuphash.(NonUnique).Map(vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + if err != nil { + t.Error(err) + } + want := []Ksids{{}, {}} + if !reflect.DeepEqual(got, want) { + t.Errorf("Map(): %#v, want %+v", got, want) + } + + // writeOnly true should return full keyranges. + lookuphash = createLookup(t, "lookup_hash", true) + got, err = lookuphash.(NonUnique).Map(vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + if err != nil { + t.Error(err) + } + want = []Ksids{{ + Range: &topodatapb.KeyRange{}, + }, { + Range: &topodatapb.KeyRange{}, + }} + if !reflect.DeepEqual(got, want) { + t.Errorf("Map(): %#v, want %+v", got, want) + } +} + func TestLookupHashVerify(t *testing.T) { + lookuphash := createLookup(t, "lookup_hash", false) vc := &vcursor{numRows: 1} + // The check doesn't actually happen. But we give correct values // to avoid confusion. got, err := lookuphash.Verify(vc, @@ -129,11 +180,29 @@ func TestLookupHashVerify(t *testing.T) { if err == nil || err.Error() != wantErr { t.Errorf("lookuphash.Verify(bogus) err: %v, want %s", err, wantErr) } + + // writeOnly true should always yield true. 
+ lookuphash = createLookup(t, "lookup_hash", true) + vc.queries = nil + + got, err = lookuphash.Verify(vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte(""), []byte("")}) + if err != nil { + t.Error(err) + } + if vc.queries != nil { + t.Errorf("lookuphash.Verify(scatter), queries: %v, want nil", vc.queries) + } + wantBools := []bool{true, true} + if !reflect.DeepEqual(got, wantBools) { + t.Errorf("lookuphash.Verify(scatter): %v, want %v", got, wantBools) + } } func TestLookupHashCreate(t *testing.T) { + lookuphash := createLookup(t, "lookup_hash", false) vc := &vcursor{} - err := lookuphash.(Lookup).Create(vc, [][]sqltypes.Value{[]sqltypes.Value{sqltypes.NewInt64(1)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false /* ignoreMode */) + + err := lookuphash.(Lookup).Create(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false /* ignoreMode */) if err != nil { t.Error(err) } @@ -141,7 +210,7 @@ func TestLookupHashCreate(t *testing.T) { t.Errorf("vc.queries length: %v, want %v", got, want) } - err = lookuphash.(Lookup).Create(vc, [][]sqltypes.Value{[]sqltypes.Value{sqltypes.NewInt64(1)}}, [][]byte{[]byte("bogus")}, false /* ignoreMode */) + err = lookuphash.(Lookup).Create(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, [][]byte{[]byte("bogus")}, false /* ignoreMode */) want := "lookup.Create.vunhash: invalid keyspace id: 626f677573" if err == nil || err.Error() != want { t.Errorf("lookuphash.Create(bogus) err: %v, want %s", err, want) @@ -149,8 +218,10 @@ func TestLookupHashCreate(t *testing.T) { } func TestLookupHashDelete(t *testing.T) { + lookuphash := createLookup(t, "lookup_hash", false) vc := &vcursor{} - err := lookuphash.(Lookup).Delete(vc, [][]sqltypes.Value{[]sqltypes.Value{sqltypes.NewInt64(1)}}, []byte("\x16k@\xb4J\xbaK\xd6")) + + err := lookuphash.(Lookup).Delete(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, []byte("\x16k@\xb4J\xbaK\xd6")) if err != nil { t.Error(err) } @@ -158,9 
+229,22 @@ func TestLookupHashDelete(t *testing.T) { t.Errorf("vc.queries length: %v, want %v", got, want) } - err = lookuphash.(Lookup).Delete(vc, [][]sqltypes.Value{[]sqltypes.Value{sqltypes.NewInt64(1)}}, []byte("bogus")) + err = lookuphash.(Lookup).Delete(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, []byte("bogus")) want := "lookup.Delete.vunhash: invalid keyspace id: 626f677573" if err == nil || err.Error() != want { t.Errorf("lookuphash.Delete(bogus) err: %v, want %s", err, want) } } + +func TestLookupHashUpdate(t *testing.T) { + lookuphash := createLookup(t, "lookup_hash", false) + vc := &vcursor{} + + err := lookuphash.(Lookup).Update(vc, []sqltypes.Value{sqltypes.NewInt64(1)}, []byte("\x16k@\xb4J\xbaK\xd6"), []sqltypes.Value{sqltypes.NewInt64(2)}) + if err != nil { + t.Error(err) + } + if got, want := len(vc.queries), 2; got != want { + t.Errorf("vc.queries length: %v, want %v", got, want) + } +} diff --git a/go/vt/vtgate/vindexes/lookup_hash_unique_test.go b/go/vt/vtgate/vindexes/lookup_hash_unique_test.go index 7a6b4a9cef7..80d900f7ee1 100644 --- a/go/vt/vtgate/vindexes/lookup_hash_unique_test.go +++ b/go/vt/vtgate/vindexes/lookup_hash_unique_test.go @@ -23,24 +23,43 @@ import ( "github.com/youtube/vitess/go/sqltypes" ) -var lhu Vindex +func TestLookupHashUniqueNew(t *testing.T) { + _ = createLookup(t, "lookup_hash_unique", false) + + _, err := CreateVindex("lookup_hash_unique", "lookup_hash_unique", map[string]string{ + "table": "t", + "from": "fromc", + "to": "toc", + "write_only": "true", + }) + want := "write_only cannot be true for a unique lookup vindex" + if err == nil || err.Error() != want { + t.Errorf("Create(bad_scatter): %v, want %s", err, want) + } -func init() { - h, err := CreateVindex("lookup_hash_unique", "nn", map[string]string{"table": "t", "from": "fromc", "to": "toc"}) - if err != nil { - panic(err) + _, err = CreateVindex("lookup_hash_unique", "lookup_hash_unique", map[string]string{ + "table": "t", + "from": "fromc", + "to": 
"toc", + "write_only": "invalid", + }) + want = "write_only value must be 'true' or 'false': 'invalid'" + if err == nil || err.Error() != want { + t.Errorf("Create(bad_scatter): %v, want %s", err, want) } - lhu = h } func TestLookupHashUniqueCost(t *testing.T) { + lhu := createLookup(t, "lookup_hash_unique", false) if lhu.Cost() != 10 { t.Errorf("Cost(): %d, want 10", lhu.Cost()) } } func TestLookupHashUniqueMap(t *testing.T) { + lhu := createLookup(t, "lookup_hash_unique", false) vc := &vcursor{numRows: 1} + got, err := lhu.(Unique).Map(vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) if err != nil { t.Error(err) @@ -95,7 +114,9 @@ func TestLookupHashUniqueMap(t *testing.T) { } func TestLookupHashUniqueVerify(t *testing.T) { + lhu := createLookup(t, "lookup_hash_unique", false) vc := &vcursor{numRows: 1} + // The check doesn't actually happen. But we give correct values // to avoid confusion. got, err := lhu.Verify(vc, @@ -127,8 +148,10 @@ func TestLookupHashUniqueVerify(t *testing.T) { } func TestLookupHashUniqueCreate(t *testing.T) { + lhu := createLookup(t, "lookup_hash_unique", false) vc := &vcursor{} - err := lhu.(Lookup).Create(vc, [][]sqltypes.Value{[]sqltypes.Value{sqltypes.NewInt64(1)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false /* ignoreMode */) + + err := lhu.(Lookup).Create(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false /* ignoreMode */) if err != nil { t.Error(err) } @@ -136,7 +159,7 @@ func TestLookupHashUniqueCreate(t *testing.T) { t.Errorf("vc.queries length: %v, want %v", got, want) } - err = lhu.(Lookup).Create(vc, [][]sqltypes.Value{[]sqltypes.Value{sqltypes.NewInt64(1)}}, [][]byte{[]byte("bogus")}, false /* ignoreMode */) + err = lhu.(Lookup).Create(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, [][]byte{[]byte("bogus")}, false /* ignoreMode */) want := "lookup.Create.vunhash: invalid keyspace id: 626f677573" if err == nil || err.Error() != want { 
t.Errorf("lhu.Create(bogus) err: %v, want %s", err, want) @@ -144,8 +167,10 @@ func TestLookupHashUniqueCreate(t *testing.T) { } func TestLookupHashUniqueDelete(t *testing.T) { + lhu := createLookup(t, "lookup_hash_unique", false) vc := &vcursor{} - err := lhu.(Lookup).Delete(vc, [][]sqltypes.Value{[]sqltypes.Value{sqltypes.NewInt64(1)}}, []byte("\x16k@\xb4J\xbaK\xd6")) + + err := lhu.(Lookup).Delete(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, []byte("\x16k@\xb4J\xbaK\xd6")) if err != nil { t.Error(err) } @@ -153,9 +178,22 @@ func TestLookupHashUniqueDelete(t *testing.T) { t.Errorf("vc.queries length: %v, want %v", got, want) } - err = lhu.(Lookup).Delete(vc, [][]sqltypes.Value{[]sqltypes.Value{sqltypes.NewInt64(1)}}, []byte("bogus")) + err = lhu.(Lookup).Delete(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, []byte("bogus")) want := "lookup.Delete.vunhash: invalid keyspace id: 626f677573" if err == nil || err.Error() != want { t.Errorf("lhu.Delete(bogus) err: %v, want %s", err, want) } } + +func TestLookupHashUniqueUpdate(t *testing.T) { + lhu := createLookup(t, "lookup_hash_unique", false) + vc := &vcursor{} + + err := lhu.(Lookup).Update(vc, []sqltypes.Value{sqltypes.NewInt64(1)}, []byte("\x16k@\xb4J\xbaK\xd6"), []sqltypes.Value{sqltypes.NewInt64(2)}) + if err != nil { + t.Error(err) + } + if got, want := len(vc.queries), 2; got != want { + t.Errorf("vc.queries length: %v, want %v", got, want) + } +} diff --git a/go/vt/vtgate/vindexes/lookup_internal.go b/go/vt/vtgate/vindexes/lookup_internal.go index 80797938d77..84afb94b218 100644 --- a/go/vt/vtgate/vindexes/lookup_internal.go +++ b/go/vt/vtgate/vindexes/lookup_internal.go @@ -32,10 +32,12 @@ type lookupInternal struct { Table string `json:"table"` FromColumns []string `json:"from_columns"` To string `json:"to"` + Autocommit bool `json:"autocommit,omitempty"` + Upsert bool `json:"upsert,omitempty"` sel, ver, del string } -func (lkp *lookupInternal) Init(lookupQueryParams map[string]string) { +func (lkp 
*lookupInternal) Init(lookupQueryParams map[string]string, autocommit, upsert bool) error { lkp.Table = lookupQueryParams["table"] lkp.To = lookupQueryParams["to"] var fromColumns []string @@ -44,12 +46,16 @@ func (lkp *lookupInternal) Init(lookupQueryParams map[string]string) { } lkp.FromColumns = fromColumns + lkp.Autocommit = autocommit + lkp.Upsert = upsert + // TODO @rafael: update sel and ver to support multi column vindexes. This will be done // as part of face 2 of https://github.com/youtube/vitess/issues/3481 // For now multi column behaves as a single column for Map and Verify operations lkp.sel = fmt.Sprintf("select %s from %s where %s = :%s", lkp.To, lkp.Table, lkp.FromColumns[0], lkp.FromColumns[0]) lkp.ver = fmt.Sprintf("select %s from %s where %s = :%s and %s = :%s", lkp.FromColumns[0], lkp.Table, lkp.FromColumns[0], lkp.FromColumns[0], lkp.To, lkp.To) - lkp.del = lkp.initDelStm() + lkp.del = lkp.initDelStmt() + return nil } // Lookup performs a lookup for the ids. @@ -59,7 +65,13 @@ func (lkp *lookupInternal) Lookup(vcursor VCursor, ids []sqltypes.Value) ([]*sql bindVars := map[string]*querypb.BindVariable{ lkp.FromColumns[0]: sqltypes.ValueBindVariable(id), } - result, err := vcursor.Execute("VindexLookup", lkp.sel, bindVars, false /* isDML */) + var err error + var result *sqltypes.Result + if lkp.Autocommit { + result, err = vcursor.ExecuteAutocommit("VindexLookup", lkp.sel, bindVars, false /* isDML */) + } else { + result, err = vcursor.Execute("VindexLookup", lkp.sel, bindVars, false /* isDML */) + } if err != nil { return nil, fmt.Errorf("lookup.Map: %v", err) } @@ -76,7 +88,13 @@ func (lkp *lookupInternal) Verify(vcursor VCursor, ids, values []sqltypes.Value) lkp.FromColumns[0]: sqltypes.ValueBindVariable(id), lkp.To: sqltypes.ValueBindVariable(values[i]), } - result, err := vcursor.Execute("VindexVerify", lkp.ver, bindVars, true /* isDML */) + var err error + var result *sqltypes.Result + if lkp.Autocommit { + result, err = 
vcursor.ExecuteAutocommit("VindexVerify", lkp.ver, bindVars, true /* isDML */) + } else { + result, err = vcursor.Execute("VindexVerify", lkp.ver, bindVars, true /* isDML */) + } if err != nil { return nil, fmt.Errorf("lookup.Verify: %v", err) } @@ -91,43 +109,56 @@ func (lkp *lookupInternal) Verify(vcursor VCursor, ids, values []sqltypes.Value) // toValues contains the keyspace_id of each row being inserted. // Given a vindex with two columns and the following insert: // -// INSERT INTO table_a (colum_a, column_b, column_c) VALUES (value_a1, value_b1, value_c1), (value_a2, value_b2, value_c2); +// INSERT INTO table_a (colum_a, column_b, column_c) VALUES (value_a0, value_b0, value_c0), (value_a1, value_b1, value_c1); // If we assume that the primary vindex is on column_c. The call to create will look like this: -// Create(vcursor, [[value_a1, value_b1,], [value_a2, value_b2]], [binary(value_c1), binary(value_c2)]) +// Create(vcursor, [[value_a0, value_b0,], [value_a1, value_b1]], [binary(value_c0), binary(value_c1)]) // Notice that toValues contains the computed binary value of the keyspace_id. 
func (lkp *lookupInternal) Create(vcursor VCursor, rowsColValues [][]sqltypes.Value, toValues []sqltypes.Value, ignoreMode bool) error { - var insBuffer bytes.Buffer + buf := new(bytes.Buffer) if ignoreMode { - fmt.Fprintf(&insBuffer, "insert ignore into %s(", lkp.Table) + fmt.Fprintf(buf, "insert ignore into %s(", lkp.Table) } else { - fmt.Fprintf(&insBuffer, "insert into %s(", lkp.Table) + fmt.Fprintf(buf, "insert into %s(", lkp.Table) } for _, col := range lkp.FromColumns { - fmt.Fprintf(&insBuffer, "%s, ", col) - + fmt.Fprintf(buf, "%s, ", col) } + fmt.Fprintf(buf, "%s) values(", lkp.To) - fmt.Fprintf(&insBuffer, "%s) values(", lkp.To) bindVars := make(map[string]*querypb.BindVariable, 2*len(rowsColValues)) for rowIdx := range toValues { colIds := rowsColValues[rowIdx] if rowIdx != 0 { - insBuffer.WriteString(", (") + buf.WriteString(", (") } for colIdx, colID := range colIds { fromStr := lkp.FromColumns[colIdx] + strconv.Itoa(rowIdx) bindVars[fromStr] = sqltypes.ValueBindVariable(colID) - insBuffer.WriteString(":" + fromStr + ", ") + buf.WriteString(":" + fromStr + ", ") } toStr := lkp.To + strconv.Itoa(rowIdx) - insBuffer.WriteString(":" + toStr + ")") + buf.WriteString(":" + toStr + ")") bindVars[toStr] = sqltypes.ValueBindVariable(toValues[rowIdx]) } - _, err := vcursor.Execute("VindexCreate", insBuffer.String(), bindVars, true /* isDML */) + + if lkp.Upsert { + fmt.Fprintf(buf, " on duplicate key update ") + for _, col := range lkp.FromColumns { + fmt.Fprintf(buf, "%s=values(%s), ", col, col) + } + fmt.Fprintf(buf, "%s=values(%s)", lkp.To, lkp.To) + } + + var err error + if lkp.Autocommit { + _, err = vcursor.ExecuteAutocommit("VindexCreate", buf.String(), bindVars, true /* isDML */) + } else { + _, err = vcursor.Execute("VindexCreate", buf.String(), bindVars, true /* isDML */) + } if err != nil { return fmt.Errorf("lookup.Create: %v", err) } - return err + return nil } // Delete deletes the association between ids and value. 
@@ -137,7 +168,7 @@ func (lkp *lookupInternal) Create(vcursor VCursor, rowsColValues [][]sqltypes.Va // // Given the following information in a vindex table with two columns: // -// +------------------+-----------+--------+ +// +------------------+-----------+--------+ // | hex(keyspace_id) | a | b | // +------------------+-----------+--------+ // | 52CB7B1B31B2222E | valuea | valueb | @@ -146,6 +177,10 @@ func (lkp *lookupInternal) Create(vcursor VCursor, rowsColValues [][]sqltypes.Va // A call to Delete would look like this: // Delete(vcursor, [[valuea, valueb]], 52CB7B1B31B2222E) func (lkp *lookupInternal) Delete(vcursor VCursor, rowsColValues [][]sqltypes.Value, value sqltypes.Value) error { + // In autocommit mode, it's not safe to delete. So, it's a no-op. + if lkp.Autocommit { + return nil + } for _, column := range rowsColValues { bindVars := make(map[string]*querypb.BindVariable, len(rowsColValues)) for colIdx, columnValue := range column { @@ -160,7 +195,15 @@ func (lkp *lookupInternal) Delete(vcursor VCursor, rowsColValues [][]sqltypes.Va return nil } -func (lkp *lookupInternal) initDelStm() string { +// Update implements the update functionality. 
+func (lkp *lookupInternal) Update(vcursor VCursor, oldValues []sqltypes.Value, ksid sqltypes.Value, newValues []sqltypes.Value) error { + if err := lkp.Delete(vcursor, [][]sqltypes.Value{oldValues}, ksid); err != nil { + return err + } + return lkp.Create(vcursor, [][]sqltypes.Value{newValues}, []sqltypes.Value{ksid}, false /* ignoreMode */) +} + +func (lkp *lookupInternal) initDelStmt() string { var delBuffer bytes.Buffer fmt.Fprintf(&delBuffer, "delete from %s where ", lkp.Table) for colIdx, column := range lkp.FromColumns { @@ -172,3 +215,18 @@ func (lkp *lookupInternal) initDelStm() string { delBuffer.WriteString(" and " + lkp.To + " = :" + lkp.To) return delBuffer.String() } + +func boolFromMap(m map[string]string, key string) (bool, error) { + val, ok := m[key] + if !ok { + return false, nil + } + switch val { + case "true": + return true, nil + case "false": + return false, nil + default: + return false, fmt.Errorf("%s value must be 'true' or 'false': '%s'", key, val) + } +} diff --git a/go/vt/vtgate/vindexes/lookup_test.go b/go/vt/vtgate/vindexes/lookup_test.go index 3f9fca1baeb..4b72862fbf1 100644 --- a/go/vt/vtgate/vindexes/lookup_test.go +++ b/go/vt/vtgate/vindexes/lookup_test.go @@ -26,19 +26,30 @@ import ( "github.com/youtube/vitess/go/sqltypes" querypb "github.com/youtube/vitess/go/vt/proto/query" + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) // LookupNonUnique tests are more comprehensive than others. // They also test lookupInternal functionality. 
type vcursor struct { - mustFail bool - numRows int - result *sqltypes.Result - queries []*querypb.BoundQuery + mustFail bool + numRows int + result *sqltypes.Result + queries []*querypb.BoundQuery + autocommits int } func (vc *vcursor) Execute(method string, query string, bindvars map[string]*querypb.BindVariable, isDML bool) (*sqltypes.Result, error) { + return vc.execute(method, query, bindvars, isDML) +} + +func (vc *vcursor) ExecuteAutocommit(method string, query string, bindvars map[string]*querypb.BindVariable, isDML bool) (*sqltypes.Result, error) { + vc.autocommits++ + return vc.execute(method, query, bindvars, isDML) +} + +func (vc *vcursor) execute(method string, query string, bindvars map[string]*querypb.BindVariable, isDML bool) (*sqltypes.Result, error) { vc.queries = append(vc.queries, &querypb.BoundQuery{ Sql: query, BindVariables: bindvars, @@ -69,47 +80,61 @@ func (vc *vcursor) Execute(method string, query string, bindvars map[string]*que panic("unexpected") } -var lookupUnique Vindex -var lookupNonUnique Vindex - -func init() { - lkpunique, err := CreateVindex("lookup_unique", "lookupUnique", map[string]string{"table": "t", "from": "fromc", "to": "toc"}) - if err != nil { - panic(err) +func TestLookupNonUniqueNew(t *testing.T) { + l := createLookup(t, "lookup", false) + if want, got := l.(*LookupNonUnique).writeOnly, false; got != want { + t.Errorf("Create(lookup, false): %v, want %v", got, want) } - lkpnonunique, err := CreateVindex("lookup", "lookupNonUnique", map[string]string{"table": "t", "from": "fromc", "to": "toc"}) - if err != nil { - panic(err) + + l = createLookup(t, "lookup", true) + if want, got := l.(*LookupNonUnique).writeOnly, true; got != want { + t.Errorf("Create(lookup, false): %v, want %v", got, want) } - lookupUnique = lkpunique - lookupNonUnique = lkpnonunique + l, err := CreateVindex("lookup", "lookup", map[string]string{ + "table": "t", + "from": "fromc", + "to": "toc", + "write_only": "invalid", + }) + want := "write_only 
value must be 'true' or 'false': 'invalid'" + if err == nil || err.Error() != want { + t.Errorf("Create(bad_scatter): %v, want %s", err, want) + } } func TestLookupNonUniqueCost(t *testing.T) { + lookupNonUnique := createLookup(t, "lookup", false) if lookupNonUnique.Cost() != 20 { - t.Errorf("Cost(): %d, want 20", lookupUnique.Cost()) + t.Errorf("Cost(): %d, want 20", lookupNonUnique.Cost()) } } func TestLookupNonUniqueString(t *testing.T) { - if strings.Compare("lookupNonUnique", lookupNonUnique.String()) != 0 { - t.Errorf("String(): %s, want lookupNonUnique", lookupNonUnique.String()) + lookupNonUnique := createLookup(t, "lookup", false) + if strings.Compare("lookup", lookupNonUnique.String()) != 0 { + t.Errorf("String(): %s, want lookup", lookupNonUnique.String()) } } func TestLookupNonUniqueMap(t *testing.T) { + lookupNonUnique := createLookup(t, "lookup", false) vc := &vcursor{numRows: 2} + got, err := lookupNonUnique.(NonUnique).Map(vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) if err != nil { t.Error(err) } - want := [][][]byte{{ - []byte("1"), - []byte("2"), + want := []Ksids{{ + IDs: [][]byte{ + []byte("1"), + []byte("2"), + }, }, { - []byte("1"), - []byte("2"), + IDs: [][]byte{ + []byte("1"), + []byte("2"), + }, }} if !reflect.DeepEqual(got, want) { t.Errorf("Map(): %#v, want %+v", got, want) @@ -140,8 +165,90 @@ func TestLookupNonUniqueMap(t *testing.T) { vc.mustFail = false } +func TestLookupNonUniqueMapAutocommit(t *testing.T) { + lookupNonUnique, err := CreateVindex("lookup", "lookup", map[string]string{ + "table": "t", + "from": "fromc", + "to": "toc", + "autocommit": "true", + }) + if err != nil { + t.Fatal(err) + } + vc := &vcursor{numRows: 2} + + got, err := lookupNonUnique.(NonUnique).Map(vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + if err != nil { + t.Error(err) + } + want := []Ksids{{ + IDs: [][]byte{ + []byte("1"), + []byte("2"), + }, + }, { + IDs: [][]byte{ + []byte("1"), + []byte("2"), + }, + }} + 
if !reflect.DeepEqual(got, want) { + t.Errorf("Map(): %#v, want %+v", got, want) + } + + wantqueries := []*querypb.BoundQuery{{ + Sql: "select toc from t where fromc = :fromc", + BindVariables: map[string]*querypb.BindVariable{ + "fromc": sqltypes.Int64BindVariable(1), + }, + }, { + Sql: "select toc from t where fromc = :fromc", + BindVariables: map[string]*querypb.BindVariable{ + "fromc": sqltypes.Int64BindVariable(2), + }, + }} + if !reflect.DeepEqual(vc.queries, wantqueries) { + t.Errorf("lookup.Map queries:\n%v, want\n%v", vc.queries, wantqueries) + } + + if got, want := vc.autocommits, 2; got != want { + t.Errorf("Create(autocommit) count: %d, want %d", got, want) + } +} + +func TestLookupNonUniqueMapAbsent(t *testing.T) { + lookupNonUnique := createLookup(t, "lookup", false) + vc := &vcursor{numRows: 0} + + got, err := lookupNonUnique.(NonUnique).Map(vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + if err != nil { + t.Error(err) + } + want := []Ksids{{}, {}} + if !reflect.DeepEqual(got, want) { + t.Errorf("Map(): %#v, want %+v", got, want) + } + + // writeOnly true should return full keyranges. 
+ lookupNonUnique = createLookup(t, "lookup", true) + got, err = lookupNonUnique.(NonUnique).Map(vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) + if err != nil { + t.Error(err) + } + want = []Ksids{{ + Range: &topodatapb.KeyRange{}, + }, { + Range: &topodatapb.KeyRange{}, + }} + if !reflect.DeepEqual(got, want) { + t.Errorf("Map(): %#v, want %+v", got, want) + } +} + func TestLookupNonUniqueVerify(t *testing.T) { + lookupNonUnique := createLookup(t, "lookup", false) vc := &vcursor{numRows: 1} + _, err := lookupNonUnique.Verify(vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte("test1"), []byte("test2")}) if err != nil { t.Error(err) @@ -172,11 +279,68 @@ func TestLookupNonUniqueVerify(t *testing.T) { t.Errorf("lookupNonUnique(query fail) err: %v, want %s", err, want) } vc.mustFail = false + + // writeOnly true should always yield true. + lookupNonUnique = createLookup(t, "lookup", true) + vc.queries = nil + + got, err := lookupNonUnique.Verify(vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte(""), []byte("")}) + if err != nil { + t.Error(err) + } + if vc.queries != nil { + t.Errorf("lookup.Verify(writeOnly), queries: %v, want nil", vc.queries) + } + wantBools := []bool{true, true} + if !reflect.DeepEqual(got, wantBools) { + t.Errorf("lookup.Verify(writeOnly): %v, want %v", got, wantBools) + } +} + +func TestLookupNonUniqueVerifyAutocommit(t *testing.T) { + lookupNonUnique, err := CreateVindex("lookup", "lookup", map[string]string{ + "table": "t", + "from": "fromc", + "to": "toc", + "autocommit": "true", + }) + if err != nil { + t.Fatal(err) + } + vc := &vcursor{numRows: 1} + + _, err = lookupNonUnique.Verify(vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}, [][]byte{[]byte("test1"), []byte("test2")}) + if err != nil { + t.Error(err) + } + + wantqueries := []*querypb.BoundQuery{{ + Sql: "select fromc from t where fromc = :fromc and toc = :toc", + BindVariables: 
map[string]*querypb.BindVariable{ + "fromc": sqltypes.Int64BindVariable(1), + "toc": sqltypes.BytesBindVariable([]byte("test1")), + }, + }, { + Sql: "select fromc from t where fromc = :fromc and toc = :toc", + BindVariables: map[string]*querypb.BindVariable{ + "fromc": sqltypes.Int64BindVariable(2), + "toc": sqltypes.BytesBindVariable([]byte("test2")), + }, + }} + if !reflect.DeepEqual(vc.queries, wantqueries) { + t.Errorf("lookup.Verify queries:\n%v, want\n%v", vc.queries, wantqueries) + } + + if got, want := vc.autocommits, 2; got != want { + t.Errorf("Create(autocommit) count: %d, want %d", got, want) + } } func TestLookupNonUniqueCreate(t *testing.T) { + lookupNonUnique := createLookup(t, "lookup", false) vc := &vcursor{} - err := lookupNonUnique.(Lookup).Create(vc, [][]sqltypes.Value{[]sqltypes.Value{sqltypes.NewInt64(1)}, []sqltypes.Value{sqltypes.NewInt64(2)}}, [][]byte{[]byte("test1"), []byte("test2")}, false /* ignoreMode */) + + err := lookupNonUnique.(Lookup).Create(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}, {sqltypes.NewInt64(2)}}, [][]byte{[]byte("test1"), []byte("test2")}, false /* ignoreMode */) if err != nil { t.Error(err) } @@ -196,7 +360,7 @@ func TestLookupNonUniqueCreate(t *testing.T) { // With ignore. vc.queries = nil - err = lookupNonUnique.(Lookup).Create(vc, [][]sqltypes.Value{[]sqltypes.Value{sqltypes.NewInt64(1)}, []sqltypes.Value{sqltypes.NewInt64(2)}}, [][]byte{[]byte("test1"), []byte("test2")}, true /* ignoreMode */) + err = lookupNonUnique.(Lookup).Create(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}, {sqltypes.NewInt64(2)}}, [][]byte{[]byte("test1"), []byte("test2")}, true /* ignoreMode */) if err != nil { t.Error(err) } @@ -208,7 +372,7 @@ func TestLookupNonUniqueCreate(t *testing.T) { // Test query fail. 
vc.mustFail = true - err = lookupNonUnique.(Lookup).Create(vc, [][]sqltypes.Value{[]sqltypes.Value{sqltypes.NewInt64(1)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false /* ignoreMode */) + err = lookupNonUnique.(Lookup).Create(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, [][]byte{[]byte("\x16k@\xb4J\xbaK\xd6")}, false /* ignoreMode */) want := "lookup.Create: execute failed" if err == nil || err.Error() != want { t.Errorf("lookupNonUnique(query fail) err: %v, want %s", err, want) @@ -216,9 +380,56 @@ func TestLookupNonUniqueCreate(t *testing.T) { vc.mustFail = false } +func TestLookupNonUniqueCreateAutocommit(t *testing.T) { + lookupNonUnique, err := CreateVindex("lookup", "lookup", map[string]string{ + "table": "t", + "from": "from1,from2", + "to": "toc", + "autocommit": "true", + }) + if err != nil { + t.Fatal(err) + } + vc := &vcursor{} + + err = lookupNonUnique.(Lookup).Create( + vc, + [][]sqltypes.Value{{ + sqltypes.NewInt64(1), sqltypes.NewInt64(2), + }, { + sqltypes.NewInt64(3), sqltypes.NewInt64(4), + }}, + [][]byte{[]byte("test1"), []byte("test2")}, + false /* ignoreMode */) + if err != nil { + t.Error(err) + } + + wantqueries := []*querypb.BoundQuery{{ + Sql: "insert into t(from1, from2, toc) values(:from10, :from20, :toc0), (:from11, :from21, :toc1) on duplicate key update from1=values(from1), from2=values(from2), toc=values(toc)", + BindVariables: map[string]*querypb.BindVariable{ + "from10": sqltypes.Int64BindVariable(1), + "from20": sqltypes.Int64BindVariable(2), + "toc0": sqltypes.BytesBindVariable([]byte("test1")), + "from11": sqltypes.Int64BindVariable(3), + "from21": sqltypes.Int64BindVariable(4), + "toc1": sqltypes.BytesBindVariable([]byte("test2")), + }, + }} + if !reflect.DeepEqual(vc.queries, wantqueries) { + t.Errorf("lookup.Create queries:\n%v, want\n%v", vc.queries, wantqueries) + } + + if got, want := vc.autocommits, 1; got != want { + t.Errorf("Create(autocommit) count: %d, want %d", got, want) + } +} + func 
TestLookupNonUniqueDelete(t *testing.T) { + lookupNonUnique := createLookup(t, "lookup", false) vc := &vcursor{} - err := lookupNonUnique.(Lookup).Delete(vc, [][]sqltypes.Value{[]sqltypes.Value{sqltypes.NewInt64(1)}, []sqltypes.Value{sqltypes.NewInt64(2)}}, []byte("test")) + + err := lookupNonUnique.(Lookup).Delete(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}, {sqltypes.NewInt64(2)}}, []byte("test")) if err != nil { t.Error(err) } @@ -242,10 +453,75 @@ func TestLookupNonUniqueDelete(t *testing.T) { // Test query fail. vc.mustFail = true - err = lookupNonUnique.(Lookup).Delete(vc, [][]sqltypes.Value{[]sqltypes.Value{sqltypes.NewInt64(1)}}, []byte("\x16k@\xb4J\xbaK\xd6")) + err = lookupNonUnique.(Lookup).Delete(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, []byte("\x16k@\xb4J\xbaK\xd6")) want := "lookup.Delete: execute failed" if err == nil || err.Error() != want { t.Errorf("lookupNonUnique(query fail) err: %v, want %s", err, want) } vc.mustFail = false } + +func TestLookupNonUniqueDeleteAutocommit(t *testing.T) { + lookupNonUnique, err := CreateVindex("lookup", "lookup", map[string]string{ + "table": "t", + "from": "fromc", + "to": "toc", + "autocommit": "true", + }) + vc := &vcursor{} + + err = lookupNonUnique.(Lookup).Delete(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}, {sqltypes.NewInt64(2)}}, []byte("test")) + if err != nil { + t.Error(err) + } + + wantqueries := []*querypb.BoundQuery(nil) + if !reflect.DeepEqual(vc.queries, wantqueries) { + t.Errorf("lookup.Delete queries:\n%v, want\n%v", vc.queries, wantqueries) + } +} + +func TestLookupNonUniqueUpdate(t *testing.T) { + lookupNonUnique := createLookup(t, "lookup", false) + vc := &vcursor{} + + err := lookupNonUnique.(Lookup).Update(vc, []sqltypes.Value{sqltypes.NewInt64(1)}, []byte("test"), []sqltypes.Value{sqltypes.NewInt64(2)}) + if err != nil { + t.Error(err) + } + + wantqueries := []*querypb.BoundQuery{{ + Sql: "delete from t where fromc = :fromc and toc = :toc", + BindVariables: 
map[string]*querypb.BindVariable{ + "fromc": sqltypes.Int64BindVariable(1), + "toc": sqltypes.BytesBindVariable([]byte("test")), + }, + }, { + Sql: "insert into t(fromc, toc) values(:fromc0, :toc0)", + BindVariables: map[string]*querypb.BindVariable{ + "fromc0": sqltypes.Int64BindVariable(2), + "toc0": sqltypes.BytesBindVariable([]byte("test")), + }, + }} + if !reflect.DeepEqual(vc.queries, wantqueries) { + t.Errorf("lookup.Update queries:\n%v, want\n%v", vc.queries, wantqueries) + } +} + +func createLookup(t *testing.T, name string, writeOnly bool) Vindex { + t.Helper() + write := "false" + if writeOnly { + write = "true" + } + l, err := CreateVindex(name, name, map[string]string{ + "table": "t", + "from": "fromc", + "to": "toc", + "write_only": write, + }) + if err != nil { + t.Fatal(err) + } + return l +} diff --git a/go/vt/vtgate/vindexes/lookup_unique_test.go b/go/vt/vtgate/vindexes/lookup_unique_test.go index a42ca51d0a2..6a81cc8a1b0 100644 --- a/go/vt/vtgate/vindexes/lookup_unique_test.go +++ b/go/vt/vtgate/vindexes/lookup_unique_test.go @@ -22,22 +22,53 @@ import ( "testing" "github.com/youtube/vitess/go/sqltypes" + querypb "github.com/youtube/vitess/go/vt/proto/query" ) +func TestLookupUniqueNew(t *testing.T) { + _ = createLookup(t, "lookup_unique", false) + + _, err := CreateVindex("lookup_unique", "lookup_unique", map[string]string{ + "table": "t", + "from": "fromc", + "to": "toc", + "write_only": "true", + }) + want := "write_only cannot be true for a unique lookup vindex" + if err == nil || err.Error() != want { + t.Errorf("Create(bad_scatter): %v, want %s", err, want) + } + + _, err = CreateVindex("lookup_unique", "lookup_unique", map[string]string{ + "table": "t", + "from": "fromc", + "to": "toc", + "write_only": "invalid", + }) + want = "write_only value must be 'true' or 'false': 'invalid'" + if err == nil || err.Error() != want { + t.Errorf("Create(bad_scatter): %v, want %s", err, want) + } +} + func TestLookupUniqueCost(t *testing.T) { + 
lookupUnique := createLookup(t, "lookup_unique", false) if lookupUnique.Cost() != 10 { t.Errorf("Cost(): %d, want 10", lookupUnique.Cost()) } } func TestLookupUniqueString(t *testing.T) { - if strings.Compare("lookupUnique", lookupUnique.String()) != 0 { - t.Errorf("String(): %s, want lookupUnique", lookupUnique.String()) + lookupUnique := createLookup(t, "lookup_unique", false) + if strings.Compare("lookup_unique", lookupUnique.String()) != 0 { + t.Errorf("String(): %s, want lookup_unique", lookupUnique.String()) } } func TestLookupUniqueMap(t *testing.T) { + lookupUnique := createLookup(t, "lookup_unique", false) vc := &vcursor{numRows: 1} + got, err := lookupUnique.(Unique).Map(vc, []sqltypes.Value{sqltypes.NewInt64(1), sqltypes.NewInt64(2)}) if err != nil { t.Error(err) @@ -78,7 +109,9 @@ func TestLookupUniqueMap(t *testing.T) { } func TestLookupUniqueVerify(t *testing.T) { + lookupUnique := createLookup(t, "lookup_unique", false) vc := &vcursor{numRows: 1} + _, err := lookupUnique.Verify(vc, []sqltypes.Value{sqltypes.NewInt64(1)}, [][]byte{[]byte("test")}) if err != nil { t.Error(err) @@ -86,45 +119,76 @@ func TestLookupUniqueVerify(t *testing.T) { if got, want := len(vc.queries), 1; got != want { t.Errorf("vc.queries length: %v, want %v", got, want) } +} + +func TestLookupUniqueCreate(t *testing.T) { + lookupUnique, err := CreateVindex("lookup_unique", "lookup_unique", map[string]string{ + "table": "t", + "from": "from", + "to": "toc", + "autocommit": "true", + }) + if err != nil { + t.Fatal(err) + } + vc := &vcursor{} - _, err = lookuphashunique.Verify(nil, []sqltypes.Value{sqltypes.NewInt64(1)}, [][]byte{[]byte("test1test23")}) - want := "lookup.Verify.vunhash: invalid keyspace id: 7465737431746573743233" - if err.Error() != want { + err = lookupUnique.(Lookup).Create(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, [][]byte{[]byte("test")}, false /* ignoreMode */) + if err != nil { t.Error(err) } + + wantqueries := []*querypb.BoundQuery{{ + Sql: "insert 
into t(from, toc) values(:from0, :toc0)", + BindVariables: map[string]*querypb.BindVariable{ + "from0": sqltypes.Int64BindVariable(1), + "toc0": sqltypes.BytesBindVariable([]byte("test")), + }, + }} + if !reflect.DeepEqual(vc.queries, wantqueries) { + t.Errorf("lookup.Create queries:\n%v, want\n%v", vc.queries, wantqueries) + } + + if got, want := vc.autocommits, 1; got != want { + t.Errorf("Create(autocommit) count: %d, want %d", got, want) + } } -func TestLookupUniqueCreate(t *testing.T) { +func TestLookupUniqueCreateAutocommit(t *testing.T) { + lookupUnique := createLookup(t, "lookup_unique", false) vc := &vcursor{} - err := lookupUnique.(Lookup).Create(vc, [][]sqltypes.Value{[]sqltypes.Value{sqltypes.NewInt64(1)}}, [][]byte{[]byte("test")}, false /* ignoreMode */) + + err := lookupUnique.(Lookup).Create(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, [][]byte{[]byte("test")}, false /* ignoreMode */) if err != nil { t.Error(err) } if got, want := len(vc.queries), 1; got != want { t.Errorf("vc.queries length: %v, want %v", got, want) } - - err = lookuphashunique.(Lookup).Create(nil, [][]sqltypes.Value{[]sqltypes.Value{sqltypes.NewInt64(1)}}, [][]byte{[]byte("test1test23")}, false /* ignoreMode */) - want := "lookup.Create.vunhash: invalid keyspace id: 7465737431746573743233" - if err.Error() != want { - t.Error(err) - } } func TestLookupUniqueDelete(t *testing.T) { + lookupUnique := createLookup(t, "lookup_unique", false) vc := &vcursor{} - err := lookupUnique.(Lookup).Delete(vc, [][]sqltypes.Value{[]sqltypes.Value{sqltypes.NewInt64(1)}}, []byte("test")) + + err := lookupUnique.(Lookup).Delete(vc, [][]sqltypes.Value{{sqltypes.NewInt64(1)}}, []byte("test")) if err != nil { t.Error(err) } if got, want := len(vc.queries), 1; got != want { t.Errorf("vc.queries length: %v, want %v", got, want) } +} + +func TestLookupUniqueUpdate(t *testing.T) { + lookupUnique := createLookup(t, "lookup_unique", false) + vc := &vcursor{} - //Negative Test - err = 
lookuphashunique.(Lookup).Delete(vc, [][]sqltypes.Value{[]sqltypes.Value{sqltypes.NewInt64(1)}}, []byte("test1test23")) - want := "lookup.Delete.vunhash: invalid keyspace id: 7465737431746573743233" - if err.Error() != want { + err := lookupUnique.(Lookup).Update(vc, []sqltypes.Value{sqltypes.NewInt64(1)}, []byte("test"), []sqltypes.Value{sqltypes.NewInt64(2)}) + if err != nil { t.Error(err) } + if got, want := len(vc.queries), 2; got != want { + t.Errorf("vc.queries length: %v, want %v", got, want) + } } diff --git a/go/vt/vtgate/vindexes/vindex.go b/go/vt/vtgate/vindexes/vindex.go index d7e5a627ec4..39efc85d2b9 100644 --- a/go/vt/vtgate/vindexes/vindex.go +++ b/go/vt/vtgate/vindexes/vindex.go @@ -22,6 +22,7 @@ import ( "github.com/youtube/vitess/go/sqltypes" querypb "github.com/youtube/vitess/go/vt/proto/query" + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) // This file defines interfaces and registration for vindexes. @@ -31,6 +32,7 @@ import ( // can use this interface to execute lookup queries. type VCursor interface { Execute(method string, query string, bindvars map[string]*querypb.BindVariable, isDML bool) (*sqltypes.Result, error) + ExecuteAutocommit(method string, query string, bindvars map[string]*querypb.BindVariable, isDML bool) (*sqltypes.Result, error) } // Vindex defines the interface required to register a vindex. @@ -62,10 +64,17 @@ type Unique interface { Map(cursor VCursor, ids []sqltypes.Value) ([][]byte, error) } +// Ksids represents keyspace ids. It's either a list of keyspace ids +// or a keyrange. +type Ksids struct { + Range *topodatapb.KeyRange + IDs [][]byte +} + // NonUnique defines the interface for a non-unique vindex. // This means that an id can map to multiple keyspace ids. type NonUnique interface { - Map(cursor VCursor, ids []sqltypes.Value) ([][][]byte, error) + Map(cursor VCursor, ids []sqltypes.Value) ([]Ksids, error) } // IsUnique returns true if the Vindex is Unique. 
@@ -103,7 +112,11 @@ type Lookup interface { // Create creates an association between ids and ksids. If ignoreMode // is true, then the Create should ignore dup key errors. Create(vc VCursor, rowsColValues [][]sqltypes.Value, ksids [][]byte, ignoreMode bool) error - Delete(VCursor, [][]sqltypes.Value, []byte) error + + Delete(vc VCursor, rowsColValues [][]sqltypes.Value, ksid []byte) error + + // Update replaces the mapping of old values with new values for a keyspace id. + Update(vc VCursor, oldValues []sqltypes.Value, ksid []byte, newValues []sqltypes.Value) error } // A NewVindexFunc is a function that creates a Vindex based on the diff --git a/go/vt/vtgate/vindexes/vschema_test.go b/go/vt/vtgate/vindexes/vschema_test.go index d3fb2a3ee3d..d4654f3c566 100644 --- a/go/vt/vtgate/vindexes/vschema_test.go +++ b/go/vt/vtgate/vindexes/vschema_test.go @@ -47,6 +47,8 @@ func NewSTFU(name string, params map[string]string) (Vindex, error) { return &stFU{name: name, Params: params}, nil } +var _ Unique = (*stFU)(nil) + // stF satisfies Functional, but no Map. Invalid vindex. 
type stF struct { name string @@ -67,34 +69,42 @@ type stLN struct { Params map[string]string } -func (v *stLN) String() string { return v.name } -func (*stLN) Cost() int { return 0 } -func (*stLN) Verify(VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { return []bool{}, nil } -func (*stLN) Map(VCursor, []sqltypes.Value) ([][][]byte, error) { return nil, nil } -func (*stLN) Create(VCursor, [][]sqltypes.Value, [][]byte, bool) error { return nil } -func (*stLN) Delete(VCursor, [][]sqltypes.Value, []byte) error { return nil } +func (v *stLN) String() string { return v.name } +func (*stLN) Cost() int { return 0 } +func (*stLN) Verify(VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { return []bool{}, nil } +func (*stLN) Map(VCursor, []sqltypes.Value) ([]Ksids, error) { return nil, nil } +func (*stLN) Create(VCursor, [][]sqltypes.Value, [][]byte, bool) error { return nil } +func (*stLN) Delete(VCursor, [][]sqltypes.Value, []byte) error { return nil } +func (*stLN) Update(VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error { return nil } func NewSTLN(name string, params map[string]string) (Vindex, error) { return &stLN{name: name, Params: params}, nil } +var _ NonUnique = (*stLN)(nil) +var _ Lookup = (*stLN)(nil) + // stLU satisfies Lookup, Unique. 
type stLU struct { name string Params map[string]string } -func (v *stLU) String() string { return v.name } -func (*stLU) Cost() int { return 2 } -func (*stLU) Verify(VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { return []bool{}, nil } -func (*stLU) Map(VCursor, []sqltypes.Value) ([][]byte, error) { return nil, nil } -func (*stLU) Create(VCursor, [][]sqltypes.Value, [][]byte, bool) error { return nil } -func (*stLU) Delete(VCursor, [][]sqltypes.Value, []byte) error { return nil } +func (v *stLU) String() string { return v.name } +func (*stLU) Cost() int { return 2 } +func (*stLU) Verify(VCursor, []sqltypes.Value, [][]byte) ([]bool, error) { return []bool{}, nil } +func (*stLU) Map(VCursor, []sqltypes.Value) ([][]byte, error) { return nil, nil } +func (*stLU) Create(VCursor, [][]sqltypes.Value, [][]byte, bool) error { return nil } +func (*stLU) Delete(VCursor, [][]sqltypes.Value, []byte) error { return nil } +func (*stLU) Update(VCursor, []sqltypes.Value, []byte, []sqltypes.Value) error { return nil } func NewSTLU(name string, params map[string]string) (Vindex, error) { return &stLU{name: name, Params: params}, nil } +var _ Unique = (*stLU)(nil) +var _ Lookup = (*stLU)(nil) + func init() { Register("stfu", NewSTFU) Register("stf", NewSTF) diff --git a/go/vt/vtgate/vschema_stats.go b/go/vt/vtgate/vschema_stats.go index e1df67a0e95..6def0dd9e5d 100644 --- a/go/vt/vtgate/vschema_stats.go +++ b/go/vt/vtgate/vschema_stats.go @@ -77,14 +77,17 @@ const ( - + {{if .Error}} -{{else}} + + + +{{end}} @@ -97,7 +100,6 @@ const ( {{end}} -{{end}}
VSchema{{if not .Error}} in JSON{{end}}VSchema Cache in JSON
Error {{$.Error}}
colspan="4">
Keyspace Sharded{{$ks.TableCount}} {{$ks.VindexCount}}
` ) diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index 6ee7f9639e1..6f1b8aed3a6 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -29,6 +29,7 @@ import ( "golang.org/x/net/context" "github.com/youtube/vitess/go/acl" + "github.com/youtube/vitess/go/flagutil" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/stats" "github.com/youtube/vitess/go/tb" @@ -52,11 +53,14 @@ import ( ) var ( - transactionMode = flag.String("transaction_mode", "MULTI", "SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit") - normalizeQueries = flag.Bool("normalize_queries", true, "Rewrite queries with bind vars. Turn this off if the app itself sends normalized queries with bind vars.") - streamBufferSize = flag.Int("stream_buffer_size", 32*1024, "the number of bytes sent from vtgate for each stream call. It's recommended to keep this value in sync with vttablet's query-server-config-stream-buffer-size.") - queryPlanCacheSize = flag.Int64("gate_query_cache_size", 10000, "gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.") - legacyAutocommit = flag.Bool("legacy_autocommit", false, "DEPRECATED: set this flag to true to get the legacy behavior: all transactions will need an explicit begin, and DMLs outside transactions will return an error.") + transactionMode = flag.String("transaction_mode", "MULTI", "SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit") + normalizeQueries = flag.Bool("normalize_queries", true, "Rewrite queries with bind vars. 
Turn this off if the app itself sends normalized queries with bind vars.") + streamBufferSize = flag.Int("stream_buffer_size", 32*1024, "the number of bytes sent from vtgate for each stream call. It's recommended to keep this value in sync with vttablet's query-server-config-stream-buffer-size.") + queryPlanCacheSize = flag.Int64("gate_query_cache_size", 10000, "gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache.") + legacyAutocommit = flag.Bool("legacy_autocommit", false, "DEPRECATED: set this flag to true to get the legacy behavior: all transactions will need an explicit begin, and DMLs outside transactions will return an error.") + enableForwarding = flag.Bool("enable_forwarding", false, "if specified, this process will also expose a QueryService interface that allows other vtgates to talk through this vtgate to the underlying tablets.") + l2vtgateAddrs flagutil.StringListValue + disableLocalGateway = flag.Bool("disable_local_gateway", false, "if specified, this process will not route any queries to local tablets in the local cell") ) func getTxMode() vtgatepb.TransactionMode { @@ -111,6 +115,8 @@ type VTGate struct { executor *Executor resolver *Resolver txConn *TxConn + gw gateway.Gateway + l2vtgate *L2VTGate // stats objects. // TODO(sougou): This needs to be cleaned up. There @@ -153,20 +159,49 @@ func Init(ctx context.Context, hc discovery.HealthCheck, topoServer *topo.Server // Build objects from low to high level. // Start with the gateway. If we can't reach the topology service, // we can't go on much further, so we log.Fatal out. 
- gw := gateway.GetCreator()(hc, topoServer, serv, cell, retryCount) - if err := gateway.WaitForTablets(gw, tabletTypesToWait); err != nil { - log.Fatalf("gateway.WaitForTablets failed: %v", err) + var gw gateway.Gateway + var l2vtgate *L2VTGate + if !*disableLocalGateway { + gw = gateway.GetCreator()(hc, topoServer, serv, cell, retryCount) + if err := gateway.WaitForTablets(gw, tabletTypesToWait); err != nil { + log.Fatalf("gateway.WaitForTablets failed: %v", err) + } + + // l2vtgate gives access to the underlying Gateway + // from an exported QueryService interface. + if *enableForwarding { + l2vtgate = initL2VTGate(gw) + } + } + + // If we have other vtgate pools to connect to, create a + // HybridGateway to perform the routing. + if len(l2vtgateAddrs) > 0 { + hgw, err := gateway.NewHybridGateway(gw, l2vtgateAddrs, retryCount) + if err != nil { + log.Fatalf("gateway.NewHybridGateway failed: %v", err) + } + hgw.RegisterStats() + gw = hgw + } + + // Check we have something to do. + if gw == nil { + log.Fatalf("'-disable_local_gateway' cannot be specified if 'l2vtgate_addrs' is also empty, otherwise this vtgate has no backend") } tc := NewTxConn(gw, getTxMode()) // ScatterConn depends on TxConn to perform forced rollbacks. sc := NewScatterConn("VttabletCall", tc, gw, hc) - resolver := NewResolver(serv, cell, sc) + srvResolver := srvtopo.NewResolver(serv, gw, cell) + resolver := NewResolver(srvResolver, serv, cell, sc) rpcVTGate = &VTGate{ executor: NewExecutor(ctx, serv, cell, "VTGateExecutor", resolver, *normalizeQueries, *streamBufferSize, *queryPlanCacheSize, *legacyAutocommit), resolver: resolver, txConn: tc, + gw: gw, + l2vtgate: l2vtgate, timings: stats.NewMultiTimings("VtgateApi", []string{"Operation", "Keyspace", "DbType"}), rowsReturned: stats.NewMultiCounters("VtgateApiRowsReturned", []string{"Operation", "Keyspace", "DbType"}), @@ -233,6 +268,16 @@ func (vtg *VTGate) IsHealthy() error { return nil } +// Gateway returns the current gateway implementation. 
Mostly used for tests. +func (vtg *VTGate) Gateway() gateway.Gateway { + return vtg.gw +} + +// L2VTGate returns the L2VTGate object. Mostly used for tests. +func (vtg *VTGate) L2VTGate() *L2VTGate { + return vtg.l2vtgate +} + // Execute executes a non-streaming query. This is a V3 function. func (vtg *VTGate) Execute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (newSession *vtgatepb.Session, qr *sqltypes.Result, err error) { target := vtg.executor.ParseTarget(session.TargetString) @@ -305,8 +350,8 @@ func (vtg *VTGate) StreamExecute(ctx context.Context, session *vtgatepb.Session, bindVariables, target.Keyspace, target.TabletType, - func(keyspace string) (string, []string, error) { - return keyspace, []string{target.Shard}, nil + func(keyspace string) ([]*srvtopo.ResolvedShard, error) { + return vtg.resolver.resolver.ResolveShards(ctx, keyspace, []string{target.Shard}, target.TabletType) }, session.Options, func(reply *sqltypes.Result) error { @@ -339,7 +384,8 @@ handleError: return nil } -// ExecuteShards executes a non-streaming query on the specified shards. This is a legacy function. +// ExecuteShards executes a non-streaming query on the specified shards. +// This is a legacy function. 
func (vtg *VTGate) ExecuteShards(ctx context.Context, sql string, bindVariables map[string]*querypb.BindVariable, keyspace string, shards []string, tabletType topodatapb.TabletType, session *vtgatepb.Session, notInTransaction bool, options *querypb.ExecuteOptions) (*sqltypes.Result, error) { startTime := time.Now() ltt := topoproto.TabletTypeLString(tabletType) @@ -363,8 +409,8 @@ func (vtg *VTGate) ExecuteShards(ctx context.Context, sql string, bindVariables keyspace, tabletType, session, - func(keyspace string) (string, []string, error) { - return keyspace, shards, nil + func() ([]*srvtopo.ResolvedShard, error) { + return vtg.resolver.resolver.ResolveShards(ctx, keyspace, shards, tabletType) }, notInTransaction, options, @@ -531,7 +577,7 @@ func (vtg *VTGate) ExecuteBatchShards(ctx context.Context, queries []*vtgatepb.B session, options, func() (*scatterBatchRequest, error) { - return boundShardQueriesToScatterBatchRequest(queries) + return boundShardQueriesToScatterBatchRequest(ctx, vtg.resolver.resolver, queries, tabletType) }) if err == nil { var rowCount int64 @@ -573,13 +619,15 @@ func (vtg *VTGate) ExecuteBatchKeyspaceIds(ctx context.Context, queries []*vtgat annotateBoundKeyspaceIDQueries(queries) - qrs, err = vtg.resolver.ExecuteBatchKeyspaceIds( + qrs, err = vtg.resolver.ExecuteBatch( ctx, - queries, tabletType, asTransaction, session, - options) + options, + func() (*scatterBatchRequest, error) { + return boundKeyspaceIDQueriesToScatterBatchRequest(ctx, vtg.resolver.resolver, queries, tabletType) + }) if err == nil { var rowCount int64 for _, qr := range qrs { @@ -715,8 +763,8 @@ func (vtg *VTGate) StreamExecuteShards(ctx context.Context, sql string, bindVari bindVariables, keyspace, tabletType, - func(keyspace string) (string, []string, error) { - return keyspace, shards, nil + func(keyspace string) ([]*srvtopo.ResolvedShard, error) { + return vtg.resolver.resolver.ResolveShards(ctx, keyspace, shards, tabletType) }, options, func(reply 
*sqltypes.Result) error { @@ -814,63 +862,58 @@ func (vtg *VTGate) SplitQuery( } // TODO(erez): Add validation of SplitQuery parameters. - keyspace, srvKeyspace, shardRefs, err := srvtopo.GetKeyspaceShards( - ctx, vtg.resolver.toposerv, vtg.resolver.cell, keyspace, topodatapb.TabletType_RDONLY) + rss, srvKeyspace, err := vtg.resolver.resolver.GetAllShards(ctx, keyspace, topodatapb.TabletType_RDONLY) if err != nil { return nil, err } // If the caller specified a splitCount (vs. specifying 'numRowsPerQueryPart') scale it by the // number of shards (otherwise it stays 0). - perShardSplitCount := int64(math.Ceil(float64(splitCount) / float64(len(shardRefs)))) + perShardSplitCount := int64(math.Ceil(float64(splitCount) / float64(len(rss)))) // Determine whether to return SplitQueryResponse_KeyRangeParts or SplitQueryResponse_ShardParts. // We return 'KeyRangeParts' for sharded keyspaces that are not custom sharded. If the // keyspace is custom sharded or unsharded we return 'ShardParts'. var querySplitToQueryPartFunc func( - querySplit *querypb.QuerySplit, shard string) (*vtgatepb.SplitQueryResponse_Part, error) + querySplit *querypb.QuerySplit, rs *srvtopo.ResolvedShard) (*vtgatepb.SplitQueryResponse_Part, error) if vtg.isKeyspaceRangeBasedSharded(keyspace, srvKeyspace) { - // Index the shard references in 'shardRefs' by shard name. - shardRefByName := make(map[string]*topodatapb.ShardReference, len(shardRefs)) - for _, shardRef := range shardRefs { - shardRefByName[shardRef.Name] = shardRef - } - querySplitToQueryPartFunc = func(querySplit *querypb.QuerySplit, shard string) (*vtgatepb.SplitQueryResponse_Part, error) { - // TODO(erez): Assert that shardRefByName contains an entry for 'shard'. - // Keyrange can be nil for the shard (e.g. for single-sharded keyspaces during resharding). - // In this case we append an empty keyrange that represents the entire keyspace. 
- keyranges := []*topodatapb.KeyRange{{Start: []byte{}, End: []byte{}}} - if shardRefByName[shard].KeyRange != nil { - keyranges = []*topodatapb.KeyRange{shardRefByName[shard].KeyRange} + querySplitToQueryPartFunc = func(querySplit *querypb.QuerySplit, rs *srvtopo.ResolvedShard) (*vtgatepb.SplitQueryResponse_Part, error) { + // Use ValidateShardName to extract the keyrange. + _, kr, err := topo.ValidateShardName(rs.Target.Shard) + if err != nil { + return nil, fmt.Errorf("cannot extract keyrange from shard name %v: %v", rs.Target.Shard, err) + } + if kr == nil { + // Keyrange can be nil for the shard (e.g. for single-sharded keyspaces during resharding). + // In this case we append an empty keyrange that represents the entire keyspace. + kr = &topodatapb.KeyRange{ + Start: []byte{}, + End: []byte{}, + } } return &vtgatepb.SplitQueryResponse_Part{ Query: querySplit.Query, KeyRangePart: &vtgatepb.SplitQueryResponse_KeyRangePart{ Keyspace: keyspace, - KeyRanges: keyranges, + KeyRanges: []*topodatapb.KeyRange{kr}, }, Size: querySplit.RowCount, }, nil } } else { // Keyspace is either unsharded or custom-sharded. - querySplitToQueryPartFunc = func(querySplit *querypb.QuerySplit, shard string) (*vtgatepb.SplitQueryResponse_Part, error) { + querySplitToQueryPartFunc = func(querySplit *querypb.QuerySplit, rs *srvtopo.ResolvedShard) (*vtgatepb.SplitQueryResponse_Part, error) { return &vtgatepb.SplitQueryResponse_Part{ Query: querySplit.Query, ShardPart: &vtgatepb.SplitQueryResponse_ShardPart{ Keyspace: keyspace, - Shards: []string{shard}, + Shards: []string{rs.Target.Shard}, }, Size: querySplit.RowCount, }, nil } } - // Collect all shard names into a slice. 
- shardNames := make([]string, 0, len(shardRefs)) - for _, shardRef := range shardRefs { - shardNames = append(shardNames, shardRef.Name) - } return vtg.resolver.scatterConn.SplitQuery( ctx, sql, @@ -879,9 +922,8 @@ func (vtg *VTGate) SplitQuery( perShardSplitCount, numRowsPerQueryPart, algorithm, - shardNames, - querySplitToQueryPartFunc, - keyspace) + rss, + querySplitToQueryPartFunc) } // GetSrvKeyspace is part of the vtgate service API. @@ -897,7 +939,7 @@ func (vtg *VTGate) MessageStream(ctx context.Context, keyspace string, shard str statsKey := []string{"MessageStream", keyspace, ltt} defer vtg.timings.Record(statsKey, startTime) - err := vtg.resolver.MessageStream( + err := vtg.executor.MessageStream( ctx, keyspace, shard, @@ -1106,3 +1148,7 @@ func unambiguousKeyspaceBSQ(queries []*vtgatepb.BoundShardQuery) string { return keyspace } } + +func init() { + flag.Var(&l2vtgateAddrs, "l2vtgate_addrs", "Specifies a comma-separated list of other l2 vtgate pools to connect to. These other vtgates must run with the --enable_forwarding flag") +} diff --git a/go/vt/vtgate/vtgate_test.go b/go/vt/vtgate/vtgate_test.go index 02ce97077c6..8f1c73958f3 100644 --- a/go/vt/vtgate/vtgate_test.go +++ b/go/vt/vtgate/vtgate_test.go @@ -342,7 +342,7 @@ func TestVTGateExecuteWithKeyspaceShard(t *testing.T) { "select id from none", nil, ) - want = "vtgate: : target: TestUnsharded.noshard.master, no valid tablet" + want = "vtgate: : target: TestUnsharded.noshard.master, no valid tablet: node doesn't exist" if err == nil || err.Error() != want { t.Errorf("Execute: %v, want %s", err, want) } @@ -1188,12 +1188,10 @@ func TestVTGateSplitQuerySharded(t *testing.T) { func TestVTGateMessageStreamSharded(t *testing.T) { ks := "TestVTGateMessageStreamSharded" - shard1 := "-20" - shard2 := "20-40" createSandbox(ks) hcVTGateTest.Reset() - _ = hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, ks, shard1, topodatapb.TabletType_MASTER, true, 1, nil) - _ = hcVTGateTest.AddTestTablet("aa", 
"1.1.1.1", 1002, ks, shard2, topodatapb.TabletType_MASTER, true, 1, nil) + _ = hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1001, ks, "-20", topodatapb.TabletType_MASTER, true, 1, nil) + _ = hcVTGateTest.AddTestTablet("aa", "1.1.1.1", 1002, ks, "20-40", topodatapb.TabletType_MASTER, true, 1, nil) ch := make(chan *sqltypes.Result) done := make(chan struct{}) ctx, cancel := context.WithCancel(context.Background()) @@ -1230,7 +1228,7 @@ func TestVTGateMessageStreamSharded(t *testing.T) { }) want := "keyrange -30 does not exactly match shards" if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("MessageStream: %v, must contain %s", err, want) + t.Errorf("MessageStream: '%v', must contain '%s'", err, want) } } diff --git a/go/vt/vtqueryserver/endtoend_test.go b/go/vt/vtqueryserver/endtoend_test.go index b04a0bb8a58..96801754403 100644 --- a/go/vt/vtqueryserver/endtoend_test.go +++ b/go/vt/vtqueryserver/endtoend_test.go @@ -19,8 +19,8 @@ import ( "context" "flag" "fmt" + "io/ioutil" "os" - "path" "strings" "testing" @@ -77,10 +77,17 @@ func TestMain(m *testing.M) { return 1 } defer cluster.TearDown() - mysqlConnParams = cluster.MySQLConnParams() - proxySock := path.Join(cluster.Env.Directory(), "mysqlproxy.sock") + // Setup a unix socket to connect to the proxy. + // We use a temporary file. + unixSocket, err := ioutil.TempFile("", "mysqlproxy.sock") + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to create temp file: %v", err) + return 1 + } + proxySock := unixSocket.Name() + os.Remove(proxySock) proxyConnParams.UnixSocket = proxySock proxyConnParams.Uname = "proxy" @@ -89,11 +96,10 @@ func TestMain(m *testing.M) { *mysqlServerSocketPath = proxyConnParams.UnixSocket *mysqlAuthServerImpl = "none" + // Initialize the query service on top of the vttest MySQL database. 
dbcfgs := dbconfigs.DBConfigs{ App: mysqlConnParams, } - - var err error queryServer, err = initProxy(&dbcfgs) if err != nil { fmt.Fprintf(os.Stderr, "could not start proxy: %v\n", err) @@ -101,6 +107,7 @@ func TestMain(m *testing.M) { } defer queryServer.StopService() + // Initialize the MySQL server protocol to talk to the query service. initMySQLProtocol() defer shutdownMySQLProtocol() @@ -301,6 +308,56 @@ func TestNoAutocommit(t *testing.T) { testFetch(t, conn2, "select * from test", 0) } +func TestTransactionsInProcess(t *testing.T) { + ctx := context.Background() + conn, err := mysql.Connect(ctx, &proxyConnParams) + if err != nil { + t.Fatal(err) + } + conn2, err := mysql.Connect(ctx, &proxyConnParams) + if err != nil { + t.Fatal(err) + } + + testDML(t, conn, "begin", 1, 0) + testDML(t, conn, "insert into test (id, val) values(1, 'hello')", 1, 1) + testFetch(t, conn, "select * from test", 1) + testFetch(t, conn2, "select * from test", 0) + + // A second begin causes the first transaction to commit and then + // runs the begin + testDML(t, conn, "begin", 2, 0) + testFetch(t, conn, "select * from test", 1) + testFetch(t, conn2, "select * from test", 1) + testDML(t, conn, "rollback", 1, 0) + + testFetch(t, conn, "select * from test", 1) + testFetch(t, conn2, "select * from test", 1) + + testDML(t, conn, "set autocommit=0", 0, 0) + testDML(t, conn, "begin", 1, 0) + testDML(t, conn, "insert into test (id, val) values(2, 'hello')", 1, 1) + testFetch(t, conn, "select * from test", 2) + testFetch(t, conn2, "select * from test", 1) + + // Setting autocommit=1 causes the existing transaction to commit + testDML(t, conn, "set autocommit=1", 1, 0) + testFetch(t, conn, "select * from test", 2) + testFetch(t, conn2, "select * from test", 2) + + testDML(t, conn, "insert into test (id, val) values(3, 'hello')", 3, 1) + testFetch(t, conn, "select * from test", 3) + testFetch(t, conn2, "select * from test", 3) + + testDML(t, conn2, "begin", 1, 0) + testDML(t, conn2, "delete 
from test", 2, 3) + testDML(t, conn2, "commit", 1, 0) + + testFetch(t, conn, "select * from test", 0) + testFetch(t, conn2, "select * from test", 0) + +} + func TestOther(t *testing.T) { ctx := context.Background() conn, err := mysql.Connect(ctx, &proxyConnParams) diff --git a/go/vt/vttablet/endtoend/acl_test.go b/go/vt/vttablet/endtoend/acl_test.go index e988b371c33..a02c43dcbeb 100644 --- a/go/vt/vttablet/endtoend/acl_test.go +++ b/go/vt/vttablet/endtoend/acl_test.go @@ -23,12 +23,13 @@ import ( "testing" "github.com/youtube/vitess/go/sqltypes" - querypb "github.com/youtube/vitess/go/vt/proto/query" "github.com/youtube/vitess/go/vt/vttablet/endtoend/framework" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/rules" + + querypb "github.com/youtube/vitess/go/vt/proto/query" ) -func TestTableACLNoAccess(t *testing.T) { +func TestTableACL(t *testing.T) { client := framework.NewClient() aclErr := "table acl error" @@ -79,6 +80,17 @@ func TestTableACLNoAccess(t *testing.T) { }, { query: "alter table vitess_acl_all_user_read_only comment 'comment'", err: aclErr, + }, { + query: "select * from vitess_acl_read_only, vitess_acl_no_access", + err: aclErr, + }, { + query: "delete from vitess_acl_read_write where key1=(select key1 from vitess_acl_no_access)", + err: aclErr, + }, { + query: "delete from vitess_acl_read_write where key1=(select key1 from vitess_acl_read_only)", + }, { + query: "update vitess_acl_read_write join vitess_acl_read_only on 1!=1 set key1=1", + err: aclErr, }} for _, tcase := range execCases { @@ -90,7 +102,7 @@ func TestTableACLNoAccess(t *testing.T) { continue } if err == nil || !strings.HasPrefix(err.Error(), tcase.err) { - t.Errorf("Error: %v, must start with %s", err, tcase.err) + t.Errorf("Execute(%s): Error: %v, must start with %s", tcase.query, err, tcase.err) } } diff --git a/go/vt/vttablet/heartbeat/heartbeat.go b/go/vt/vttablet/heartbeat/heartbeat.go index 5bdabf0c40a..3f2d0738780 100644 --- a/go/vt/vttablet/heartbeat/heartbeat.go +++ 
b/go/vt/vttablet/heartbeat/heartbeat.go @@ -43,5 +43,7 @@ var ( readErrors = stats.NewInt("HeartbeatReadErrors") // HeartbeatCumulativeLagNs is incremented by the current lag at each heartbeat read interval. Plotting this // over time allows calculating of a rolling average lag. - lagNs = stats.NewInt("HeartbeatCumulativeLagNs") + cumulativeLagNs = stats.NewInt("HeartbeatCumulativeLagNs") + // HeartbeatCurrentLagNs is a point-in-time calculation of the lag, updated at each heartbeat read interval. + currentLagNs = stats.NewInt("HeartbeatCurrentLagNs") ) diff --git a/go/vt/vttablet/heartbeat/reader.go b/go/vt/vttablet/heartbeat/reader.go index afaf4ede255..128500a641a 100644 --- a/go/vt/vttablet/heartbeat/reader.go +++ b/go/vt/vttablet/heartbeat/reader.go @@ -30,7 +30,6 @@ import ( "github.com/youtube/vitess/go/timer" "github.com/youtube/vitess/go/vt/dbconfigs" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/proto/query" "github.com/youtube/vitess/go/vt/sqlparser" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/connpool" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -91,7 +90,7 @@ func (r *Reader) InitDBConfig(dbcfgs dbconfigs.DBConfigs) { // Init does last minute initialization of db settings, such as dbName // and keyspaceShard -func (r *Reader) Init(target query.Target) { +func (r *Reader) Init(target querypb.Target) { if !r.enabled { return } @@ -164,7 +163,8 @@ func (r *Reader) readHeartbeat() { } lag := r.now().Sub(time.Unix(0, ts)) - lagNs.Add(lag.Nanoseconds()) + cumulativeLagNs.Add(lag.Nanoseconds()) + currentLagNs.Set(lag.Nanoseconds()) reads.Add(1) r.lagMu.Lock() diff --git a/go/vt/vttablet/heartbeat/reader_test.go b/go/vt/vttablet/heartbeat/reader_test.go index 92e0389babe..037d072b8f1 100644 --- a/go/vt/vttablet/heartbeat/reader_test.go +++ b/go/vt/vttablet/heartbeat/reader_test.go @@ -27,8 +27,9 @@ import ( "github.com/youtube/vitess/go/sqlescape" "github.com/youtube/vitess/go/sqltypes" 
"github.com/youtube/vitess/go/vt/dbconfigs" - "github.com/youtube/vitess/go/vt/proto/query" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" + + querypb "github.com/youtube/vitess/go/vt/proto/query" ) // TestReaderReadHeartbeat tests that reading a heartbeat sets the appropriate @@ -40,7 +41,7 @@ func TestReaderReadHeartbeat(t *testing.T) { defer tr.Close() db.AddQuery(fmt.Sprintf("SELECT ts FROM %s.heartbeat WHERE keyspaceShard='%s'", tr.dbName, tr.keyspaceShard), &sqltypes.Result{ - Fields: []*query.Field{ + Fields: []*querypb.Field{ {Name: "ts", Type: sqltypes.Int64}, }, Rows: [][]sqltypes.Value{{ @@ -48,7 +49,7 @@ func TestReaderReadHeartbeat(t *testing.T) { }}, }) - lagNs.Set(0) + cumulativeLagNs.Set(0) readErrors.Set(0) reads.Set(0) @@ -61,7 +62,7 @@ func TestReaderReadHeartbeat(t *testing.T) { if got, want := lag, 10*time.Second; got != want { t.Fatalf("wrong latest lag: got = %v, want = %v", tr.lastKnownLag, want) } - if got, want := lagNs.Get(), 10*time.Second.Nanoseconds(); got != want { + if got, want := cumulativeLagNs.Get(), 10*time.Second.Nanoseconds(); got != want { t.Fatalf("wrong cumulative lag: got = %v, want = %v", got, want) } if got, want := reads.Get(), int64(1); got != want { @@ -80,7 +81,7 @@ func TestReaderReadHeartbeatError(t *testing.T) { tr := newReader(db, mockNowFunc) defer tr.Close() - lagNs.Set(0) + cumulativeLagNs.Set(0) readErrors.Set(0) tr.readHeartbeat() @@ -92,7 +93,7 @@ func TestReaderReadHeartbeatError(t *testing.T) { if got, want := lag, 0*time.Second; got != want { t.Fatalf("wrong lastKnownLag: got = %v, want = %v", got, want) } - if got, want := lagNs.Get(), int64(0); got != want { + if got, want := cumulativeLagNs.Get(), int64(0); got != want { t.Fatalf("wrong cumulative lag: got = %v, want = %v", got, want) } if got, want := readErrors.Get(), int64(1); got != want { diff --git a/go/vt/vttablet/heartbeat/writer.go b/go/vt/vttablet/heartbeat/writer.go index 812c571b272..74139a97161 100644 --- 
a/go/vt/vttablet/heartbeat/writer.go +++ b/go/vt/vttablet/heartbeat/writer.go @@ -33,13 +33,12 @@ import ( "github.com/youtube/vitess/go/vt/dbconfigs" "github.com/youtube/vitess/go/vt/dbconnpool" "github.com/youtube/vitess/go/vt/logutil" - "github.com/youtube/vitess/go/vt/proto/query" - "github.com/youtube/vitess/go/vt/proto/topodata" "github.com/youtube/vitess/go/vt/sqlparser" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/connpool" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" querypb "github.com/youtube/vitess/go/vt/proto/query" + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) const ( @@ -61,7 +60,7 @@ type Writer struct { enabled bool interval time.Duration - tabletAlias topodata.TabletAlias + tabletAlias topodatapb.TabletAlias keyspaceShard string dbName string now func() time.Time @@ -74,7 +73,7 @@ type Writer struct { } // NewWriter creates a new Writer. -func NewWriter(checker connpool.MySQLChecker, alias topodata.TabletAlias, config tabletenv.TabletConfig) *Writer { +func NewWriter(checker connpool.MySQLChecker, alias topodatapb.TabletAlias, config tabletenv.TabletConfig) *Writer { if !config.HeartbeatEnable { return &Writer{} } @@ -96,7 +95,7 @@ func (w *Writer) InitDBConfig(dbcfgs dbconfigs.DBConfigs) { // Init runs at tablet startup and last minute initialization of db settings, and // creates the necessary tables for heartbeat. -func (w *Writer) Init(target query.Target) error { +func (w *Writer) Init(target querypb.Target) error { if !w.enabled { return nil } diff --git a/go/vt/vttablet/tabletserver/planbuilder/permission.go b/go/vt/vttablet/tabletserver/planbuilder/permission.go new file mode 100644 index 00000000000..944c4413ba9 --- /dev/null +++ b/go/vt/vttablet/tabletserver/planbuilder/permission.go @@ -0,0 +1,114 @@ +/* +Copyright 2018 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package planbuilder + +import ( + "fmt" + + "github.com/youtube/vitess/go/vt/sqlparser" + "github.com/youtube/vitess/go/vt/tableacl" +) + +// Permission associates the required access permission +// for each table. +type Permission struct { + TableName string + Role tableacl.Role +} + +// BuildPermissions builds the list of required permissions for all the +// tables referenced in a query. +func BuildPermissions(stmt sqlparser.Statement) []Permission { + var permissions []Permission + // All Statement types myst be covered here. + switch node := stmt.(type) { + case *sqlparser.Union, *sqlparser.Select: + permissions = buildSubqueryPermissions(node, tableacl.READER, permissions) + case *sqlparser.Insert: + permissions = buildTableNamePermissions(node.Table, tableacl.WRITER, permissions) + permissions = buildSubqueryPermissions(node, tableacl.READER, permissions) + case *sqlparser.Update: + permissions = buildTableExprsPermissions(node.TableExprs, tableacl.WRITER, permissions) + permissions = buildSubqueryPermissions(node, tableacl.READER, permissions) + case *sqlparser.Delete: + permissions = buildTableExprsPermissions(node.TableExprs, tableacl.WRITER, permissions) + permissions = buildSubqueryPermissions(node, tableacl.READER, permissions) + case *sqlparser.Set, *sqlparser.Show, *sqlparser.OtherRead: + // no-op + case *sqlparser.DDL: + if !node.Table.IsEmpty() { + permissions = buildTableNamePermissions(node.Table, tableacl.ADMIN, permissions) + } + if !node.NewName.IsEmpty() { + permissions = buildTableNamePermissions(node.NewName, tableacl.ADMIN, permissions) + } 
+ case *sqlparser.OtherAdmin: + // no op + default: + panic(fmt.Errorf("BUG: unexpected statement type: %T", node)) + } + return permissions +} + +func buildSubqueryPermissions(stmt sqlparser.Statement, role tableacl.Role, permissions []Permission) []Permission { + _ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { + switch node := node.(type) { + case *sqlparser.Select: + permissions = buildTableExprsPermissions(node.From, role, permissions) + case sqlparser.TableExprs: + return false, nil + } + return true, nil + }, stmt) + return permissions +} + +func buildTableExprsPermissions(node sqlparser.TableExprs, role tableacl.Role, permissions []Permission) []Permission { + for _, node := range node { + permissions = buildTableExprPermissions(node, role, permissions) + } + return permissions +} + +func buildTableExprPermissions(node sqlparser.TableExpr, role tableacl.Role, permissions []Permission) []Permission { + switch node := node.(type) { + case *sqlparser.AliasedTableExpr: + // An AliasedTableExpr can also be a subquery, but we should skip them here + // because the buildSubQueryPermissions walker will catch them and extract + // the corresponding table names. 
+ switch node := node.Expr.(type) { + case sqlparser.TableName: + permissions = buildTableNamePermissions(node, role, permissions) + case *sqlparser.Subquery: + permissions = buildSubqueryPermissions(node.Select, role, permissions) + } + case *sqlparser.ParenTableExpr: + permissions = buildTableExprsPermissions(node.Exprs, role, permissions) + case *sqlparser.JoinTableExpr: + permissions = buildTableExprPermissions(node.LeftExpr, role, permissions) + permissions = buildTableExprPermissions(node.RightExpr, role, permissions) + } + return permissions +} + +func buildTableNamePermissions(node sqlparser.TableName, role tableacl.Role, permissions []Permission) []Permission { + permissions = append(permissions, Permission{ + TableName: node.Name.String(), + Role: role, + }) + return permissions +} diff --git a/go/vt/vttablet/tabletserver/planbuilder/permission_test.go b/go/vt/vttablet/tabletserver/planbuilder/permission_test.go new file mode 100644 index 00000000000..a34cec96c21 --- /dev/null +++ b/go/vt/vttablet/tabletserver/planbuilder/permission_test.go @@ -0,0 +1,181 @@ +/* +Copyright 2018 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package planbuilder + +import ( + "reflect" + "testing" + + "github.com/youtube/vitess/go/vt/sqlparser" + "github.com/youtube/vitess/go/vt/tableacl" +) + +func TestBuildPermissions(t *testing.T) { + tcases := []struct { + input string + output []Permission + }{{ + input: "select * from t", + output: []Permission{{ + TableName: "t", + Role: tableacl.READER, + }}, + }, { + input: "select * from t1 union select * from t2", + output: []Permission{{ + TableName: "t1", + Role: tableacl.READER, + }, { + TableName: "t2", + Role: tableacl.READER, + }}, + }, { + input: "insert into t values()", + output: []Permission{{ + TableName: "t", + Role: tableacl.WRITER, + }}, + }, { + input: "update t set a=1", + output: []Permission{{ + TableName: "t", + Role: tableacl.WRITER, + }}, + }, { + input: "delete from t", + output: []Permission{{ + TableName: "t", + Role: tableacl.WRITER, + }}, + }, { + input: "set a=1", + output: nil, + }, { + input: "show variable like 'a%'", + output: nil, + }, { + input: "describe t", + output: nil, + }, { + input: "create table t", + output: []Permission{{ + TableName: "t", + Role: tableacl.ADMIN, + }}, + }, { + input: "rename table t1 to t2", + output: []Permission{{ + TableName: "t1", + Role: tableacl.ADMIN, + }, { + TableName: "t2", + Role: tableacl.ADMIN, + }}, + }, { + input: "drop table t", + output: []Permission{{ + TableName: "t", + Role: tableacl.ADMIN, + }}, + }, { + input: "repair t", + output: nil, + }, { + input: "select (select a from t2) from t1", + output: []Permission{{ + TableName: "t1", + Role: tableacl.READER, + }, { + TableName: "t2", + Role: tableacl.READER, + }}, + }, { + input: "insert into t1 values((select a from t2), 1)", + output: []Permission{{ + TableName: "t1", + Role: tableacl.WRITER, + }, { + TableName: "t2", + Role: tableacl.READER, + }}, + }, { + input: "update t1 set a = (select b from t2)", + output: []Permission{{ + TableName: "t1", + Role: tableacl.WRITER, + }, { + TableName: "t2", + Role: tableacl.READER, 
+ }}, + }, { + input: "delete from t1 where a = (select b from t2)", + output: []Permission{{ + TableName: "t1", + Role: tableacl.WRITER, + }, { + TableName: "t2", + Role: tableacl.READER, + }}, + }, { + input: "select * from t1, t2", + output: []Permission{{ + TableName: "t1", + Role: tableacl.READER, + }, { + TableName: "t2", + Role: tableacl.READER, + }}, + }, { + input: "select * from (t1, t2)", + output: []Permission{{ + TableName: "t1", + Role: tableacl.READER, + }, { + TableName: "t2", + Role: tableacl.READER, + }}, + }, { + input: "update t1 join t2 on a=b set c=d", + output: []Permission{{ + TableName: "t1", + Role: tableacl.WRITER, + }, { + TableName: "t2", + Role: tableacl.WRITER, + }}, + }, { + input: "update (select * from t1) as a join t2 on a=b set c=d", + output: []Permission{{ + TableName: "t1", + Role: tableacl.WRITER, + }, { + TableName: "t2", + Role: tableacl.WRITER, + }}, + }} + + for _, tcase := range tcases { + stmt, err := sqlparser.Parse(tcase.input) + if err != nil { + t.Fatal(err) + } + got := BuildPermissions(stmt) + if !reflect.DeepEqual(got, tcase.output) { + t.Errorf("BuildPermissions(%s): %v, want %v", tcase.input, got, tcase.output) + } + } +} diff --git a/go/vt/vttablet/tabletserver/planbuilder/plan.go b/go/vt/vttablet/tabletserver/planbuilder/plan.go index 8f20563c53f..5416b8db2a5 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/plan.go +++ b/go/vt/vttablet/tabletserver/planbuilder/plan.go @@ -21,11 +21,12 @@ import ( "fmt" "github.com/youtube/vitess/go/sqltypes" - vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" "github.com/youtube/vitess/go/vt/sqlparser" "github.com/youtube/vitess/go/vt/tableacl" "github.com/youtube/vitess/go/vt/vterrors" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" + + vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) var ( @@ -138,8 +139,6 @@ func (pt PlanType) MinRole() tableacl.Role { return tableACLRoles[pt] } -//_______________________________________________ - var 
tableACLRoles = map[PlanType]tableacl.Role{ PlanPassSelect: tableacl.READER, PlanSelectLock: tableacl.READER, @@ -211,6 +210,9 @@ type Plan struct { // NewName is the new name of the table. Set for DDLs which create or change the table. NewName sqlparser.TableIdent + // Permissions stores the permissions for the tables accessed in the query. + Permissions []Permission + // FieldQuery is used to fetch field info FieldQuery *sqlparser.ParsedQuery @@ -267,34 +269,40 @@ func Build(sql string, tables map[string]*schema.Table) (*Plan, error) { if err != nil { return nil, err } + var plan *Plan switch stmt := statement.(type) { case *sqlparser.Union: - return &Plan{ + plan, err = &Plan{ PlanID: PlanPassSelect, FieldQuery: GenerateFieldQuery(stmt), FullQuery: GenerateLimitQuery(stmt), }, nil case *sqlparser.Select: - return analyzeSelect(stmt, tables) + plan, err = analyzeSelect(stmt, tables) case *sqlparser.Insert: - return analyzeInsert(stmt, tables) + plan, err = analyzeInsert(stmt, tables) case *sqlparser.Update: - return analyzeUpdate(stmt, tables) + plan, err = analyzeUpdate(stmt, tables) case *sqlparser.Delete: - return analyzeDelete(stmt, tables) + plan, err = analyzeDelete(stmt, tables) case *sqlparser.Set: - return analyzeSet(stmt), nil + plan, err = analyzeSet(stmt), nil case *sqlparser.DDL: - return analyzeDDL(stmt, tables), nil + plan, err = analyzeDDL(stmt, tables), nil case *sqlparser.Show: - return &Plan{PlanID: PlanOtherRead}, nil + plan, err = &Plan{PlanID: PlanOtherRead}, nil case *sqlparser.OtherRead: - return &Plan{PlanID: PlanOtherRead}, nil + plan, err = &Plan{PlanID: PlanOtherRead}, nil case *sqlparser.OtherAdmin: - return &Plan{PlanID: PlanOtherAdmin}, nil + plan, err = &Plan{PlanID: PlanOtherAdmin}, nil + default: + return nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "invalid SQL") } - - return nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, "invalid SQL") + if err != nil { + return nil, err + } + plan.Permissions = 
BuildPermissions(statement) + return plan, nil } // BuildStreaming builds a streaming plan based on the schema. @@ -305,8 +313,9 @@ func BuildStreaming(sql string, tables map[string]*schema.Table) (*Plan, error) } plan := &Plan{ - PlanID: PlanSelectStream, - FullQuery: GenerateFullQuery(statement), + PlanID: PlanSelectStream, + FullQuery: GenerateFullQuery(statement), + Permissions: BuildPermissions(statement), } switch stmt := statement.(type) { @@ -338,5 +347,9 @@ func BuildMessageStreaming(name string, tables map[string]*schema.Table) (*Plan, if plan.Table.Type != schema.Message { return nil, vterrors.Errorf(vtrpcpb.Code_FAILED_PRECONDITION, "'%s' is not a message table", name) } + plan.Permissions = []Permission{{ + TableName: plan.Table.Name.String(), + Role: tableacl.WRITER, + }} return plan, nil } diff --git a/go/vt/vttablet/tabletserver/planbuilder/plan_test.go b/go/vt/vttablet/tabletserver/planbuilder/plan_test.go index 78577f3137d..71bc2e750f9 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/plan_test.go +++ b/go/vt/vttablet/tabletserver/planbuilder/plan_test.go @@ -33,18 +33,18 @@ import ( "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/testfiles" "github.com/youtube/vitess/go/vt/sqlparser" + "github.com/youtube/vitess/go/vt/tableacl" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" ) -// toJSON returns a JSON of the given Plan. -// Except for "TableName", it's a 1:1 copy of the fields of "Plan". -// (The JSON output is used in the tests to compare it against the data in the -// golden files e.g. data/test/tabletserver/exec_cases.txt.) -func toJSON(p *Plan) ([]byte, error) { +// MarshalJSON returns a JSON of the given Plan. +// This is only for testing. 
+func (p *Plan) MarshalJSON() ([]byte, error) { mplan := struct { PlanID PlanType Reason ReasonType `json:",omitempty"` TableName sqlparser.TableIdent `json:",omitempty"` + Permissions []Permission `json:",omitempty"` FieldQuery *sqlparser.ParsedQuery `json:",omitempty"` FullQuery *sqlparser.ParsedQuery `json:",omitempty"` OuterQuery *sqlparser.ParsedQuery `json:",omitempty"` @@ -59,6 +59,7 @@ func toJSON(p *Plan) ([]byte, error) { PlanID: p.PlanID, Reason: p.Reason, TableName: p.TableName(), + Permissions: p.Permissions, FieldQuery: p.FieldQuery, FullQuery: p.FullQuery, OuterQuery: p.OuterQuery, @@ -86,7 +87,7 @@ func TestPlan(t *testing.T) { if err != nil { out = err.Error() } else { - bout, err := toJSON(plan) + bout, err := json.Marshal(plan) if err != nil { t.Fatalf("Error marshalling %v: %v", plan, err) } @@ -129,7 +130,7 @@ func TestCustom(t *testing.T) { if err != nil { out = err.Error() } else { - bout, err := toJSON(plan) + bout, err := json.Marshal(plan) if err != nil { t.Fatalf("Error marshalling %v: %v", plan, err) } @@ -151,7 +152,7 @@ func TestStreamPlan(t *testing.T) { if err != nil { out = err.Error() } else { - bout, err := toJSON(plan) + bout, err := json.Marshal(plan) if err != nil { t.Fatalf("Error marshalling %v: %v", plan, err) } @@ -184,14 +185,18 @@ func TestMessageStreamingPlan(t *testing.T) { if err != nil { t.Error(err) } - bout, _ := toJSON(plan) + bout, _ := json.Marshal(plan) planJSON := string(bout) wantPlan := &Plan{ PlanID: PlanMessageStream, Table: testSchema["msg"], + Permissions: []Permission{{ + TableName: "msg", + Role: tableacl.WRITER, + }}, } - bout, _ = toJSON(wantPlan) + bout, _ = json.Marshal(wantPlan) wantJSON := string(bout) if planJSON != wantJSON { diff --git a/go/vt/vttablet/tabletserver/query_engine.go b/go/vt/vttablet/tabletserver/query_engine.go index e5c1461e8ac..1e366110d51 100644 --- a/go/vt/vttablet/tabletserver/query_engine.go +++ b/go/vt/vttablet/tabletserver/query_engine.go @@ -56,9 +56,10 @@ import ( // 
and track stats. type TabletPlan struct { *planbuilder.Plan - Fields []*querypb.Field - Rules *rules.Rules - Authorized *tableacl.ACLResult + Fields []*querypb.Field + Rules *rules.Rules + LegacyAuthorized *tableacl.ACLResult + Authorized []*tableacl.ACLResult mu sync.Mutex QueryCount int64 @@ -96,6 +97,14 @@ func (ep *TabletPlan) Stats() (queryCount int64, duration, mysqlTime time.Durati return } +// buildAuthorized builds 'Authorized', which is the runtime part for 'Permissions'. +func (ep *TabletPlan) buildAuthorized() { + ep.Authorized = make([]*tableacl.ACLResult, len(ep.Permissions)) + for i, perm := range ep.Permissions { + ep.Authorized[i] = tableacl.Authorized(perm.TableName, perm.Role) + } +} + //_______________________________________________ // QueryEngine implements the core functionality of tabletserver. @@ -314,7 +323,8 @@ func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats } plan := &TabletPlan{Plan: splan} plan.Rules = qe.queryRuleSources.FilterByPlan(sql, plan.PlanID, plan.TableName().String()) - plan.Authorized = tableacl.Authorized(plan.TableName().String(), plan.PlanID.MinRole()) + plan.LegacyAuthorized = tableacl.Authorized(plan.TableName().String(), plan.PlanID.MinRole()) + plan.buildAuthorized() if plan.PlanID.IsSelect() { if plan.FieldQuery != nil { conn, err := qe.conns.Get(ctx) @@ -352,7 +362,8 @@ func (qe *QueryEngine) GetStreamPlan(sql string) (*TabletPlan, error) { } plan := &TabletPlan{Plan: splan} plan.Rules = qe.queryRuleSources.FilterByPlan(sql, plan.PlanID, plan.TableName().String()) - plan.Authorized = tableacl.Authorized(plan.TableName().String(), plan.PlanID.MinRole()) + plan.LegacyAuthorized = tableacl.Authorized(plan.TableName().String(), plan.PlanID.MinRole()) + plan.buildAuthorized() return plan, nil } @@ -366,7 +377,8 @@ func (qe *QueryEngine) GetMessageStreamPlan(name string) (*TabletPlan, error) { } plan := &TabletPlan{Plan: splan} plan.Rules = qe.queryRuleSources.FilterByPlan("stream from 
"+name, plan.PlanID, plan.TableName().String()) - plan.Authorized = tableacl.Authorized(plan.TableName().String(), plan.PlanID.MinRole()) + plan.LegacyAuthorized = tableacl.Authorized(plan.TableName().String(), plan.PlanID.MinRole()) + plan.buildAuthorized() return plan, nil } diff --git a/go/vt/vttablet/tabletserver/query_engine_test.go b/go/vt/vttablet/tabletserver/query_engine_test.go index 7b52edcdbec..bf6513d066f 100644 --- a/go/vt/vttablet/tabletserver/query_engine_test.go +++ b/go/vt/vttablet/tabletserver/query_engine_test.go @@ -30,6 +30,7 @@ import ( "github.com/youtube/vitess/go/mysql/fakesqldb" "github.com/youtube/vitess/go/sqltypes" "github.com/youtube/vitess/go/vt/dbconfigs" + "github.com/youtube/vitess/go/vt/tableacl" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/planbuilder" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/schema/schematest" @@ -127,6 +128,10 @@ func TestGetMessageStreamPlan(t *testing.T) { wantPlan := &planbuilder.Plan{ PlanID: planbuilder.PlanMessageStream, Table: qe.tables["msg"], + Permissions: []planbuilder.Permission{{ + TableName: "msg", + Role: tableacl.WRITER, + }}, } if !reflect.DeepEqual(plan.Plan, wantPlan) { t.Errorf("GetMessageStreamPlan(msg): %v, want %v", plan.Plan, wantPlan) diff --git a/go/vt/vttablet/tabletserver/query_executor.go b/go/vt/vttablet/tabletserver/query_executor.go index faf8a2fb2d2..c284b635d82 100644 --- a/go/vt/vttablet/tabletserver/query_executor.go +++ b/go/vt/vttablet/tabletserver/query_executor.go @@ -17,6 +17,7 @@ limitations under the License. package tabletserver import ( + "flag" "fmt" "io" "strings" @@ -45,6 +46,9 @@ import ( vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" ) +// TODO(sougou): remove after affected parties have transitioned to new behavior. 
+var legacyTableACL = flag.Bool("legacy-table-acl", false, "deprecated: this flag can be used to revert to the older table ACL behavior, which checked access for at most one table") + // QueryExecutor is used for executing a query request. type QueryExecutor struct { query string @@ -58,22 +62,6 @@ type QueryExecutor struct { tsv *TabletServer } -// NewQueryExecutor creates a new QueryExecutor with the given contents. It is -// used by vtexplain to create the struct ouside the package and assign the private -// members. -func NewQueryExecutor(ctx context.Context, query string, bindVars map[string]*querypb.BindVariable, transactionID int64, options *querypb.ExecuteOptions, plan *TabletPlan, logStats *tabletenv.LogStats, tsv *TabletServer) *QueryExecutor { - return &QueryExecutor{ - query: query, - bindVars: bindVars, - transactionID: transactionID, - options: options, - plan: plan, - ctx: ctx, - logStats: logStats, - tsv: tsv, - } -} - var sequenceFields = []*querypb.Field{ { Name: "nextval", @@ -283,7 +271,7 @@ func (qre *QueryExecutor) execDmlAutoCommit() (reply *sqltypes.Result, err error } func (qre *QueryExecutor) execAsTransaction(f func(conn *TxConnection) (*sqltypes.Result, error)) (reply *sqltypes.Result, err error) { - conn, err := qre.tsv.te.txPool.LocalBegin(qre.ctx, qre.options.GetClientFoundRows(), qre.options.GetTransactionIsolation()) + conn, err := qre.tsv.te.txPool.LocalBegin(qre.ctx, qre.options) if err != nil { return nil, err } @@ -351,34 +339,23 @@ func (qre *QueryExecutor) checkPermissions() error { return nil } - // Skip the ACL check if no table name is available in the query or DDL. - if qre.plan.TableName().IsEmpty() && qre.plan.NewName.IsEmpty() { - return nil - } - - // DDL: Check against the new name of the table as well. 
- if !qre.plan.NewName.IsEmpty() { - altAuthorized := tableacl.Authorized(qre.plan.NewName.String(), qre.plan.PlanID.MinRole()) - err := qre.checkAccess(altAuthorized, qre.plan.NewName, callerID) - if err != nil { - return err + if *legacyTableACL { + if !qre.plan.TableName().IsEmpty() { + return qre.checkAccess(qre.plan.LegacyAuthorized, qre.plan.TableName().String(), callerID) + } + } else { + for i, auth := range qre.plan.Authorized { + if err := qre.checkAccess(auth, qre.plan.Permissions[i].TableName, callerID); err != nil { + return err + } } } - // Actual ACL check: Check if the user is a member of the ACL. - if qre.plan.Authorized == nil { - // Note: This should never happen because tableacl.Authorized() sets this - // field to an "acl.DenyAllACL" ACL if no ACL was found. - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "table acl error: nil acl") - } - if !qre.plan.TableName().IsEmpty() { - return qre.checkAccess(qre.plan.Authorized, qre.plan.TableName(), callerID) - } return nil } -func (qre *QueryExecutor) checkAccess(authorized *tableacl.ACLResult, tableName sqlparser.TableIdent, callerID *querypb.VTGateCallerID) error { - statsKey := []string{tableName.String(), authorized.GroupName, qre.plan.PlanID.String(), callerID.Username} +func (qre *QueryExecutor) checkAccess(authorized *tableacl.ACLResult, tableName string, callerID *querypb.VTGateCallerID) error { + statsKey := []string{tableName, authorized.GroupName, qre.plan.PlanID.String(), callerID.Username} if !authorized.IsMember(callerID) { if qre.tsv.qe.enableTableACLDryRun { tabletenv.TableaclPseudoDenied.Add(statsKey, 1) diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index 942075fd8df..7ccc2dc6dd5 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -682,7 +682,7 @@ func (tsv *TabletServer) Begin(ctx context.Context, target *querypb.Target, opti // TODO(erez): I think this should be 
RESOURCE_EXHAUSTED. return vterrors.Errorf(vtrpcpb.Code_UNAVAILABLE, "Transaction throttled") } - transactionID, err = tsv.te.txPool.Begin(ctx, options.GetClientFoundRows(), options.GetTransactionIsolation()) + transactionID, err = tsv.te.txPool.Begin(ctx, options) logStats.TransactionID = transactionID return err }, @@ -1834,11 +1834,6 @@ func (tsv *TabletServer) endRequest(isTx bool) { } } -// GetPlan is only used from vtexplain -func (tsv *TabletServer) GetPlan(ctx context.Context, logStats *tabletenv.LogStats, sql string) (*TabletPlan, error) { - return tsv.qe.GetPlan(ctx, logStats, sql, false /* skipQueryPlanCache */) -} - func (tsv *TabletServer) registerDebugHealthHandler() { http.HandleFunc("/debug/health", func(w http.ResponseWriter, r *http.Request) { if err := acl.CheckAccessHTTP(r, acl.MONITORING); err != nil { diff --git a/go/vt/vttablet/tabletserver/tabletserver_test.go b/go/vt/vttablet/tabletserver/tabletserver_test.go index 2258e2a253d..9a184cd6440 100644 --- a/go/vt/vttablet/tabletserver/tabletserver_test.go +++ b/go/vt/vttablet/tabletserver/tabletserver_test.go @@ -2777,7 +2777,7 @@ func TestConfigChanges(t *testing.T) { tsv.SetAutoCommit(true) if val := tsv.qe.autoCommit.Get(); !val { - t.Errorf("tsv.qe.autoCommit.Get: %d, want true", val) + t.Errorf("tsv.qe.autoCommit.Get: %v, want true", val) } tsv.SetMaxResultSize(newSize) diff --git a/go/vt/vttablet/tabletserver/tx_engine.go b/go/vt/vttablet/tabletserver/tx_engine.go index 518004eb1b0..9c8c0ccf1ff 100644 --- a/go/vt/vttablet/tabletserver/tx_engine.go +++ b/go/vt/vttablet/tabletserver/tx_engine.go @@ -27,11 +27,12 @@ import ( "github.com/youtube/vitess/go/vt/concurrency" "github.com/youtube/vitess/go/vt/dbconfigs" "github.com/youtube/vitess/go/vt/dtids" - "github.com/youtube/vitess/go/vt/proto/query" "github.com/youtube/vitess/go/vt/vtgate/vtgateconn" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/connpool" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" 
"github.com/youtube/vitess/go/vt/vttablet/tabletserver/txlimiter" + + querypb "github.com/youtube/vitess/go/vt/proto/query" ) // TxEngine handles transactions. @@ -223,7 +224,7 @@ outer: if txid > maxid { maxid = txid } - conn, err := te.txPool.LocalBegin(ctx, false, query.ExecuteOptions_DEFAULT) + conn, err := te.txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { allErr.RecordError(err) continue diff --git a/go/vt/vttablet/tabletserver/tx_engine_test.go b/go/vt/vttablet/tabletserver/tx_engine_test.go index 687c4fe622a..2289c88a341 100644 --- a/go/vt/vttablet/tabletserver/tx_engine_test.go +++ b/go/vt/vttablet/tabletserver/tx_engine_test.go @@ -49,7 +49,7 @@ func TestTxEngineClose(t *testing.T) { // Normal close with timeout wait. te.Open() - c, err := te.txPool.LocalBegin(ctx, false, querypb.ExecuteOptions_DEFAULT) + c, err := te.txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } @@ -62,7 +62,7 @@ func TestTxEngineClose(t *testing.T) { // Immediate close. te.Open() - c, err = te.txPool.LocalBegin(ctx, false, querypb.ExecuteOptions_DEFAULT) + c, err = te.txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } @@ -76,7 +76,7 @@ func TestTxEngineClose(t *testing.T) { // Normal close with short grace period. te.shutdownGracePeriod = 250 * time.Millisecond te.Open() - c, err = te.txPool.LocalBegin(ctx, false, querypb.ExecuteOptions_DEFAULT) + c, err = te.txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } @@ -93,7 +93,7 @@ func TestTxEngineClose(t *testing.T) { // Normal close with short grace period, but pool gets empty early. te.shutdownGracePeriod = 250 * time.Millisecond te.Open() - c, err = te.txPool.LocalBegin(ctx, false, querypb.ExecuteOptions_DEFAULT) + c, err = te.txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } @@ -117,7 +117,7 @@ func TestTxEngineClose(t *testing.T) { // Immediate close, but connection is in use. 
te.Open() - c, err = te.txPool.LocalBegin(ctx, false, querypb.ExecuteOptions_DEFAULT) + c, err = te.txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } diff --git a/go/vt/vttablet/tabletserver/tx_executor.go b/go/vt/vttablet/tabletserver/tx_executor.go index 26a1d2f808d..c88e96ee43a 100644 --- a/go/vt/vttablet/tabletserver/tx_executor.go +++ b/go/vt/vttablet/tabletserver/tx_executor.go @@ -68,7 +68,7 @@ func (txe *TxExecutor) Prepare(transactionID int64, dtid string) error { return vterrors.Errorf(vtrpcpb.Code_RESOURCE_EXHAUSTED, "prepare failed for transaction %d: %v", transactionID, err) } - localConn, err := txe.te.txPool.LocalBegin(txe.ctx, false, querypb.ExecuteOptions_DEFAULT) + localConn, err := txe.te.txPool.LocalBegin(txe.ctx, &querypb.ExecuteOptions{}) if err != nil { return err } @@ -130,7 +130,7 @@ func (txe *TxExecutor) CommitPrepared(dtid string) error { func (txe *TxExecutor) markFailed(ctx context.Context, dtid string) { tabletenv.InternalErrors.Add("TwopcCommit", 1) txe.te.preparedPool.SetFailed(dtid) - conn, err := txe.te.txPool.LocalBegin(ctx, false, querypb.ExecuteOptions_DEFAULT) + conn, err := txe.te.txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { log.Errorf("markFailed: Begin failed for dtid %s: %v", dtid, err) return @@ -170,7 +170,7 @@ func (txe *TxExecutor) RollbackPrepared(dtid string, originalID int64) error { return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } defer tabletenv.QueryStats.Record("ROLLBACK_PREPARED", time.Now()) - conn, err := txe.te.txPool.LocalBegin(txe.ctx, false, querypb.ExecuteOptions_DEFAULT) + conn, err := txe.te.txPool.LocalBegin(txe.ctx, &querypb.ExecuteOptions{}) if err != nil { goto returnConn } @@ -200,7 +200,7 @@ func (txe *TxExecutor) CreateTransaction(dtid string, participants []*querypb.Ta return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "2pc is not enabled") } defer tabletenv.QueryStats.Record("CREATE_TRANSACTION", time.Now()) - 
conn, err := txe.te.txPool.LocalBegin(txe.ctx, false, querypb.ExecuteOptions_DEFAULT) + conn, err := txe.te.txPool.LocalBegin(txe.ctx, &querypb.ExecuteOptions{}) if err != nil { return err } @@ -248,7 +248,7 @@ func (txe *TxExecutor) SetRollback(dtid string, transactionID int64) error { txe.te.txPool.Rollback(txe.ctx, transactionID) } - conn, err := txe.te.txPool.LocalBegin(txe.ctx, false, querypb.ExecuteOptions_DEFAULT) + conn, err := txe.te.txPool.LocalBegin(txe.ctx, &querypb.ExecuteOptions{}) if err != nil { return err } @@ -275,7 +275,7 @@ func (txe *TxExecutor) ConcludeTransaction(dtid string) error { } defer tabletenv.QueryStats.Record("RESOLVE", time.Now()) - conn, err := txe.te.txPool.LocalBegin(txe.ctx, false, querypb.ExecuteOptions_DEFAULT) + conn, err := txe.te.txPool.LocalBegin(txe.ctx, &querypb.ExecuteOptions{}) if err != nil { return err } diff --git a/go/vt/vttablet/tabletserver/tx_pool.go b/go/vt/vttablet/tabletserver/tx_pool.go index fd37a3e44e8..426fb4d90a7 100644 --- a/go/vt/vttablet/tabletserver/tx_pool.go +++ b/go/vt/vttablet/tabletserver/tx_pool.go @@ -66,6 +66,11 @@ var ( } ) +type messageCommitter interface { + UpdateCaches(newMessages map[string][]*messager.MessageRow, changedMessages map[string][]string) + LockDB(newMessages map[string][]*messager.MessageRow, changedMessages map[string][]string) func() +} + // TxPool is the transaction pool for the query service. type TxPool struct { // conns is the 'regular' pool. 
By default, connections @@ -133,9 +138,10 @@ func (axp *TxPool) Close() { log.Warningf("killing transaction for shutdown: %s", conn.Format(nil)) tabletenv.InternalErrors.Add("StrayTransactions", 1) conn.Close() - conn.conclude(TxClose) + conn.conclude(TxClose, "pool closed") } axp.conns.Close() + axp.foundRowsPool.Close() } // AdjustLastID adjusts the last transaction id to be at least @@ -164,7 +170,7 @@ func (axp *TxPool) transactionKiller() { log.Warningf("killing transaction (exceeded timeout: %v): %s", axp.Timeout(), conn.Format(nil)) tabletenv.KillStats.Add("Transactions", 1) conn.Close() - conn.conclude(TxKill) + conn.conclude(TxKill, fmt.Sprintf("exceeded timeout: %v", axp.Timeout())) } } @@ -175,7 +181,7 @@ func (axp *TxPool) WaitForEmpty() { // Begin begins a transaction, and returns the associated transaction id. // Subsequent statements can access the connection through the transaction id. -func (axp *TxPool) Begin(ctx context.Context, useFoundRows bool, txIsolation querypb.ExecuteOptions_TransactionIsolation) (int64, error) { +func (axp *TxPool) Begin(ctx context.Context, options *querypb.ExecuteOptions) (int64, error) { var conn *connpool.DBConn var err error immediateCaller := callerid.ImmediateCallerIDFromContext(ctx) @@ -197,7 +203,7 @@ func (axp *TxPool) Begin(ctx context.Context, useFoundRows bool, txIsolation que axp.limiter.Release(immediateCaller, effectiveCaller) }() - if useFoundRows { + if options.GetClientFoundRows() { conn, err = axp.foundRowsPool.Get(ctx) } else { conn, err = axp.conns.Get(ctx) @@ -213,7 +219,7 @@ func (axp *TxPool) Begin(ctx context.Context, useFoundRows bool, txIsolation que return 0, err } - if query, ok := txIsolations[txIsolation]; ok { + if query, ok := txIsolations[options.GetTransactionIsolation()]; ok { if _, err := conn.Exec(ctx, query, 1, false); err != nil { return 0, err } @@ -234,17 +240,18 @@ func (axp *TxPool) Begin(ctx context.Context, useFoundRows bool, txIsolation que immediateCaller, effectiveCaller, 
), + options.GetWorkload() != querypb.ExecuteOptions_DBA, ) return transactionID, nil } // Commit commits the specified transaction. -func (axp *TxPool) Commit(ctx context.Context, transactionID int64, messager *messager.Engine) error { +func (axp *TxPool) Commit(ctx context.Context, transactionID int64, mc messageCommitter) error { conn, err := axp.Get(transactionID, "for commit") if err != nil { return err } - return axp.LocalCommit(ctx, conn, messager) + return axp.LocalCommit(ctx, conn, mc) } // Rollback rolls back the specified transaction. @@ -269,8 +276,8 @@ func (axp *TxPool) Get(transactionID int64, reason string) (*TxConnection, error // LocalBegin is equivalent to Begin->Get. // It's used for executing transactions within a request. It's safe // to always call LocalConclude at the end. -func (axp *TxPool) LocalBegin(ctx context.Context, useFoundRows bool, txIsolation querypb.ExecuteOptions_TransactionIsolation) (*TxConnection, error) { - transactionID, err := axp.Begin(ctx, useFoundRows, txIsolation) +func (axp *TxPool) LocalBegin(ctx context.Context, options *querypb.ExecuteOptions) (*TxConnection, error) { + transactionID, err := axp.Begin(ctx, options) if err != nil { return nil, err } @@ -278,14 +285,14 @@ func (axp *TxPool) LocalBegin(ctx context.Context, useFoundRows bool, txIsolatio } // LocalCommit is the commit function for LocalBegin. 
-func (axp *TxPool) LocalCommit(ctx context.Context, conn *TxConnection, messager *messager.Engine) error { - defer conn.conclude(TxCommit) - defer messager.LockDB(conn.NewMessages, conn.ChangedMessages)() +func (axp *TxPool) LocalCommit(ctx context.Context, conn *TxConnection, mc messageCommitter) error { + defer conn.conclude(TxCommit, "transaction committed") + defer mc.LockDB(conn.NewMessages, conn.ChangedMessages)() if _, err := conn.Exec(ctx, "commit", 1, false); err != nil { conn.Close() return err } - messager.UpdateCaches(conn.NewMessages, conn.ChangedMessages) + mc.UpdateCaches(conn.NewMessages, conn.ChangedMessages) return nil } @@ -298,7 +305,7 @@ func (axp *TxPool) LocalConclude(ctx context.Context, conn *TxConnection) { } func (axp *TxPool) localRollback(ctx context.Context, conn *TxConnection) error { - defer conn.conclude(TxRollback) + defer conn.conclude(TxRollback, "transaction rolled back") if _, err := conn.Exec(ctx, "rollback", 1, false); err != nil { conn.Close() return err @@ -396,7 +403,7 @@ func (txc *TxConnection) BeginAgain(ctx context.Context) error { // active. 
func (txc *TxConnection) Recycle() { if txc.IsClosed() { - txc.conclude(TxClose) + txc.conclude(TxClose, "closed") } else { txc.pool.activePool.Put(txc.TransactionID) } @@ -407,8 +414,8 @@ func (txc *TxConnection) RecordQuery(query string) { txc.Queries = append(txc.Queries, query) } -func (txc *TxConnection) conclude(conclusion string) { - txc.pool.activePool.Unregister(txc.TransactionID) +func (txc *TxConnection) conclude(conclusion, reason string) { + txc.pool.activePool.Unregister(txc.TransactionID, reason) txc.DBConn.Recycle() txc.DBConn = nil txc.pool.limiter.Release(txc.ImmediateCallerID, txc.EffectiveCallerID) diff --git a/go/vt/vttablet/tabletserver/tx_pool_test.go b/go/vt/vttablet/tabletserver/tx_pool_test.go index 5d59f597ebf..093db426d04 100644 --- a/go/vt/vttablet/tabletserver/tx_pool_test.go +++ b/go/vt/vttablet/tabletserver/tx_pool_test.go @@ -33,8 +33,11 @@ import ( "github.com/youtube/vitess/go/vt/vterrors" "github.com/youtube/vitess/go/vt/vttablet/tabletserver/tabletenv" + "regexp" + querypb "github.com/youtube/vitess/go/vt/proto/query" vtrpcpb "github.com/youtube/vitess/go/vt/proto/vtrpc" + "github.com/youtube/vitess/go/vt/vttablet/tabletserver/messager" ) func TestTxPoolExecuteRollback(t *testing.T) { @@ -49,7 +52,7 @@ func TestTxPoolExecuteRollback(t *testing.T) { txPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) defer txPool.Close() ctx := context.Background() - transactionID, err := txPool.Begin(ctx, false, querypb.ExecuteOptions_DEFAULT) + transactionID, err := txPool.Begin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } @@ -76,11 +79,11 @@ func TestTxPoolRollbackNonBusy(t *testing.T) { txPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) defer txPool.Close() ctx := context.Background() - txid1, err := txPool.Begin(ctx, false, querypb.ExecuteOptions_DEFAULT) + txid1, err := txPool.Begin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } - _, err = txPool.Begin(ctx, false, 
querypb.ExecuteOptions_DEFAULT) + _, err = txPool.Begin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } @@ -101,12 +104,15 @@ func TestTxPoolRollbackNonBusy(t *testing.T) { } } -func TestTxPoolTransactionKiller(t *testing.T) { - sql := "alter table test_table add test_column int" +func TestTxPoolTransactionKillerEnforceTimeoutEnabled(t *testing.T) { + sqlWithTimeout := "alter table test_table add test_column int" + sqlWithoutTimeout := "alter table test_table add test_column_no_timeout int" db := fakesqldb.New(t) defer db.Close() - db.AddQuery(sql, &sqltypes.Result{}) + db.AddQuery(sqlWithTimeout, &sqltypes.Result{}) + db.AddQuery(sqlWithoutTimeout, &sqltypes.Result{}) db.AddQuery("begin", &sqltypes.Result{}) + db.AddQuery("rollback", &sqltypes.Result{}) txPool := newTxPool() // make sure transaction killer will run frequent enough @@ -115,22 +121,69 @@ func TestTxPoolTransactionKiller(t *testing.T) { defer txPool.Close() ctx := context.Background() killCount := tabletenv.KillStats.Counts()["Transactions"] - transactionID, err := txPool.Begin(ctx, false, querypb.ExecuteOptions_DEFAULT) + + txWithoutTimeout, err := addQuery(ctx, sqlWithoutTimeout, txPool, querypb.ExecuteOptions_DBA) if err != nil { t.Fatal(err) } - txConn, err := txPool.Get(transactionID, "for query") - if err != nil { + + if _, err := addQuery(ctx, sqlWithTimeout, txPool, querypb.ExecuteOptions_UNSPECIFIED); err != nil { t.Fatal(err) } - txConn.RecordQuery(sql) - txConn.Recycle() - // transaction killer should kill the query + + var ( + killCountDiff int64 + expectedKills = int64(1) + timeoutCh = time.After(5 * time.Second) + ) + + // transaction killer should kill the query the second query + for { + killCountDiff = tabletenv.KillStats.Counts()["Transactions"] - killCount + if killCountDiff >= expectedKills { + break + } + + select { + case <-timeoutCh: + t.Fatal("waited too long for timed transaction to be killed by transaction killer") + default: + } + } + + if killCountDiff > 
expectedKills { + t.Fatalf("expected only %v query to be killed, but got %v killed", expectedKills, killCountDiff) + } + + txPool.Rollback(ctx, txWithoutTimeout) txPool.WaitForEmpty() - killCountDiff := tabletenv.KillStats.Counts()["Transactions"] - killCount - if killCountDiff != 1 { - t.Fatalf("query: %s should be killed by transaction killer", sql) + + if got, expected := db.GetQueryCalledNum("begin"), 2; got != expected { + t.Fatalf("'begin' called: got=%v, expected=%v", got, expected) + } + if got, expected := db.GetQueryCalledNum(sqlWithoutTimeout), 1; got != expected { + t.Fatalf("'%v' called: got=%v, expected=%v", sqlWithoutTimeout, got, expected) + } + if got, expected := db.GetQueryCalledNum(sqlWithTimeout), 1; got != expected { + t.Fatalf("'%v' called: got=%v, expected=%v", sqlWithTimeout, got, expected) + } + if got, expected := db.GetQueryCalledNum("rollback"), 1; got != expected { + t.Fatalf("'rollback' called: got=%v, expected=%v", got, expected) + } + +} +func addQuery(ctx context.Context, sql string, txPool *TxPool, workload querypb.ExecuteOptions_Workload) (int64, error) { + transactionID, err := txPool.Begin(ctx, &querypb.ExecuteOptions{Workload: workload}) + if err != nil { + return 0, err + } + txConn, err := txPool.Get(transactionID, "for query") + if err != nil { + return 0, err } + txConn.Exec(ctx, sql, 1, false) + txConn.Recycle() + return transactionID, nil } func TestTxPoolClientRowsFound(t *testing.T) { @@ -146,7 +199,7 @@ func TestTxPoolClientRowsFound(t *testing.T) { // Start a 'normal' transaction. It should take a connection // for the normal 'conns' pool. - id1, err := txPool.Begin(ctx, false, querypb.ExecuteOptions_DEFAULT) + id1, err := txPool.Begin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } @@ -159,7 +212,7 @@ func TestTxPoolClientRowsFound(t *testing.T) { // Start a 'foundRows' transaction. It should take a connection // from the foundRows pool. 
- id2, err := txPool.Begin(ctx, true, querypb.ExecuteOptions_DEFAULT) + id2, err := txPool.Begin(ctx, &querypb.ExecuteOptions{ClientFoundRows: true}) if err != nil { t.Fatal(err) } @@ -200,13 +253,13 @@ func TestTxPoolTransactionIsolation(t *testing.T) { ctx := context.Background() // Start a transaction with default. It should not change isolation. - _, err := txPool.Begin(ctx, false, querypb.ExecuteOptions_DEFAULT) + _, err := txPool.Begin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } db.AddQuery("set transaction isolation level READ COMMITTED", &sqltypes.Result{}) - _, err = txPool.Begin(ctx, false, querypb.ExecuteOptions_READ_COMMITTED) + _, err = txPool.Begin(ctx, &querypb.ExecuteOptions{TransactionIsolation: querypb.ExecuteOptions_READ_COMMITTED}) if err != nil { t.Fatal(err) } @@ -231,7 +284,7 @@ func TestTxPoolBeginWithPoolConnectionError_Errno2006_Transient(t *testing.T) { } ctx := context.Background() - txConn, err := txPool.LocalBegin(ctx, false, querypb.ExecuteOptions_DEFAULT) + txConn, err := txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatalf("Begin should have succeeded after the retry in DBConn.Exec(): %v", err) } @@ -262,7 +315,7 @@ func TestTxPoolBeginWithPoolConnectionError_Errno2006_Permanent(t *testing.T) { // After that, vttablet will automatically try to reconnect and this fail. // DBConn.Exec() will return the reconnect error as final error and not the // initial connection error. - _, err = txPool.LocalBegin(context.Background(), false, querypb.ExecuteOptions_DEFAULT) + _, err = txPool.LocalBegin(context.Background(), &querypb.ExecuteOptions{}) if err == nil || !strings.Contains(err.Error(), "(errno 2013)") { t.Fatalf("Begin did not return the reconnect error: %v", err) } @@ -288,7 +341,7 @@ func TestTxPoolBeginWithPoolConnectionError_Errno2013(t *testing.T) { db.EnableShouldClose() // 2013 is not retryable. DBConn.Exec() fails after the first attempt. 
- _, err = txPool.Begin(context.Background(), false, querypb.ExecuteOptions_DEFAULT) + _, err = txPool.Begin(context.Background(), &querypb.ExecuteOptions{}) if err == nil || !strings.Contains(err.Error(), "(errno 2013)") { t.Fatalf("Begin must return connection error with MySQL errno 2013: %v", err) } @@ -311,7 +364,7 @@ func primeTxPoolWithConnection(t *testing.T) (*fakesqldb.DB, *TxPool, error) { db.AddQuery("begin", &sqltypes.Result{}) db.AddQuery("rollback", &sqltypes.Result{}) ctx := context.Background() - txConn, err := txPool.LocalBegin(ctx, false, querypb.ExecuteOptions_DEFAULT) + txConn, err := txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { return nil, nil, err } @@ -328,7 +381,7 @@ func TestTxPoolBeginWithError(t *testing.T) { txPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) defer txPool.Close() ctx := context.Background() - _, err := txPool.Begin(ctx, false, querypb.ExecuteOptions_DEFAULT) + _, err := txPool.Begin(ctx, &querypb.ExecuteOptions{}) want := "error: rejected" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("Begin: %v, want %s", err, want) @@ -350,7 +403,7 @@ func TestTxPoolRollbackFail(t *testing.T) { txPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) defer txPool.Close() ctx := context.Background() - transactionID, err := txPool.Begin(ctx, false, querypb.ExecuteOptions_DEFAULT) + transactionID, err := txPool.Begin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } @@ -384,6 +437,82 @@ func TestTxPoolGetConnNonExistentTransaction(t *testing.T) { } } +func TestTxPoolGetConnRecentlyRemovedTransaction(t *testing.T) { + db := fakesqldb.New(t) + defer db.Close() + ctx := context.Background() + db.AddQuery("begin", &sqltypes.Result{}) + db.AddQuery("commit", &sqltypes.Result{}) + db.AddQuery("rollback", &sqltypes.Result{}) + txPool := newTxPool() + txPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + id, err := txPool.Begin(ctx, &querypb.ExecuteOptions{}) + 
txPool.Close() + + assertErrorMatch := func(id int64, reason string) { + _, err = txPool.Get(id, "for query") + if err == nil { + t.Fatalf("expected error, got nil") + } + want := fmt.Sprintf("transaction %v: ended at .* \\(%v\\)", id, reason) + if m, _ := regexp.MatchString(want, err.Error()); !m { + t.Errorf("Get: %v, want match %s", err, want) + } + } + + assertErrorMatch(id, "pool closed") + + txPool = newTxPool() + txPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + + id, err = txPool.Begin(ctx, &querypb.ExecuteOptions{}) + if err := txPool.Commit(ctx, id, &fakeMessageCommitter{}); err != nil { + t.Fatalf("got error: %v", err) + } + + assertErrorMatch(id, "transaction committed") + + id, err = txPool.Begin(ctx, &querypb.ExecuteOptions{}) + if err := txPool.Rollback(ctx, id); err != nil { + t.Fatalf("got error: %v", err) + } + + assertErrorMatch(id, "transaction rolled back") + + txPool.Close() + txPool = newTxPool() + txPool.SetTimeout(1 * time.Millisecond) + txPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + defer txPool.Close() + + id, err = txPool.Begin(ctx, &querypb.ExecuteOptions{}) + time.Sleep(5 * time.Millisecond) + + assertErrorMatch(id, "exceeded timeout: 1ms") + + txPool.SetTimeout(1 * time.Hour) + id, err = txPool.Begin(ctx, &querypb.ExecuteOptions{}) + txc, err := txPool.Get(id, "for close") + if err != nil { + t.Fatalf("got error: %v", err) + } + + txc.Close() + txc.Recycle() + + assertErrorMatch(id, "closed") +} + +type fakeMessageCommitter struct { +} + +func (f *fakeMessageCommitter) LockDB(newMessages map[string][]*messager.MessageRow, changedMessages map[string][]string) func() { + return func() {} +} + +func (f *fakeMessageCommitter) UpdateCaches(newMessages map[string][]*messager.MessageRow, changedMessages map[string][]string) { +} + func TestTxPoolExecFailDueToConnFail_Errno2006(t *testing.T) { db := fakesqldb.New(t) defer db.Close() @@ -395,7 +524,7 @@ func TestTxPoolExecFailDueToConnFail_Errno2006(t 
*testing.T) { ctx := context.Background() // Start the transaction. - txConn, err := txPool.LocalBegin(ctx, false, querypb.ExecuteOptions_DEFAULT) + txConn, err := txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } @@ -433,7 +562,7 @@ func TestTxPoolExecFailDueToConnFail_Errno2013(t *testing.T) { ctx := context.Background() // Start the transaction. - txConn, err := txPool.LocalBegin(ctx, false, querypb.ExecuteOptions_DEFAULT) + txConn, err := txPool.LocalBegin(ctx, &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } @@ -456,6 +585,7 @@ func TestTxPoolExecFailDueToConnFail_Errno2013(t *testing.T) { } func TestTxPoolCloseKillsStrayTransactions(t *testing.T) { + startingStray := tabletenv.InternalErrors.Counts()["StrayTransactions"] db := fakesqldb.New(t) defer db.Close() db.AddQuery("begin", &sqltypes.Result{}) @@ -464,14 +594,14 @@ func TestTxPoolCloseKillsStrayTransactions(t *testing.T) { txPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) // Start stray transaction. - _, err := txPool.Begin(context.Background(), false, querypb.ExecuteOptions_DEFAULT) + _, err := txPool.Begin(context.Background(), &querypb.ExecuteOptions{}) if err != nil { t.Fatal(err) } // Close kills stray transaction. 
txPool.Close() - if got, want := tabletenv.InternalErrors.Counts()["StrayTransactions"], int64(1); got != want { + if got, want := tabletenv.InternalErrors.Counts()["StrayTransactions"]-startingStray, int64(1); got != want { t.Fatalf("internal error count for stray transactions not increased: got = %v, want = %v", got, want) } if got, want := txPool.conns.Capacity(), int64(0); got != want { diff --git a/go/vt/worker/tablet_provider.go b/go/vt/worker/tablet_provider.go index db3a9394263..5ae2174c25e 100644 --- a/go/vt/worker/tablet_provider.go +++ b/go/vt/worker/tablet_provider.go @@ -22,7 +22,6 @@ import ( "golang.org/x/net/context" "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/proto/topodata" "github.com/youtube/vitess/go/vt/topo" "github.com/youtube/vitess/go/vt/topo/topoproto" @@ -36,7 +35,7 @@ type tabletProvider interface { // returnTablet must be called after the tablet is no longer used and e.g. // TabletTracker.Untrack() should get called for it. - returnTablet(*topodata.Tablet) + returnTablet(*topodatapb.Tablet) // description returns a string which can be used in error messages e.g. // the name of the keyspace and the shard. 
@@ -65,7 +64,7 @@ func (p *singleTabletProvider) getTablet() (*topodatapb.Tablet, error) { return tablet.Tablet, err } -func (p *singleTabletProvider) returnTablet(*topodata.Tablet) {} +func (p *singleTabletProvider) returnTablet(*topodatapb.Tablet) {} func (p *singleTabletProvider) description() string { return topoproto.TabletAliasString(p.alias) diff --git a/go/vt/worker/tablet_tracker.go b/go/vt/worker/tablet_tracker.go index 1f41696b7f7..84d723f1165 100644 --- a/go/vt/worker/tablet_tracker.go +++ b/go/vt/worker/tablet_tracker.go @@ -23,8 +23,9 @@ import ( "sync" "github.com/youtube/vitess/go/vt/discovery" - "github.com/youtube/vitess/go/vt/proto/topodata" "github.com/youtube/vitess/go/vt/topo/topoproto" + + topodatapb "github.com/youtube/vitess/go/vt/proto/topodata" ) // TabletTracker tracks for each tablet alias how often it is currently in use @@ -49,7 +50,7 @@ func NewTabletTracker() *TabletTracker { // Track will pick the least used tablet from "stats", increment its usage by 1 // and return it. // "stats" must not be empty. -func (t *TabletTracker) Track(stats []discovery.TabletStats) *topodata.Tablet { +func (t *TabletTracker) Track(stats []discovery.TabletStats) *topodatapb.Tablet { if len(stats) == 0 { panic("stats must not be empty") } @@ -80,7 +81,7 @@ func (t *TabletTracker) Track(stats []discovery.TabletStats) *topodata.Tablet { } // Untrack decrements the usage of "alias" by 1. -func (t *TabletTracker) Untrack(alias *topodata.TabletAlias) { +func (t *TabletTracker) Untrack(alias *topodatapb.TabletAlias) { t.mu.Lock() defer t.mu.Unlock() diff --git a/go/vt/zkctl/zksrv.sh b/go/vt/zkctl/zksrv.sh index 59106de96b3..e210fb9a693 100755 --- a/go/vt/zkctl/zksrv.sh +++ b/go/vt/zkctl/zksrv.sh @@ -55,7 +55,6 @@ fi cmd="$java -DZOO_LOG_DIR=$logdir -cp $classpath org.apache.zookeeper.server.quorum.QuorumPeerMain $config" -start=`/bin/date +%s` log "INFO starting $cmd" $cmd < /dev/null &> /dev/null & pid=$! 
diff --git a/helm/vitess/templates/_helpers.tpl b/helm/vitess/templates/_helpers.tpl index 4d2233c7e08..6a03c9677c5 100644 --- a/helm/vitess/templates/_helpers.tpl +++ b/helm/vitess/templates/_helpers.tpl @@ -11,6 +11,17 @@ {{end -}} {{- end -}} +############################# +# Repeat a string N times, where N is the total number +# of replicas. Len must be used on the calling end to +# get an int +############################# +{{- define "tablet-count" -}} +{{- range . -}} +{{- repeat (int .vttablet.replicas) "x" -}} +{{- end -}} +{{- end -}} + ############################# # Format a list of flag maps into a command line. ############################# diff --git a/helm/vitess/templates/_orchestrator-conf.tpl b/helm/vitess/templates/_orchestrator-conf.tpl index 5fc03569522..1f2e101c592 100644 --- a/helm/vitess/templates/_orchestrator-conf.tpl +++ b/helm/vitess/templates/_orchestrator-conf.tpl @@ -5,6 +5,7 @@ # set tuple values to more recognizable variables {{- $orc := index . 0 -}} {{- $namespace := index . 1 -}} +{{- $enableHeartbeat := index . 2 -}} apiVersion: v1 kind: ConfigMap @@ -117,7 +118,11 @@ data: "ReduceReplicationAnalysisCount": true, "RejectHostnameResolvePattern": "", "RemoveTextFromHostnameDisplay": ".mydomain.com:3306", +{{ if $enableHeartbeat }} "ReplicationLagQuery": "SELECT unix_timestamp() - floor(ts/1000000000) FROM `_vt`.heartbeat ORDER BY ts DESC LIMIT 1;", +{{ else }} + "ReplicationLagQuery": "", +{{ end }} "ServeAgentsHttp": false, "SkipBinlogEventsContaining": [ ], diff --git a/helm/vitess/templates/_vttablet.tpl b/helm/vitess/templates/_vttablet.tpl index 0f8de94150a..37a8d48d134 100644 --- a/helm/vitess/templates/_vttablet.tpl +++ b/helm/vitess/templates/_vttablet.tpl @@ -47,6 +47,7 @@ spec: {{- $config := index . 7 -}} {{- $pmm := index . 8 -}} {{- $orc := index . 9 -}} +{{- $totalTabletCount := index . 
10 -}} # sanitize inputs to create tablet name {{- $cellClean := include "clean-label" $cell.name -}} @@ -54,6 +55,7 @@ spec: {{- $shardClean := include "clean-label" $shard.name -}} {{- $uid := "$(cat /vtdataroot/tabletdata/tablet-uid)" }} {{- $setName := printf "%s-%s-%s-%s" $cellClean $keyspaceClean $shardClean $tablet.type | lower -}} +{{- $shardName := printf "%s-%s-%s" $cellClean $keyspaceClean $shardClean | lower -}} {{- with $tablet.vttablet -}} @@ -103,7 +105,7 @@ spec: containers: {{ include "cont-mysql" (tuple $topology $cell $keyspace $shard $tablet $defaultVttablet $uid) | indent 8 }} -{{ include "cont-vttablet" (tuple $topology $cell $keyspace $shard $tablet $defaultVttablet $vitessTag $uid $namespace $config $orc) | indent 8 }} +{{ include "cont-vttablet" (tuple $topology $cell $keyspace $shard $tablet $defaultVttablet $vitessTag $uid $namespace $config $orc $totalTabletCount) | indent 8 }} {{ include "cont-mysql-errorlog" . | indent 8 }} {{ include "cont-mysql-slowlog" . 
| indent 8 }} {{ if $pmm.enabled }}{{ include "cont-pmm-client" (tuple $pmm $namespace) | indent 8 }}{{ end }} @@ -140,6 +142,96 @@ spec: shard: {{ $shardClean | quote }} type: {{ $tablet.type | quote }} +{{ if eq $tablet.type "replica" }} +--- +################################### +# vttablet InitShardMaster Job +################################### +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ $shardName }}-init-shard-master +spec: + backoffLimit: 0 + template: + spec: + restartPolicy: OnFailure + containers: + - name: init-shard-master + image: "vitess/vtctlclient:{{$vitessTag}}" + + command: ["bash"] + args: + - "-c" + - | + set -ex + + VTCTLD_SVC=vtctld.{{ $namespace }}:15999 + SECONDS=0 + TIMEOUT_SECONDS=600 + + # poll every 5 seconds to see if vtctld is ready + until vtctlclient -server $VTCTLD_SVC ListAllTablets {{ $cellClean }} > /dev/null 2>&1; do + if (( $SECONDS > $TIMEOUT_SECONDS )); then + echo "timed out waiting for vtctlclient to be ready" + exit 1 + fi + sleep 5 + done + + until [ $TABLETS_READY ]; do + # get all the tablets in the current cell + cellTablets="$(vtctlclient -server $VTCTLD_SVC ListAllTablets {{ $cellClean }})" + + # filter to only the tablets in our current shard + shardTablets=$( echo "$cellTablets" | awk 'substr( $5,1,{{ len $shardName }} ) == "{{ $shardName }}" {print $0}') + + # check for a master tablet from the ListAllTablets call + masterTablet=$( echo "$shardTablets" | awk '$4 == "master" {print $1}') + if [ $masterTablet ]; then + echo "'$masterTablet' is already the master tablet, exiting without running InitShardMaster" + exit + fi + + # check for a master tablet from the GetShard call + master_alias=$(vtctlclient -server $VTCTLD_SVC GetShard {{ $keyspaceClean }}/{{ $shard.name }} | jq '.master_alias.uid') + if [ $master_alias != "null" ]; then + echo "'$master_alias' is already the master tablet, exiting without running InitShardMaster" + exit + fi + + # count the number of newlines for the given shard to get the 
tablet count + tabletCount=$( echo "$shardTablets" | wc | awk '{print $1}') + + # check to see if the tablet count equals the expected tablet count + if [ $tabletCount == {{ $totalTabletCount }} ]; then + TABLETS_READY=true + else + if (( $SECONDS > $TIMEOUT_SECONDS )); then + echo "timed out waiting for tablets to be ready" + exit 1 + fi + + # wait 5 seconds for vttablets to continue getting ready + sleep 5 + fi + + done + + # find the tablet id for the "-replica-0" stateful set for a given cell, keyspace and shard + tablet_id=$( echo "$shardTablets" | awk 'substr( $5,1,{{ add (len $shardName) 10 }} ) == "{{ $shardName }}-replica-0" {print $1}') + + # initialize the shard master + until vtctlclient -server $VTCTLD_SVC InitShardMaster -force {{ $keyspaceClean }}/{{ $shard.name }} $tablet_id; do + if (( $SECONDS > $TIMEOUT_SECONDS )); then + echo "timed out waiting for InitShardMaster to succeed" + exit 1 + fi + sleep 5 + done + +{{- end -}} + {{- end -}} {{- end -}} @@ -244,6 +336,7 @@ spec: {{- $namespace := index . 8 -}} {{- $config := index . 9 -}} {{- $orc := index . 10 -}} +{{- $totalTabletCount := index . 
11 -}} {{- $cellClean := include "clean-label" $cell.name -}} {{- with $tablet.vttablet -}} @@ -321,10 +414,14 @@ spec: -db-config-filtered-uname "vt_filtered" -db-config-filtered-dbname "vt_{{$keyspace.name}}" -db-config-filtered-charset "utf8" - -enable_semi_sync -enable_replication_reporter -{{ if $orc.enabled }} +{{ if $defaultVttablet.enableSemisync }} + -enable_semi_sync +{{ end }} +{{ if $defaultVttablet.enableHeartbeat }} -heartbeat_enable +{{ end }} +{{ if $orc.enabled }} -orc_api_url "http://orchestrator.{{ $namespace }}/api" -orc_discover_interval "5m" {{ end }} @@ -475,4 +572,4 @@ affinity: app: "vitess" component: "vttablet" -{{- end -}} \ No newline at end of file +{{- end -}} diff --git a/helm/vitess/templates/vitess.yaml b/helm/vitess/templates/vitess.yaml index 407c855d99d..521bd188100 100644 --- a/helm/vitess/templates/vitess.yaml +++ b/helm/vitess/templates/vitess.yaml @@ -15,7 +15,7 @@ {{ include "orchestrator" (tuple $.Values.orchestrator) }} --- # create orchestrator config map -{{ include "orchestrator-config" (tuple $.Values.orchestrator $.Release.Namespace) }} +{{ include "orchestrator-config" (tuple $.Values.orchestrator $.Release.Namespace $.Values.vttablet.enableHeartbeat) }} --- # create a Service per StatefulSet replica {{ range $i := until (int $.Values.orchestrator.replicas) }} @@ -53,9 +53,14 @@ # Tablets for keyspaces {{ range $keyspace := $cell.keyspaces }} {{ range $shard := $keyspace.shards }} + + {{ $totalTabletCount := len (include "tablet-count" $shard.tablets) }} + + # now range through the tablets again to set them up {{ range $tablet := $shard.tablets }} --- -{{ include "vttablet" (tuple $.Values.topology $cell $keyspace $shard $tablet $.Values.vttablet $.Release.Namespace $.Values.config $.Values.pmm $.Values.orchestrator) }} +{{ include "vttablet" (tuple $.Values.topology $cell $keyspace $shard $tablet $.Values.vttablet $.Release.Namespace $.Values.config $.Values.pmm $.Values.orchestrator $totalTabletCount) }} + {{ end 
}} # range $tablet {{ end }} # range $shard {{ end }} # range $keyspace diff --git a/helm/vitess/values.yaml b/helm/vitess/values.yaml index 276e77781c8..0f4fcd84e02 100644 --- a/helm/vitess/values.yaml +++ b/helm/vitess/values.yaml @@ -130,6 +130,13 @@ vttablet: mysqlImage: "percona:5.7.20" # mysqlImage: "mysql:5.7.20" # mysqlImage: "mariadb:10.3.4" + + enableHeartbeat: false + + # This requires at least 2 instances of "replica" tablet types, otherwise semi-sync + # will block forever. "rdonly" tablets do not ACK. + enableSemisync: false + resources: # common production values 2-4CPU/4-8Gi RAM limits: diff --git a/java/client/src/test/java/io/vitess/client/RpcClientTest.java b/java/client/src/test/java/io/vitess/client/RpcClientTest.java index f48f723a4e4..97301296a3d 100644 --- a/java/client/src/test/java/io/vitess/client/RpcClientTest.java +++ b/java/client/src/test/java/io/vitess/client/RpcClientTest.java @@ -214,7 +214,7 @@ private void waitForVtgateclienttest() throws SQLException, InterruptedException } System.out.format("Waiting until vtgateclienttest is ready and responds (got exception: %s)\n", rootCause); - Thread.sleep(TimeUnit.MILLISECONDS.toMillis(100)); + Thread.sleep(100 /* milliseconds */); waited = true; } } diff --git a/proto/mysqlctl.proto b/proto/mysqlctl.proto index f0410373bad..eecce332897 100644 --- a/proto/mysqlctl.proto +++ b/proto/mysqlctl.proto @@ -41,10 +41,15 @@ message ReinitConfigRequest{} message ReinitConfigResponse{} +message RefreshConfigRequest{} + +message RefreshConfigResponse{} + // MysqlCtl is the service definition service MysqlCtl { rpc Start(StartRequest) returns (StartResponse) {}; rpc Shutdown(ShutdownRequest) returns (ShutdownResponse) {}; rpc RunMysqlUpgrade(RunMysqlUpgradeRequest) returns (RunMysqlUpgradeResponse) {}; rpc ReinitConfig(ReinitConfigRequest) returns (ReinitConfigResponse) {}; + rpc RefreshConfig(RefreshConfigRequest) returns (RefreshConfigResponse) {}; } diff --git a/test/initial_sharding.py 
b/test/initial_sharding.py index 954722209e0..23ab5e2c726 100755 --- a/test/initial_sharding.py +++ b/test/initial_sharding.py @@ -1,13 +1,13 @@ #!/usr/bin/env python # # Copyright 2017 Google Inc. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -243,9 +243,9 @@ def test_resharding(self): shard_master.start_vttablet(wait_for_state=None, binlog_use_v3_resharding_mode=False) shard_replica.start_vttablet(wait_for_state=None, - binlog_use_v3_resharding_mode=False) + binlog_use_v3_resharding_mode=False) shard_rdonly1.start_vttablet(wait_for_state=None, - binlog_use_v3_resharding_mode=False) + binlog_use_v3_resharding_mode=False) for t in [shard_master, shard_replica, shard_rdonly1]: t.wait_for_vttablet_state('NOT_SERVING') @@ -271,16 +271,25 @@ def test_resharding(self): # (that is the tablet_refresh_interval parameter for discovery gateway) # we want cache_ttl at zero so we re-read the topology for every test query. if use_l2vtgate: - l2vtgate1 = utils.L2VtGate() - l2vtgate1.start(tablets= + l2vtgate1 = utils.VtGate() + l2vtgate1.start(extra_args=['--enable_forwarding'], tablets= [shard_master, shard_replica, shard_rdonly1]) l2vtgate1.wait_for_endpoints('test_keyspace.0.master', 1) l2vtgate1.wait_for_endpoints('test_keyspace.0.replica', 1) l2vtgate1.wait_for_endpoints('test_keyspace.0.rdonly', 1) - _, addr = l2vtgate1.rpc_endpoint() - l2vtgate1_param = '%s|test_keyspace|0' % addr - utils.VtGate().start(cache_ttl='0', l2vtgates=[l2vtgate1_param,]) + _, l2vtgate1_addr = l2vtgate1.rpc_endpoint() + + # Clear utils.vtgate, so it doesn't point to the previous l2vtgate1. 
+ utils.vtgate = None + utils.VtGate().start(cache_ttl='0', l2vtgates=[l2vtgate1_addr,], + extra_args=['-disable_local_gateway']) + utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1, + var='L2VtgateConnections') + utils.vtgate.wait_for_endpoints('test_keyspace.0.replica', 1, + var='L2VtgateConnections') + utils.vtgate.wait_for_endpoints('test_keyspace.0.rdonly', 1, + var='L2VtgateConnections') else: utils.VtGate().start(cache_ttl='0', tablets=[ @@ -375,11 +384,13 @@ def test_resharding(self): if use_l2vtgate: l2vtgate1.kill() - l2vtgate1 = utils.L2VtGate() - l2vtgate1.start(tablets= - [shard_master, shard_replica, shard_rdonly1, - shard_0_master, shard_0_replica, shard_0_rdonly1], - tablet_filters='test_keyspace|0,test_keyspace|-80') + l2vtgate1 = utils.VtGate() + l2vtgate1.start(extra_args=['--enable_forwarding', + '-tablet_filters', + 'test_keyspace|0,test_keyspace|-80'], + tablets=[shard_master, shard_replica, shard_rdonly1, + shard_0_master, shard_0_replica, + shard_0_rdonly1]) l2vtgate1.wait_for_endpoints('test_keyspace.0.master', 1) l2vtgate1.wait_for_endpoints('test_keyspace.0.replica', 1) l2vtgate1.wait_for_endpoints('test_keyspace.0.rdonly', 1) @@ -395,10 +406,11 @@ def test_resharding(self): # is test_keyspace.0. This is not ideal, we should re-work # which keyspace/shard a l2vtgate can wait for, as the ones # filtered by tablet_filters. 
- l2vtgate2 = utils.L2VtGate() - l2vtgate2.start(tablets= + l2vtgate2 = utils.VtGate() + l2vtgate2.start(extra_args=['--enable_forwarding', + '-tablet_filters', + 'test_keyspace|80-'], tablets= [shard_1_master, shard_1_replica, shard_1_rdonly1], - tablet_filters='test_keyspace|80-', tablet_types_to_wait='') l2vtgate2.wait_for_endpoints('test_keyspace.80-.master', 1) l2vtgate2.wait_for_endpoints('test_keyspace.80-.replica', 1) @@ -410,29 +422,32 @@ def test_resharding(self): l2vtgate2.verify_no_endpoint('test_keyspace.-80.replica') l2vtgate2.verify_no_endpoint('test_keyspace.-80.rdonly') - _, addr1 = l2vtgate1.rpc_endpoint() - _, addr2 = l2vtgate2.rpc_endpoint() - l2vtgate1_param1 = '%s|test_keyspace|0' % addr1 - l2vtgate1_param2 = '%s|test_keyspace|-80' % addr1 - l2vtgate2_param = '%s|test_keyspace|80-' % addr2 - utils.VtGate().start(cache_ttl='0', l2vtgates=[l2vtgate1_param1, - l2vtgate1_param2, - l2vtgate2_param,]) + _, l2vtgate1_addr = l2vtgate1.rpc_endpoint() + _, l2vtgate2_addr = l2vtgate2.rpc_endpoint() + utils.vtgate = None + utils.VtGate().start(cache_ttl='0', l2vtgates=[l2vtgate1_addr, + l2vtgate2_addr,], + extra_args=['-disable_local_gateway']) + var = 'L2VtgateConnections' else: + utils.vtgate = None utils.VtGate().start(cache_ttl='0', tablets=[ shard_master, shard_replica, shard_rdonly1, shard_0_master, shard_0_replica, shard_0_rdonly1, shard_1_master, shard_1_replica, shard_1_rdonly1]) - utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1) - utils.vtgate.wait_for_endpoints('test_keyspace.0.replica', 1) - utils.vtgate.wait_for_endpoints('test_keyspace.0.rdonly', 1) - utils.vtgate.wait_for_endpoints('test_keyspace.-80.master', 1) - utils.vtgate.wait_for_endpoints('test_keyspace.-80.replica', 1) - utils.vtgate.wait_for_endpoints('test_keyspace.-80.rdonly', 1) - utils.vtgate.wait_for_endpoints('test_keyspace.80-.master', 1) - utils.vtgate.wait_for_endpoints('test_keyspace.80-.replica', 1) - 
utils.vtgate.wait_for_endpoints('test_keyspace.80-.rdonly', 1) + var = None + + # Wait for the endpoints, either local or remote. + utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1, var=var) + utils.vtgate.wait_for_endpoints('test_keyspace.0.replica', 1, var=var) + utils.vtgate.wait_for_endpoints('test_keyspace.0.rdonly', 1, var=var) + utils.vtgate.wait_for_endpoints('test_keyspace.-80.master', 1, var=var) + utils.vtgate.wait_for_endpoints('test_keyspace.-80.replica', 1, var=var) + utils.vtgate.wait_for_endpoints('test_keyspace.-80.rdonly', 1, var=var) + utils.vtgate.wait_for_endpoints('test_keyspace.80-.master', 1, var=var) + utils.vtgate.wait_for_endpoints('test_keyspace.80-.replica', 1, var=var) + utils.vtgate.wait_for_endpoints('test_keyspace.80-.rdonly', 1, var=var) # check the Map Reduce API works correctly, should use ExecuteKeyRanges now, # as we are sharded (with just one shard). @@ -465,7 +480,7 @@ def test_resharding(self): # Run vtworker as daemon for the following SplitClone commands. worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg( ['--cell', 'test_nj', '--command_display_interval', '10ms', - '--use_v3_resharding_mode=false'], + '--use_v3_resharding_mode=false'], auto_log=True) # Initial clone (online). diff --git a/test/initial_sharding_l2vtgate.py b/test/initial_sharding_l2vtgate.py index b2118b9d4a7..5a2a3df68a8 100755 --- a/test/initial_sharding_l2vtgate.py +++ b/test/initial_sharding_l2vtgate.py @@ -1,13 +1,13 @@ #!/usr/bin/env python # # Copyright 2017 Google Inc. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
diff --git a/test/tablet.py b/test/tablet.py index f064298c7ff..5b06a4db2e5 100644 --- a/test/tablet.py +++ b/test/tablet.py @@ -571,11 +571,15 @@ def start_vttablet( # When vttablet restores from backup, it will re-generate the .cnf file. # So we need to have EXTRA_MY_CNF set properly. - all_extra_my_cnf = get_all_extra_my_cnf(None) - if all_extra_my_cnf: - if not extra_env: - extra_env = {} - extra_env['EXTRA_MY_CNF'] = ':'.join(all_extra_my_cnf) + # When using mysqlctld, only mysqlctld should need EXTRA_MY_CNF. + # If any test fails without giving EXTRA_MY_CNF to vttablet, + # it means we missed some call that should run remotely on mysqlctld. + if not self.use_mysqlctld: + all_extra_my_cnf = get_all_extra_my_cnf(None) + if all_extra_my_cnf: + if not extra_env: + extra_env = {} + extra_env['EXTRA_MY_CNF'] = ':'.join(all_extra_my_cnf) if extra_args: args.extend(extra_args) diff --git a/test/utils.py b/test/utils.py index cfe566b6d63..aacc3d0a7e6 100644 --- a/test/utils.py +++ b/test/utils.py @@ -554,19 +554,13 @@ def start(self, cell='test_nj', retry_count=2, '-tablet_protocol', protocols_flavor().tabletconn_protocol(), '-stderrthreshold', get_log_level(), '-normalize_queries', + '-gateway_implementation', vtgate_gateway_flavor().flavor(), ] + args.extend(vtgate_gateway_flavor().flags(cell=cell, tablets=tablets)) if l2vtgates: - args.extend([ - '-gateway_implementation', 'l2vtgategateway', - '-l2vtgategateway_addrs', ','.join(l2vtgates), - ]) - else: - args.extend([ - '-gateway_implementation', vtgate_gateway_flavor().flavor(), - ]) - args.extend(vtgate_gateway_flavor().flags(cell=cell, tablets=tablets)) - if tablet_types_to_wait: - args.extend(['-tablet_types_to_wait', tablet_types_to_wait]) + args.extend(['-l2vtgate_addrs', ','.join(l2vtgates)]) + if tablet_types_to_wait: + args.extend(['-tablet_types_to_wait', tablet_types_to_wait]) if protocols_flavor().vtgate_protocol() == 'grpc': args.extend(['-grpc_port', str(self.grpc_port)]) @@ -764,109 +758,21 @@ def 
split_query(self, sql, keyspace, split_count, bindvars=None): args.append(sql) return run_vtctl_json(args) - def wait_for_endpoints(self, name, count, timeout=20.0): + def wait_for_endpoints(self, name, count, timeout=20.0, var=None): """waits until vtgate gets endpoints. Args: name: name of the endpoint, in the form: 'keyspace.shard.type'. count: how many endpoints to wait for. timeout: how long to wait. + var: name of the variable to use. if None, defaults to the gateway's. """ wait_for_vars('vtgate', self.port, - var=vtgate_gateway_flavor().connection_count_vars(), - key=name, value=count, timeout=timeout) - - -class L2VtGate(object): - """L2VtGate object represents a l2vtgate process.""" - - def __init__(self, port=None): - """Creates the L2VTGate instance and reserve the ports if necessary.""" - self.port = port or environment.reserve_ports(1) - if protocols_flavor().vtgate_protocol() == 'grpc': - self.grpc_port = environment.reserve_ports(1) - self.proc = None - - def start(self, cell='test_nj', retry_count=2, - topo_impl=None, cache_ttl='1s', - extra_args=None, tablets=None, - tablet_types_to_wait='MASTER,REPLICA', - tablet_filters=None): - """Start l2vtgate.""" - - args = environment.binary_args('l2vtgate') + [ - '-port', str(self.port), - '-cell', cell, - '-retry-count', str(retry_count), - '-log_dir', environment.vtlogroot, - '-srv_topo_cache_ttl', cache_ttl, - '-srv_topo_cache_refresh', cache_ttl, - '-tablet_protocol', protocols_flavor().tabletconn_protocol(), - '-gateway_implementation', vtgate_gateway_flavor().flavor(), - ] - args.extend(vtgate_gateway_flavor().flags(cell=cell, tablets=tablets)) - if tablet_types_to_wait: - args.extend(['-tablet_types_to_wait', tablet_types_to_wait]) - if tablet_filters: - args.extend(['-tablet_filters', tablet_filters]) - if protocols_flavor().vtgate_protocol() == 'grpc': - args.extend(['-grpc_port', str(self.grpc_port)]) - if protocols_flavor().service_map(): - args.extend(['-service_map', 
','.join(protocols_flavor().service_map())]) - if topo_impl: - args.extend(['-topo_implementation', topo_impl]) - else: - args.extend(environment.topo_server().flags()) - if extra_args: - args.extend(extra_args) - - self.proc = run_bg(args) - # We use a longer timeout here, as we may be waiting for the initial - # state of a few tablets. - wait_for_vars('l2vtgate', self.port, timeout=20.0) - - def kill(self): - """Terminates the l2vtgate process, and waits for it to exit. - """ - if self.proc is None: - return - kill_sub_process(self.proc, soft=True) - self.proc.wait() - self.proc = None - - def addr(self): - """Returns the address of the l2vtgate process, for web access.""" - return 'localhost:%d' % self.port - - def rpc_endpoint(self): - """Returns the protocol and endpoint to use for RPCs.""" - protocol = protocols_flavor().vtgate_protocol() - if protocol == 'grpc': - return protocol, 'localhost:%d' % self.grpc_port - return protocol, self.addr() - - def get_status(self): - """Returns the status page for this process.""" - return get_status(self.port) - - def get_vars(self): - """Returns the vars for this process.""" - return get_vars(self.port) - - def wait_for_endpoints(self, name, count, timeout=20.0): - """waits until l2vtgate gets endpoints. - - Args: - name: name of the endpoint, in the form: 'keyspace.shard.type'. - count: how many endpoints to wait for. - timeout: how long to wait. - """ - wait_for_vars('l2vtgate', self.port, - var=vtgate_gateway_flavor().connection_count_vars(), + var=var or vtgate_gateway_flavor().connection_count_vars(), key=name, value=count, timeout=timeout) def verify_no_endpoint(self, name): - """verifies the l2vtgate doesn't have any enpoint of the given name. + """verifies the vtgate doesn't have any enpoint of the given name. Args: name: name of the endpoint, in the form: 'keyspace.shard.type'. 
diff --git a/test/vtgatev2_l2vtgate_test.py b/test/vtgatev2_l2vtgate_test.py index a7e7604bcfd..cf869a2701e 100755 --- a/test/vtgatev2_l2vtgate_test.py +++ b/test/vtgatev2_l2vtgate_test.py @@ -1,13 +1,13 @@ #!/usr/bin/env python # # Copyright 2017 Google Inc. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -16,11 +16,11 @@ """Re-runs vtgatev2_test.py with a l2vtgate process.""" -import vtgatev2_test import utils +import vtgatev2_test -# this test is just re-running an entire vtgatev2_test.py with a -# l2vtgate process in the middle +# This test is just re-running an entire vtgatev2_test.py with a +# l2vtgate process in the middle. 
if __name__ == '__main__': vtgatev2_test.use_l2vtgate = True utils.main(vtgatev2_test) diff --git a/test/vtgatev2_test.py b/test/vtgatev2_test.py index 1f167f36670..a95d640202f 100755 --- a/test/vtgatev2_test.py +++ b/test/vtgatev2_test.py @@ -42,8 +42,8 @@ # l2vtgate is the L2VTGate object, if any l2vtgate = None -# l2vtgate_param is the parameter to send to vtgate -l2vtgate_param = None +# l2vtgate_addr is the address of the l2vtgate to send to vtgate +l2vtgate_addr = None shard_0_master = tablet.Tablet() shard_0_replica1 = tablet.Tablet() @@ -184,7 +184,7 @@ def tearDownModule(): def setup_tablets(): """Start up a master mysql and vttablet.""" - global l2vtgate, l2vtgate_param + global l2vtgate, l2vtgate_addr logging.debug('Setting up tablets') utils.run_vtctl(['CreateKeyspace', KEYSPACE_NAME]) @@ -253,13 +253,18 @@ def setup_tablets(): 'Partitions(replica): -80 80-\n') if use_l2vtgate: - l2vtgate = utils.L2VtGate() - l2vtgate.start(tablets= + l2vtgate = utils.VtGate() + l2vtgate.start(extra_args=['--enable_forwarding'], tablets= [shard_0_master, shard_0_replica1, shard_0_replica2, shard_1_master, shard_1_replica1, shard_1_replica2]) - _, addr = l2vtgate.rpc_endpoint() - l2vtgate_param = '%s|%s|%s' % (addr, KEYSPACE_NAME, '-') - utils.VtGate().start(l2vtgates=[l2vtgate_param,]) + _, l2vtgate_addr = l2vtgate.rpc_endpoint() + + # Clear utils.vtgate, so it doesn't point to the previous l2vtgate. + utils.vtgate = None + + # This vgate doesn't watch any local tablets, so we disable_local_gateway. 
+ utils.VtGate().start(l2vtgates=[l2vtgate_addr,], + extra_args=['-disable_local_gateway']) else: utils.VtGate().start(tablets= @@ -271,7 +276,8 @@ def setup_tablets(): def restart_vtgate(port): if use_l2vtgate: - utils.VtGate(port=port).start(l2vtgates=[l2vtgate_param,]) + utils.VtGate(port=port).start(l2vtgates=[l2vtgate_addr,], + extra_args=['-disable_local_gateway']) else: utils.VtGate(port=port).start( tablets=[shard_0_master, shard_0_replica1, shard_0_replica2, @@ -280,7 +286,10 @@ def restart_vtgate(port): def wait_for_endpoints(name, count): if use_l2vtgate: + # Wait for the l2vtgate to have a healthy connection. l2vtgate.wait_for_endpoints(name, count) + # Also wait for vtgate to have received the remote healthy connection. + utils.vtgate.wait_for_endpoints(name, count, var='L2VtgateConnections') else: utils.vtgate.wait_for_endpoints(name, count) @@ -411,8 +420,8 @@ def test_query_routing(self): before1 = v['VttabletCall']['Histograms'][key1]['Count'] if use_l2vtgate: lv = l2vtgate.get_vars() - lbefore0 = lv['VttabletCall']['Histograms'][key0]['Count'] - lbefore1 = lv['VttabletCall']['Histograms'][key1]['Count'] + lbefore0 = lv['QueryServiceCall']['Histograms'][key0]['Count'] + lbefore1 = lv['QueryServiceCall']['Histograms'][key1]['Count'] cursor = vtgate_conn.cursor( tablet_type='master', keyspace=KEYSPACE_NAME, @@ -428,8 +437,8 @@ def test_query_routing(self): self.assertEqual(after1 - before1, 1) if use_l2vtgate: lv = l2vtgate.get_vars() - lafter0 = lv['VttabletCall']['Histograms'][key0]['Count'] - lafter1 = lv['VttabletCall']['Histograms'][key1]['Count'] + lafter0 = lv['QueryServiceCall']['Histograms'][key0]['Count'] + lafter1 = lv['QueryServiceCall']['Histograms'][key1]['Count'] self.assertEqual(lafter0 - lbefore0, 1) self.assertEqual(lafter1 - lbefore1, 1) diff --git a/test/vtgatev3_test.py b/test/vtgatev3_test.py index 5665a28e398..af8a894d41d 100755 --- a/test/vtgatev3_test.py +++ b/test/vtgatev3_test.py @@ -887,6 +887,26 @@ def test_user2(self): 
'delete from vt_user2 where id = :id', {'id': 7}) vtgate_conn.commit() + vtgate_conn.begin() + self.execute_on_master( + vtgate_conn, + 'delete from vt_user2 where id = :id', + {'id': 3}) + vtgate_conn.commit() + + # Test multi shard delete + vtgate_conn.begin() + result = self.execute_on_master( + vtgate_conn, + 'insert into vt_user (id, name) values (:id0, :name0),(:id1, :name1)', + {'id0': 22, 'name0': 'name2', 'id1': 33, 'name1': 'name2'}) + self.assertEqual(result, ([], 2L, 0L, [])) + result = self.execute_on_master( + vtgate_conn, + 'delete from vt_user where id > :id', + {'id': 20}) + self.assertEqual(result, ([], 2L, 0L, [])) + vtgate_conn.commit() def test_user_truncate(self): vtgate_conn = get_connection() @@ -896,7 +916,25 @@ def test_user_truncate(self): 'insert into vt_user2 (id, name) values (:id, :name)', {'id': 1, 'name': 'name1'}) self.assertEqual(result, ([], 1L, 0L, [])) + result = self.execute_on_master( + vtgate_conn, + 'insert into vt_user2 (id, name) values (:id, :name)', + {'id': 7, 'name': 'name1'}) + self.assertEqual(result, ([], 1L, 0L, [])) + result = self.execute_on_master( + vtgate_conn, + 'insert into vt_user2 (id, name) values (:id0, :name0),(:id1, :name1)', + {'id0': 2, 'name0': 'name2', 'id1': 3, 'name1': 'name2'}) + self.assertEqual(result, ([], 2L, 0L, [])) vtgate_conn.commit() + result = shard_0_master.mquery('vt_user', 'select id, name from vt_user2') + self.assertEqual(result, ((1L, 'name1'), (2L, 'name2'), (3L, 'name2'))) + result = shard_1_master.mquery('vt_user', 'select id, name from vt_user2') + self.assertEqual(result, ((7L, 'name1'),)) + result = lookup_master.mquery( + 'vt_lookup', 'select name, user2_id from name_user2_map') + self.assertEqual(result, (('name1', 1L), ('name1', 7L), ('name2', 2L), + ('name2', 3L))) vtgate_conn.begin() result = vtgate_conn._execute( 'truncate vt_user2',