diff --git a/.config/dotnet-tools.json b/.config/dotnet-tools.json index 036b867bd7e07..7643e9c1c3575 100644 --- a/.config/dotnet-tools.json +++ b/.config/dotnet-tools.json @@ -15,7 +15,7 @@ ] }, "microsoft.dotnet.xharness.cli": { - "version": "8.0.0-prerelease.23471.1", + "version": "8.0.0-prerelease.23477.1", "commands": [ "xharness" ] diff --git a/.github/fabricbot.json b/.github/fabricbot.json index ff4b8e8acc436..e46c355ff4d0e 100644 --- a/.github/fabricbot.json +++ b/.github/fabricbot.json @@ -13507,12 +13507,12 @@ "subCapability": "IssuesOnlyResponder", "version": "1.0", "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage] Moved to Another Area", + "taskName": "[Area Pod: Eirik / Krzysztof / Tarek - Issue Triage] Moved to Another Area", "actions": [ { "name": "removeFromProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "isOrgProject": true } } @@ -13703,7 +13703,7 @@ { "name": "isInProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "isOrgProject": true } } @@ -13718,19 +13718,19 @@ "subCapability": "IssuesOnlyResponder", "version": "1.0", "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage] Needs Triage", + "taskName": "[Area Pod: Eirik / Krzysztof / Tarek - Issue Triage] Needs Triage", "actions": [ { "name": "removeFromProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "isOrgProject": true } }, { "name": "addToProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "columnName": "Needs Triage", 
"isOrgProject": true } @@ -13976,7 +13976,7 @@ { "name": "isInProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "isOrgProject": true } } @@ -13985,7 +13985,7 @@ { "name": "isInProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "isOrgProject": true, "columnName": "Triaged" } @@ -14074,19 +14074,19 @@ "subCapability": "IssueCommentResponder", "version": "1.0", "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage] Needs Further Triage", + "taskName": "[Area Pod: Eirik / Krzysztof / Tarek - Issue Triage] Needs Further Triage", "actions": [ { "name": "removeFromProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "isOrgProject": true } }, { "name": "addToProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "columnName": "Needs Triage", "isOrgProject": true } @@ -14218,7 +14218,7 @@ { "name": "isInProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "isOrgProject": true } } @@ -14227,7 +14227,7 @@ { "name": "isInProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "columnName": "Triaged", "isOrgProject": true } @@ -14316,12 +14316,12 @@ "subCapability": "IssuesOnlyResponder", "version": "1.0", "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage] Triaged", 
+ "taskName": "[Area Pod: Eirik / Krzysztof / Tarek - Issue Triage] Triaged", "actions": [ { "name": "addToProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "columnName": "Triaged", "isOrgProject": true } @@ -14343,7 +14343,7 @@ { "name": "isInProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "isOrgProject": true } }, @@ -14385,12 +14385,12 @@ "subCapability": "IssuesOnlyResponder", "version": "1.0", "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage] Eirik Updated Issue", + "taskName": "[Area Pod: Eirik / Krzysztof / Tarek - Issue Triage] Eirik Updated Issue", "actions": [ { "name": "moveToProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "columnName": "Triage: Eirik", "isOrgProject": true } @@ -14406,7 +14406,7 @@ { "name": "isInProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "isOrgProject": true, "columnName": "Needs Triage" } @@ -14468,12 +14468,12 @@ "subCapability": "IssueCommentResponder", "version": "1.0", "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage] Eirik Commented", + "taskName": "[Area Pod: Eirik / Krzysztof / Tarek - Issue Triage] Eirik Commented", "actions": [ { "name": "moveToProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "columnName": "Triage: Eirik", "isOrgProject": true } @@ -14489,7 +14489,7 @@ { "name": 
"isInProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "isOrgProject": true, "columnName": "Needs Triage" } @@ -14551,12 +14551,12 @@ "subCapability": "IssuesOnlyResponder", "version": "1.0", "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage] Krzysztof Updated Issue", + "taskName": "[Area Pod: Eirik / Krzysztof / Tarek - Issue Triage] Krzysztof Updated Issue", "actions": [ { "name": "moveToProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "columnName": "Triage: Krzysztof", "isOrgProject": true } @@ -14572,7 +14572,7 @@ { "name": "isInProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "isOrgProject": true, "columnName": "Needs Triage" } @@ -14634,12 +14634,12 @@ "subCapability": "IssueCommentResponder", "version": "1.0", "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage] Krzysztof Commented", + "taskName": "[Area Pod: Eirik / Krzysztof / Tarek - Issue Triage] Krzysztof Commented", "actions": [ { "name": "moveToProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "columnName": "Triage: Krzysztof", "isOrgProject": true } @@ -14655,7 +14655,7 @@ { "name": "isInProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "isOrgProject": true, "columnName": "Needs Triage" } @@ -14717,178 +14717,12 @@ "subCapability": "IssuesOnlyResponder", "version": "1.0", 
"config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage] Layomi Updated Issue", + "taskName": "[Area Pod: Eirik / Krzysztof / Tarek - Issue Triage] Tarek Updated Issue", "actions": [ { "name": "moveToProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", - "columnName": "Triage: Layomi", - "isOrgProject": true - } - } - ], - "eventType": "issue", - "eventNames": [ - "issues" - ], - "conditions": { - "operator": "and", - "operands": [ - { - "name": "isInProjectColumn", - "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", - "isOrgProject": true, - "columnName": "Needs Triage" - } - }, - { - "name": "isActivitySender", - "parameters": { - "user": "layomia" - } - }, - { - "operator": "and", - "operands": [ - { - "name": "isOpen", - "parameters": {} - }, - { - "operator": "not", - "operands": [ - { - "name": "isInMilestone", - "parameters": {} - } - ] - }, - { - "operator": "not", - "operands": [ - { - "name": "hasLabel", - "parameters": { - "label": "needs-author-action" - } - } - ] - }, - { - "operator": "not", - "operands": [ - { - "name": "hasLabel", - "parameters": { - "label": "api-ready-for-review" - } - } - ] - } - ] - } - ] - } - } - }, - { - "taskSource": "fabricbot-config", - "taskType": "trigger", - "capabilityId": "IssueResponder", - "subCapability": "IssueCommentResponder", - "version": "1.0", - "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage] Layomi Commented", - "actions": [ - { - "name": "moveToProjectColumn", - "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", - "columnName": "Triage: Layomi", - "isOrgProject": true - } - } - ], - "eventType": "issue", - "eventNames": [ - "issue_comment" - ], - "conditions": { - "operator": "and", - "operands": [ - { - "name": "isInProjectColumn", - "parameters": { - "projectName": "Area Pod: Eirik / 
Krzysztof / Layomi / Tarek - Issue Triage", - "isOrgProject": true, - "columnName": "Needs Triage" - } - }, - { - "name": "isActivitySender", - "parameters": { - "user": "layomia" - } - }, - { - "operator": "and", - "operands": [ - { - "name": "isOpen", - "parameters": {} - }, - { - "operator": "not", - "operands": [ - { - "name": "isInMilestone", - "parameters": {} - } - ] - }, - { - "operator": "not", - "operands": [ - { - "name": "hasLabel", - "parameters": { - "label": "needs-author-action" - } - } - ] - }, - { - "operator": "not", - "operands": [ - { - "name": "hasLabel", - "parameters": { - "label": "api-ready-for-review" - } - } - ] - } - ] - } - ] - } - } - }, - { - "taskSource": "fabricbot-config", - "taskType": "trigger", - "capabilityId": "IssueResponder", - "subCapability": "IssuesOnlyResponder", - "version": "1.0", - "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage] Tarek Updated Issue", - "actions": [ - { - "name": "moveToProjectColumn", - "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "columnName": "Triage: Tarek", "isOrgProject": true } @@ -14904,7 +14738,7 @@ { "name": "isInProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "isOrgProject": true, "columnName": "Needs Triage" } @@ -14966,12 +14800,12 @@ "subCapability": "IssueCommentResponder", "version": "1.0", "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage] Tarek Commented", + "taskName": "[Area Pod: Eirik / Krzysztof / Tarek - Issue Triage] Tarek Commented", "actions": [ { "name": "moveToProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", 
"columnName": "Triage: Tarek", "isOrgProject": true } @@ -14987,7 +14821,7 @@ { "name": "isInProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "isOrgProject": true, "columnName": "Needs Triage" } @@ -15049,12 +14883,12 @@ "subCapability": "IssuesOnlyResponder", "version": "1.0", "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage] Excluded", + "taskName": "[Area Pod: Eirik / Krzysztof / Tarek - Issue Triage] Excluded", "actions": [ { "name": "removeFromProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "isOrgProject": true } } @@ -15069,7 +14903,7 @@ { "name": "isInProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - Issue Triage", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - Issue Triage", "isOrgProject": true } }, @@ -15125,12 +14959,12 @@ "subCapability": "IssuesOnlyResponder", "version": "1.0", "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs] Moved to Another Area", + "taskName": "[Area Pod: Eirik / Krzysztof / Tarek - PRs] Moved to Another Area", "actions": [ { "name": "removeFromProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "isOrgProject": true } } @@ -15321,7 +15155,7 @@ { "name": "isInProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "isOrgProject": true } } @@ -15336,12 +15170,12 @@ "subCapability": "PullRequestResponder", "version": "1.0", "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs] Closed, Merged, or Moved", + "taskName": "[Area Pod: 
Eirik / Krzysztof / Tarek - PRs] Closed, Merged, or Moved", "actions": [ { "name": "moveToProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "columnName": "Done", "isOrgProject": true } @@ -15357,7 +15191,7 @@ { "name": "isInProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "isOrgProject": true } }, @@ -15367,7 +15201,7 @@ { "name": "isInProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "columnName": "Done", "isOrgProject": true } @@ -15569,12 +15403,12 @@ "subCapability": "PullRequestResponder", "version": "1.0", "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs] New PR Needs Champion", + "taskName": "[Area Pod: Eirik / Krzysztof / Tarek - PRs] New PR Needs Champion", "actions": [ { "name": "addToProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "columnName": "Needs Champion", "isOrgProject": true } @@ -15737,30 +15571,6 @@ ] } ], - [ - { - "operator": "not", - "operands": [ - { - "name": "isAssignedToUser", - "parameters": { - "user": "layomia" - } - } - ] - }, - { - "operator": "not", - "operands": [ - { - "name": "isActivitySender", - "parameters": { - "user": "layomia" - } - } - ] - } - ], [ { "operator": "not", @@ -15795,7 +15605,7 @@ { "name": "isInProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "isOrgProject": true } } @@ -15804,7 +15614,7 @@ { "name": "isInProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area 
Pod: Eirik / Krzysztof / Tarek - PRs", "columnName": "Done", "isOrgProject": true } @@ -15893,19 +15703,19 @@ "subCapability": "PullRequestResponder", "version": "1.0", "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs] Updated PR Needs Champion", + "taskName": "[Area Pod: Eirik / Krzysztof / Tarek - PRs] Updated PR Needs Champion", "actions": [ { "name": "removeFromProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "isOrgProject": true } }, { "name": "addToProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "columnName": "Needs Champion", "isOrgProject": true } @@ -16050,17 +15860,6 @@ } ] }, - { - "operator": "not", - "operands": [ - { - "name": "isAssignedToUser", - "parameters": { - "user": "layomia" - } - } - ] - }, { "operator": "not", "operands": [ @@ -16081,7 +15880,7 @@ { "name": "isInProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "isOrgProject": true } } @@ -16090,7 +15889,7 @@ { "name": "isInProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "columnName": "Done", "isOrgProject": true } @@ -16179,19 +15978,19 @@ "subCapability": "PullRequestResponder", "version": "1.0", "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs] Eirik Assigned as Champion", + "taskName": "[Area Pod: Eirik / Krzysztof / Tarek - PRs] Eirik Assigned as Champion", "actions": [ { "name": "removeFromProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "isOrgProject": true } }, { "name": 
"addToProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "columnName": "Champion: Eirik", "isOrgProject": true } @@ -16340,7 +16139,7 @@ { "name": "isInProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "isOrgProject": true } } @@ -16349,7 +16148,7 @@ { "name": "isInProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "columnName": "Needs Champion", "isOrgProject": true } @@ -16357,7 +16156,7 @@ { "name": "isInProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "columnName": "Done", "isOrgProject": true } @@ -16375,19 +16174,19 @@ "subCapability": "PullRequestResponder", "version": "1.0", "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs] Krzysztof Assigned as Champion", + "taskName": "[Area Pod: Eirik / Krzysztof / Tarek - PRs] Krzysztof Assigned as Champion", "actions": [ { "name": "removeFromProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "isOrgProject": true } }, { "name": "addToProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "columnName": "Champion: Krzysztof", "isOrgProject": true } @@ -16536,203 +16335,7 @@ { "name": "isInProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", - "isOrgProject": true - } - } - ] - }, - { - "name": "isInProjectColumn", - "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", - 
"columnName": "Needs Champion", - "isOrgProject": true - } - }, - { - "name": "isInProjectColumn", - "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", - "columnName": "Done", - "isOrgProject": true - } - } - ] - } - ] - } - } - }, - { - "taskSource": "fabricbot-config", - "taskType": "trigger", - "capabilityId": "IssueResponder", - "subCapability": "PullRequestResponder", - "version": "1.0", - "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs] Layomi Assigned as Champion", - "actions": [ - { - "name": "removeFromProject", - "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", - "isOrgProject": true - } - }, - { - "name": "addToProject", - "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", - "columnName": "Champion: Layomi", - "isOrgProject": true - } - } - ], - "eventType": "pull_request", - "eventNames": [ - "pull_request" - ], - "conditions": { - "operator": "and", - "operands": [ - { - "name": "isOpen", - "parameters": {} - }, - { - "operator": "or", - "operands": [ - { - "name": "hasLabel", - "parameters": { - "label": "area-Extensions-Configuration" - } - }, - { - "name": "hasLabel", - "parameters": { - "label": "area-Extensions-Logging" - } - }, - { - "name": "hasLabel", - "parameters": { - "label": "area-Extensions-Options" - } - }, - { - "name": "hasLabel", - "parameters": { - "label": "area-Extensions-Primitives" - } - }, - { - "name": "hasLabel", - "parameters": { - "label": "area-System.Diagnostics.Activity" - } - }, - { - "name": "hasLabel", - "parameters": { - "label": "area-System.Globalization" - } - }, - { - "name": "hasLabel", - "parameters": { - "label": "area-System.Collections" - } - }, - { - "name": "hasLabel", - "parameters": { - "label": "area-System.ComponentModel.DataAnnotations" - } - }, - { - "name": "hasLabel", - "parameters": { - "label": "area-System.DateTime" - } - }, - { - "name": "hasLabel", - 
"parameters": { - "label": "area-System.IO.Ports" - } - }, - { - "name": "hasLabel", - "parameters": { - "label": "area-System.Linq" - } - }, - { - "name": "hasLabel", - "parameters": { - "label": "area-System.Text.Encoding" - } - }, - { - "name": "hasLabel", - "parameters": { - "label": "area-System.Text.Encodings.Web" - } - }, - { - "name": "hasLabel", - "parameters": { - "label": "area-System.Text.Json" - } - }, - { - "name": "hasLabel", - "parameters": { - "label": "area-System.Xml" - } - } - ] - }, - { - "operator": "or", - "operands": [ - { - "name": "isAssignedToUser", - "parameters": { - "user": "layomia" - } - }, - { - "operator": "and", - "operands": [ - { - "name": "isAction", - "parameters": { - "action": "opened" - } - }, - { - "name": "isActivitySender", - "parameters": { - "user": "layomia" - } - } - ] - } - ] - }, - { - "operator": "or", - "operands": [ - { - "operator": "not", - "operands": [ - { - "name": "isInProject", - "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "isOrgProject": true } } @@ -16741,7 +16344,7 @@ { "name": "isInProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "columnName": "Needs Champion", "isOrgProject": true } @@ -16749,7 +16352,7 @@ { "name": "isInProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "columnName": "Done", "isOrgProject": true } @@ -16767,19 +16370,19 @@ "subCapability": "PullRequestResponder", "version": "1.0", "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs] Tarek Assigned as Champion", + "taskName": "[Area Pod: Eirik / Krzysztof / Tarek - PRs] Tarek Assigned as Champion", "actions": [ { "name": "removeFromProject", "parameters": { - "projectName": 
"Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "isOrgProject": true } }, { "name": "addToProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "columnName": "Champion: Tarek", "isOrgProject": true } @@ -16928,7 +16531,7 @@ { "name": "isInProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "isOrgProject": true } } @@ -16937,7 +16540,7 @@ { "name": "isInProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "columnName": "Needs Champion", "isOrgProject": true } @@ -16945,7 +16548,7 @@ { "name": "isInProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "columnName": "Done", "isOrgProject": true } @@ -16963,12 +16566,12 @@ "subCapability": "PullRequestResponder", "version": "1.0", "config": { - "taskName": "[Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs] Excluded", + "taskName": "[Area Pod: Eirik / Krzysztof / Tarek - PRs] Excluded", "actions": [ { "name": "removeFromProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "isOrgProject": true } } @@ -16983,7 +16586,7 @@ { "name": "isInProject", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", "isOrgProject": true } }, @@ -16993,7 +16596,7 @@ { "name": "isInProjectColumn", "parameters": { - "projectName": "Area Pod: Eirik / Krzysztof / Layomi / Tarek - PRs", + "projectName": "Area Pod: Eirik / Krzysztof / Tarek - PRs", 
"columnName": "Done", "isOrgProject": true } @@ -20597,4 +20200,4 @@ } } } -] +] \ No newline at end of file diff --git a/Directory.Build.props b/Directory.Build.props index 4d3e8a8408abf..4e86c1d212081 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -89,8 +89,9 @@ 8.0 - net$(NetCoreAppPreviousVersion) - $(NetCoreAppCurrent) + + net8.0 diff --git a/docs/area-owners.json b/docs/area-owners.json index 0cd5a8645148c..72914f694781b 100644 --- a/docs/area-owners.json +++ b/docs/area-owners.json @@ -292,13 +292,12 @@ }, { "lead": "ericstj", - "pod": "eirik-krzysztof-layomi-tarek", + "pod": "eirik-krzysztof-tarek", "owners": [ "eiriktsarpalis", "ericstj", "jeffhandley", "krwq", - "layomia", "tarekgh", "dotnet/area-extensions-configuration" ], @@ -359,13 +358,12 @@ }, { "lead": "ericstj", - "pod": "eirik-krzysztof-layomi-tarek", + "pod": "eirik-krzysztof-tarek", "owners": [ "eiriktsarpalis", "ericstj", "jeffhandley", "krwq", - "layomia", "tarekgh", "dotnet/area-extensions-logging" ], @@ -373,13 +371,12 @@ }, { "lead": "ericstj", - "pod": "eirik-krzysztof-layomi-tarek", + "pod": "eirik-krzysztof-tarek", "owners": [ "eiriktsarpalis", "ericstj", "jeffhandley", "krwq", - "layomia", "tarekgh", "dotnet/area-extensions-options" ], @@ -387,13 +384,12 @@ }, { "lead": "ericstj", - "pod": "eirik-krzysztof-layomi-tarek", + "pod": "eirik-krzysztof-tarek", "owners": [ "eiriktsarpalis", "ericstj", "jeffhandley", "krwq", - "layomia", "tarekgh", "dotnet/area-extensions-primitives" ], @@ -646,14 +642,13 @@ }, { "lead": "jeffhandley", - "pod": "eirik-krzysztof-layomi-tarek", + "pod": "eirik-krzysztof-tarek", "owners": [ "eiriktsarpalis", "ericstj", "GrabYourPitchforks", "jeffhandley", "krwq", - "layomia", "tarekgh", "dotnet/area-system-collections" ], @@ -685,13 +680,12 @@ }, { "lead": "jeffhandley", - "pod": "eirik-krzysztof-layomi-tarek", + "pod": "eirik-krzysztof-tarek", "owners": [ "eiriktsarpalis", "ericstj", "jeffhandley", "krwq", - "layomia", "tarekgh", 
"dotnet/area-system-componentmodel-dataannotations" ], @@ -770,13 +764,12 @@ }, { "lead": "ericstj", - "pod": "eirik-krzysztof-layomi-tarek", + "pod": "eirik-krzysztof-tarek", "owners": [ "eiriktsarpalis", "ericstj", "jeffhandley", "krwq", - "layomia", "tarekgh", "dotnet/area-system-datetime" ], @@ -806,13 +799,12 @@ }, { "lead": "tommcdon", - "pod": "eirik-krzysztof-layomi-tarek", + "pod": "eirik-krzysztof-tarek", "owners": [ "eiriktsarpalis", "ericstj", "jeffhandley", "krwq", - "layomia", "tarekgh", "tommcdon", "dotnet/area-system-diagnostics-activity" @@ -956,13 +948,12 @@ }, { "lead": "ericstj", - "pod": "eirik-krzysztof-layomi-tarek", + "pod": "eirik-krzysztof-tarek", "owners": [ "eiriktsarpalis", "ericstj", "jeffhandley", "krwq", - "layomia", "tarekgh", "dotnet/area-system-globalization" ], @@ -1014,13 +1005,12 @@ }, { "lead": "jeffhandley", - "pod": "eirik-krzysztof-layomi-tarek", + "pod": "eirik-krzysztof-tarek", "owners": [ "eiriktsarpalis", "ericstj", "jeffhandley", "krwq", - "layomia", "tarekgh", "dotnet/area-system-io-ports" ], @@ -1028,13 +1018,12 @@ }, { "lead": "jeffhandley", - "pod": "eirik-krzysztof-layomi-tarek", + "pod": "eirik-krzysztof-tarek", "owners": [ "eiriktsarpalis", "ericstj", "jeffhandley", "krwq", - "layomia", "tarekgh", "dotnet/area-system-linq" ], @@ -1357,14 +1346,13 @@ }, { "lead": "jeffhandley", - "pod": "eirik-krzysztof-layomi-tarek", + "pod": "eirik-krzysztof-tarek", "owners": [ "eiriktsarpalis", "ericstj", "GrabYourPitchforks", "jeffhandley", "krwq", - "layomia", "tarekgh", "dotnet/area-system-text-encoding" ], @@ -1372,14 +1360,13 @@ }, { "lead": "jeffhandley", - "pod": "eirik-krzysztof-layomi-tarek", + "pod": "eirik-krzysztof-tarek", "owners": [ "eiriktsarpalis", "ericstj", "GrabYourPitchforks", "jeffhandley", "krwq", - "layomia", "tarekgh", "dotnet/area-system-text-encodings-web" ], @@ -1387,13 +1374,12 @@ }, { "lead": "jeffhandley", - "pod": "eirik-krzysztof-layomi-tarek", + "pod": "eirik-krzysztof-tarek", "owners": [ 
"eiriktsarpalis", "ericstj", "jeffhandley", "krwq", - "layomia", "steveharter", "tarekgh", "dotnet/area-system-text-json" @@ -1462,13 +1448,12 @@ }, { "lead": "jeffhandley", - "pod": "eirik-krzysztof-layomi-tarek", + "pod": "eirik-krzysztof-tarek", "owners": [ "eiriktsarpalis", "ericstj", "jeffhandley", "krwq", - "layomia", "tarekgh", "dotnet/area-system-xml" ], diff --git a/docs/design/coreclr/jit/ryujit-overview.md b/docs/design/coreclr/jit/ryujit-overview.md index 5635815aaaae5..cdb17002ee197 100644 --- a/docs/design/coreclr/jit/ryujit-overview.md +++ b/docs/design/coreclr/jit/ryujit-overview.md @@ -71,15 +71,13 @@ Definitions (aka, defs) of SDSU temps are represented by `GenTree` nodes themsel edges from the using node to the defining node. Furthermore, SDSU temps defined in one block may not be used in a different block. In cases where a value must be multiply-defined, multiply-used, or defined in one block and used in another, the IR provides another class of temporary: the local var (aka, local variable). Local vars are defined by -assignment nodes in HIR or store nodes in LIR, and are used by local var nodes in both forms. +store nodes and used by users of local var nodes. An HIR block is composed of a doubly-linked list of statement nodes (`Statement`), each of which references a single expression tree (`m_rootNode`). The `GenTree` nodes in this tree execute in "tree order", which is defined as the -order produced by a depth-first, left-to-right traversal of the tree, with two notable exceptions: +order produced by a depth-first, left-to-right traversal of the tree, with one notable exception: * Binary nodes marked with the `GTF_REVERSE_OPS` flag execute their right operand tree (`gtOp2`) before their left operand tree (`gtOp1`) -* Dynamically-sized block copy nodes where `gtEvalSizeFirst` is `true` execute the `gtDynamicSize` tree -before executing their other operand trees. 
In addition to tree order, HIR also requires that no SDSU temp is defined in one statement and used in another. In situations where the requirements of tree and statement order prove onerous (e.g. when code must execute at a @@ -114,7 +112,7 @@ the JIT. In HIR these links are primarily a convenience, as the order produced b the order produced by a "tree order" traversal (see above for details). In LIR these links define the execution order of the nodes. -HIR statement nodes utilize the same `GenTree` base type as the operation nodes, though they are not truly related. +HIR statement nodes are represented by the `Statement` type. * The statement nodes are doubly-linked. The first statement node in a block points to the last node in the block via its `m_prev` link. Note that the last statement node does *not* point to the first; that is, the list is not fully circular. * Each statement node contains two `GenTree` links – `m_rootNode` points to the top-level node in the statement (i.e. the root of the tree that represents the statement), while `m_treeList` points to the first node in execution order (again, this link is not always valid). @@ -138,9 +136,8 @@ A stripped-down dump of the `GenTree` nodes just after they are imported looks l ``` STMT00000 (IL 0x000...0x026) -▌ ASG double -├──▌ IND double -│ └──▌ LCL_VAR byref V03 arg3 +▌ STOREIND double +├──▌ LCL_VAR byref V03 arg3 └──▌ DIV double ├──▌ ADD double │ ├──▌ NEG double @@ -162,12 +159,16 @@ STMT00000 (IL 0x000...0x026) ## Types -The JIT is primarily concerned with "primitive" types, i.e. integers, reference types, pointers, and floating point -types. It must also be concerned with the format of user-defined value types (i.e. struct types derived from +The JIT is primarily concerned with "primitive" types, i.e. integers, reference types, pointers, floating point +and SIMD types. It must also be concerned with the format of user-defined value types (i.e. 
struct types derived from `System.ValueType`) – specifically, their size and the offset of any GC references they contain, so that they can be correctly initialized and copied. The primitive types are represented in the JIT by the `var_types` enum, and any additional information required for struct types is obtained from the JIT/EE interface by the use of an opaque -`CORINFO_CLASS_HANDLE`. +`CORINFO_CLASS_HANDLE`, which is converted into a `ClassLayout` instance that caches the most important information. +All `TYP_STRUCT`-typed nodes can be queried for the layout they produce via `GenTree::GetLayout`. + +Some nodes also use "small" integer types - `TYP_BYTE`, `TYP_UBYTE`, `TYP_SHORT` and `TYP_USHORT`, to represent +that they produce implicitly sign- or zero-extended `TYP_INT` values, much like in the IL stack model. ## Dataflow Information @@ -187,8 +188,8 @@ then propagates the liveness information. The result of the analysis is captured ## SSA Static single assignment (SSA) form is constructed in a traditional manner [[1]](#[1]). The SSA names are recorded on -the lclVar references. While SSA form usually retains a pointer or link to the defining reference, RyuJIT currently -retains only the `BasicBlock` in which the definition of each SSA name resides. +the lclVar references and point to the `LclSsaVarDsc` descriptors that contain the defining store node and block in +which it occurs. ## Value Numbering @@ -208,9 +209,12 @@ The top-level function of interest is `Compiler::compCompile`. It invokes the fo | [Inlining](#inlining) | The IR for inlined methods is incorporated into the flowgraph. | | [Struct Promotion](#struct-promotion) | New lclVars are created for each field of a promoted struct. | | [Mark Address-Exposed Locals](#mark-addr-exposed) | lclVars with references occurring in an address-taken context are marked. This must be kept up-to-date. | +| Early liveness | Compute lclVar liveness for use by phases up to and including global morph. 
| +| Forward Subtitution | Eliminate SDSU-like locals by substituting their values directly into uses. | +| Physical promotion | Split struct locals into primitives based on access patterns. | | [Morph Blocks](#morph-blocks) | Performs localized transformations, including mandatory normalization as well as simple optimizations. | | [Eliminate Qmarks](#eliminate-qmarks) | All `GT_QMARK` nodes are eliminated, other than simple ones that do not require control flow. | -| [Flowgraph Analysis](#flowgraph-analysis) | `BasicBlock` predecessors are computed, and must be kept valid. Loops are identified, and normalized, cloned and/or unrolled. | +| [Flowgraph Analysis](#flowgraph-analysis) | Loops are identified and normalized, cloned and/or unrolled. | | [Normalize IR for Optimization](#normalize-ir) | lclVar references counts are set, and must be kept valid. Evaluation order of `GenTree` nodes (`gtNext`/`gtPrev`) is determined, and must be kept valid. | | [SSA and Value Numbering Optimizations](#ssa-vn) | Computes liveness (`bbLiveIn` and `bbLiveOut` on `BasicBlock`s), and dominators. Builds SSA for tracked lclVars. Computes value numbers. | | [Loop Invariant Code Hoisting](#licm) | Hoists expressions out of loops. | @@ -218,10 +222,12 @@ The top-level function of interest is `Compiler::compCompile`. It invokes the fo | [Common Subexpression Elimination (CSE)](#cse) | Elimination of redundant subexressions based on value numbers. | | [Assertion Propagation](#assertion-propagation) | Utilizes value numbers to propagate and transform based on properties such as non-nullness. | | [Range analysis](#range-analysis) | Eliminate array index range checks based on value numbers and assertions | -| [Rationalization](#rationalization) | Flowgraph order changes from `FGOrderTree` to `FGOrderLinear`. All `GT_COMMA`, `GT_ASG` and `GT_ADDR` nodes are transformed. | -| [Lowering](#lowering) | Register requirements are fully specified (`gtLsraInfo`). All control flow is explicit. 
| +| [VN-based dead store elimination](#vn-based-dead-store-elimination) | Eliminate stores that do not change the value of a local. | +| [If conversion](#if-conversion) | Transform conditional definitions into `GT_SELECT` operators. | +| [Rationalization](#rationalization) | Flowgraph order changes from `FGOrderTree` to `FGOrderLinear`. All `GT_COMMA` nodes are transformed. | +| [Lowering](#lowering) | Nodes are tranformed for register allocation; Target-specific optimizations are performed. | | [Register allocation](#reg-alloc) | Registers are assigned (`gtRegNum` and/or `gtRsvdRegs`), and the number of spill temps calculated. | -| [Code Generation](#code-generation) | Determines frame layout. Generates code for each `BasicBlock`. Generates prolog & epilog code for the method. Emit EH, GC and Debug info. | +| [Code Generation](#code-generation) | Determines frame layout. Generates code for each `BasicBlock`. Generates prolog & epilog code for the method. Emits EH, GC and Debug info. | ## Pre-import @@ -234,7 +240,7 @@ Importation is the phase that creates the IR for the method, reading in one IL i the statements. During this process, it may need to generate IR with multiple, nested expressions. This is the purpose of the non-expression-like IR nodes: -* It may need to evaluate part of the expression into a temp, in which case it will use a comma (`GT_COMMA`) node to ensure that the temp is evaluated in the proper execution order – i.e. `GT_COMMA(GT_ASG(temp, exp), temp)` is inserted into the tree where "exp" would go. +* It may need to evaluate part of the expression into a temp, in which case it will use a comma (`GT_COMMA`) node to ensure that the temp is evaluated in the proper execution order – i.e. `GT_COMMA(GT_STORE_LCL_VAR(exp), temp)` is inserted into the tree where "exp" would go. * It may need to create conditional expressions, but adding control flow at this point would be quite messy. 
In this case it generates question mark/colon (?: or `GT_QMARK`/`GT_COLON`) trees that may be nested within an expression. During importation, tail call candidates (either explicitly marked or opportunistically identified) are identified @@ -291,7 +297,6 @@ This expands most `GT_QMARK`/`GT_COLON` trees into blocks, except for the case t At this point, a number of analyses and transformations are done on the flowgraph: -* Computing the predecessors of each block * Computing edge weights, if profile information is available * Computing reachability and dominators * Identifying and normalizing loops (transforming while loops to "do while") @@ -303,11 +308,10 @@ At this point, a number of properties are computed on the IR, and must remain va call this "normalization" * `lvaMarkLocalVars` – if this jit is optimizing, set the reference counts (raw and weighted) for lclVars, sort them, -and determine which will be tracked (currently up to 512). If not optimizing, all locals are given an implicit -reference count of one. Reference counts are not incrementally maintained. They can be recomputed if accurate -counts are needed. -* `optOptimizeBools` – this optimizes Boolean expressions, and may change the flowgraph (why is it not done prior to reachability and dominators?) -* Link the trees in evaluation order (setting `gtNext` and `gtPrev` fields): and `fgFindOperOrder()` and `fgSetBlockOrder()`. +and determine which will be tracked (up to `JitMaxLocalsToTrack`, 1024 by default). If not optimizing, all locals are +given an implicit reference count of one. Reference counts are not incrementally maintained. They can be recomputed if +accurate counts are needed. +* Link the trees in evaluation order (setting `gtNext` and `gtPrev` fields): `fgFindOperOrder()` and `fgSetBlockOrder()`. 
## SSA and Value Numbering Optimizations @@ -347,6 +351,16 @@ Utilizes value numbers to propagate and transform based on properties such as no Optimize array index range checks based on value numbers and assertions. +### VN-based dead store elimination + +Walks over the SSA descriptors and removes definitions where the new value is identical to the previous. +This phase invalidates both SSA and value numbers; after this point both should be considered stale. + +### If Conversion + +Uses simple analysis to transform conditional definitions of locals into unconditional `GT_SELECT` nodes, which +will can later be emitted as, e. g., conditional moves. + ## Rationalization As the JIT has evolved, changes have been made to improve the ability to reason over the tree in both "tree order" @@ -355,9 +369,6 @@ evolution, some of the changes have been made only in the later ("backend") comp transformations are made to the IR by a "Rationalizer" component. It is expected that over time some of these changes will migrate to an earlier place in the JIT phase order: -* Elimination of assignment nodes (`GT_ASG`). The assignment node was problematic because the semantics of its destination (left hand side of the assignment) could not be determined without context. For example, a `GT_LCL_VAR` on the left-hand side of an assignment is a definition of the local variable, but on the right-hand side it is a use. Furthermore, since the execution order requires that the children be executed before the parent, it is unnatural that the left-hand side of the assignment appears in execution order before the assignment operator. - * During rationalization, all assignments are replaced by stores, which either represent their destination on the store node itself (e.g. `GT_LCL_VAR`), or by the use of a child address node (e.g. `GT_STORE_IND`). -* Elimination of address nodes (`GT_ADDR`). These are problematic because of the need for parent context to analyze the child. 
* Elimination of "comma" nodes (`GT_COMMA`). These nodes are introduced for convenience during importation, during which a single tree is constructed at a time, and not incorporated into the statement list until it is completed. When it is necessary, for example, to store a partially-constructed tree into a temporary variable, a `GT_COMMA` node is used to link it into the tree. However, in later phases, these comma nodes are an impediment to analysis, and thus are eliminated. * In some cases, it is not possible to fully extract the tree into a separate statement, due to execution order dependencies. In these cases, an "embedded" statement is created. While these are conceptually very similar to the `GT_COMMA` nodes, they do not masquerade as expressions. * Elimination of "QMark" (`GT_QMARK`/`GT_COLON`) nodes is actually done at the end of morphing, long before the current rationalization phase. The presence of these nodes made analyses (especially dataflow) overly complex. @@ -369,14 +380,12 @@ new temporary lclVars, and that computation has been inserted as a `GT_COMMA` (c ``` STMT (IL 0x000...0x026) -▌ ASG double $VN.Void -├──▌ IND double $146 -│ └──▌ LCL_VAR byref V03 arg3 u:1 (last use) $c0 +▌ STOREIND double $VN.Void +├──▌ LCL_VAR byref V03 arg3 u:1 (last use) $c0 └──▌ DIV double $146 ├──▌ ADD double $144 │ ├──▌ COMMA double $83 - │ │ ├──▌ ASG double $VN.Void - │ │ │ ├──▌ LCL_VAR double V06 cse0 d:1 $83 + │ │ ├──▌ STORE_LCL_VAR double V06 cse0 d:1 $83 │ │ │ └──▌ INTRINSIC double sqrt $83 │ │ │ └──▌ SUB double $143 │ │ │ ├──▌ MUL double $140 @@ -389,21 +398,19 @@ STMT (IL 0x000...0x026) │ │ │ └──▌ LCL_VAR double V02 arg2 u:1 $82 │ │ └──▌ LCL_VAR double V06 cse0 u:1 $83 │ └──▌ COMMA double $84 - │ ├──▌ ASG double $VN.Void - │ │ ├──▌ LCL_VAR double V08 cse2 d:1 $84 + │ ├──▌ STORE_LCL_VAR double V08 cse2 d:1 $84 │ │ └──▌ NEG double $84 │ │ └──▌ LCL_VAR double V01 arg1 u:1 $81 │ └──▌ LCL_VAR double V08 cse2 u:1 $84 └──▌ COMMA double $145 - ├──▌ ASG double $VN.Void 
- │ ├──▌ LCL_VAR double V07 cse1 d:1 $145 + ├──▌ STORE_LCL_VAR double V07 cse1 d:1 $145 │ └──▌ MUL double $145 │ ├──▌ LCL_VAR double V00 arg0 u:1 $80 │ └──▌ CNS_DBL double 2.0000000000000000 $181 └──▌ LCL_VAR double V07 cse1 u:1 $145 ``` -After Rationalize, the nodes are presented in execution order, and the `GT_COMMA` (comma), `GT_ASG` (=), and +After Rationalize, the nodes are presented in execution order, and the `GT_COMMA` (comma) and `Statement` nodes have been eliminated: ``` @@ -645,7 +652,6 @@ There are several properties of the IR that are valid only during (or after) spe ## Phase Transitions * Flowgraph analysis - * Sets the predecessors of each block, which must be kept valid after this phase. * Computes reachability and dominators. These may be invalidated by changes to the flowgraph. * Computes edge weights, if profile information is available. * Identifies and normalizes loops. These may be invalidated, but must be marked as such. @@ -653,10 +659,7 @@ There are several properties of the IR that are valid only during (or after) spe * The lclVar reference counts are set by `lvaMarkLocalVars()`. * Statement ordering is determined by `fgSetBlockOrder()`. Execution order is a depth-first preorder traversal of the nodes, with the operands usually executed in order. The exceptions are: * Binary operators, which can have the `GTF_REVERSE_OPS` flag set to indicate that the RHS (`gtOp2`) should be evaluated before the LHS (`gtOp1`). - * Dynamically-sized block copy nodes, which can have `gtEvalSizeFirst` set to `true` to indicate that their `gtDynamicSize` tree should be evaluated before executing their other operands. * Rationalization - * All `GT_ASG` trees are transformed into `GT_STORE` variants (e.g. `GT_STORE_LCL_VAR`). - * All `GT_ADDR` nodes are eliminated (e.g. with `GT_LCL_VAR_ADDR`). * All `GT_COMMA` and `Statement` nodes are removed and their constituent nodes linked into execution order. 
* Lowering * `GenTree` nodes are split or transformed as needed to expose all of their register requirements and any necessary `flowgraph` changes (e.g., for switch statements). @@ -670,7 +673,7 @@ Ordering: * For 'GenTree' nodes, the `gtNext` and `gtPrev` fields are either `nullptr`, prior to ordering, or they are consistent (i.e. `A->gtPrev->gtNext = A`, and `A->gtNext->gtPrev == A`, if they are non-`nullptr`). * After normalization the `m_treeList` of the containing statement points to the first node to be executed. * Prior to normalization, the `gtNext` and `gtPrev` pointers on the expression `GenTree` nodes are invalid. The expression nodes are only traversed via the links from parent to child (e.g. `node->gtGetOp1()`, or `node->gtOp.gtOp1`). The `gtNext/gtPrev` links are set by `fgSetBlockOrder()`. - * After normalization, and prior to rationalization, the parent/child links remain the primary traversal mechanism. The evaluation order of any nested expression-statements (usually assignments) is enforced by the `GT_COMMA` in which they are contained. + * After normalization, and prior to rationalization, the parent/child links remain the primary traversal mechanism. The evaluation order of any nested expression-statements (usually stores) is enforced by the `GT_COMMA` in which they are contained. * After rationalization, all `GT_COMMA` nodes are eliminated, statements are flattened, and the primary traversal mechanism becomes the `gtNext/gtPrev` links which define the execution order. * In tree ordering: * The `gtPrev` of the first node (`m_treeList`) is always `nullptr`. 
@@ -784,14 +787,6 @@ so that the typical infix, left to right expression `a - b` becomes prefix, top └──▌ LCL_VAR double V02 b ``` -Assignments are displayed like all other binary operators, with `dest = src` becoming: - -``` -▌ ASG double -├──▌ LCL_VAR double V03 dest -└──▌ LCL_VAR double V02 src -``` - Calls initially display in source order - `Order(1, 2, 3, 4)` is: ``` @@ -836,10 +831,10 @@ STMT00000 (IL 0x010... ???) ``` Tree nodes are identified by their `gtTreeID`. This field only exists in DEBUG builds, but is quite useful for -debugging, since all tree nodes are created from the routine `gtNewNode` (in -[src/jit/gentree.cpp](https://github.com/dotnet/runtime/blob/main/src/coreclr/jit/gentree.cpp)). If you find a +debugging, since all tree nodes are created via the `GenTree::GenTree` constructor (in +[src/jit/compiler.hpp](https://github.com/dotnet/runtime/blob/main/src/coreclr/jit/compiler.hpp)). If you find a bad tree and wish to understand how it got corrupted, you can place a conditional breakpoint at the end of -`gtNewNode` to see when it is created, and then a data breakpoint on the field that you believe is corrupted. +`GenTree::GenTree` to see when it is created, and then a data breakpoint on the field that you believe is corrupted. The trees are connected by line characters (either in ASCII, by default, or in slightly more readable Unicode when `DOTNET_JitDumpASCII=0` is specified), to make it a bit easier to read. diff --git a/docs/design/features/host-runtime-information.md b/docs/design/features/host-runtime-information.md index d15009272072d..d2d96ff6ada83 100644 --- a/docs/design/features/host-runtime-information.md +++ b/docs/design/features/host-runtime-information.md @@ -84,10 +84,16 @@ List of directory paths corresponding to shared store paths and additional probi Hex string representation of a function pointer. It is set when running a single-file application. 
The function is called by the runtime to look for assemblies bundled into the application. The expected signature is defined as `BundleProbeFn` in [`coreclrhost.h`](/src/coreclr/hosts/inc/coreclrhost.h) +**.NET 9 and above** This property is no longer set by the host. `host_runtime_contract.bundle_probe` is set when running a single-file application. + `HOSTPOLICY_EMBEDDED` Indicates whether or not [`hostpolicy`](./host-components.md#host-policy) is embedded in the host executable. It is set to `true` when running a self-contained single-file application. +**.NET 9 and above** This property is no longer set by the host or read by the runtime. Self-contained single-file includes both host and runtime components in the executable, so the information is known at build-time. + `PINVOKE_OVERRIDE` Hex string representation of a function pointer. It is set when running a self-contained single-file application. The function is called by the runtime to check for redirected p/invokes. The expected signature is defined as `PInvokeOverrideFn` in [`coreclrhost.h`](/src/coreclr/hosts/inc/coreclrhost.h) and [`mono-private-unstable-types.h`](/src/native/public/mono/metadata/details/mono-private-unstable-types.h). + +**.NET 9 and above** This property is no longer set by the host. `host_runtime_contract.pinvoke_override` is set when running a self-contained single-file application. 
diff --git a/eng/Subsets.props b/eng/Subsets.props index 6f2bfd8be05e2..f2480616431b5 100644 --- a/eng/Subsets.props +++ b/eng/Subsets.props @@ -343,7 +343,7 @@ - + @@ -509,9 +509,9 @@ - + - + @@ -519,7 +519,7 @@ - + diff --git a/eng/Version.Details.xml b/eng/Version.Details.xml index 8516d0dc24e70..0cefc2d3a2169 100644 --- a/eng/Version.Details.xml +++ b/eng/Version.Details.xml @@ -12,69 +12,69 @@ https://github.com/dotnet/wcf 7f504aabb1988e9a093c1e74d8040bd52feb2f01 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + 
f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 https://github.com/dotnet/command-line-api @@ -85,24 +85,24 @@ 02fe27cd6a9b001c8feb7938e6ef4b3799745759b - + https://github.com/dotnet/cecil - 89be445dd4936157533ad96bafb95f701430653a + 13d6536e2dc92404da76d61d248badc040eb0de0 - + https://github.com/dotnet/emsdk - bebe955e9f7d392fbca594b1c76c54ba2e27027e + 2cbb8d159e76cbc061c637d235568b8ed1fc60bc - + https://github.com/dotnet/source-build-reference-packages - fc01829cbf76b7bbf48a39161562468715a0a3b4 + 3af65e74c8be435668f328c2bf134270b33d4e3a - + https://github.com/dotnet/source-build-externals - e45d334fa3fd29018b70c598eced1938c054884d + 588fbcbc2a221bc6cea33f12eb73c9117994154e @@ -237,164 +237,164 @@ https://github.com/dotnet/runtime-assets b7d8e946c831e79435054f65e49c2eebb74b55c6 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - 
dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/llvm-project - dc18ea8f36885b70da5e7853aa6b5278ce8d0de4 + f8b9dcbbb9cafc3330500adb59be5bfc20fcefc2 - + https://github.com/dotnet/runtime - ebe6f541ca2adea41f4b1dfc97774af371654bab + 736dabeca728ccf8b911d96d1b4c575b4d0db7d2 - + https://github.com/dotnet/runtime - ebe6f541ca2adea41f4b1dfc97774af371654bab + 736dabeca728ccf8b911d96d1b4c575b4d0db7d2 - + https://github.com/dotnet/runtime - ebe6f541ca2adea41f4b1dfc97774af371654bab + 736dabeca728ccf8b911d96d1b4c575b4d0db7d2 - + https://github.com/dotnet/runtime - ebe6f541ca2adea41f4b1dfc97774af371654bab + 736dabeca728ccf8b911d96d1b4c575b4d0db7d2 - + https://github.com/dotnet/runtime - ebe6f541ca2adea41f4b1dfc97774af371654bab + 736dabeca728ccf8b911d96d1b4c575b4d0db7d2 - + https://github.com/dotnet/runtime - ebe6f541ca2adea41f4b1dfc97774af371654bab + 736dabeca728ccf8b911d96d1b4c575b4d0db7d2 - + https://github.com/dotnet/runtime - ebe6f541ca2adea41f4b1dfc97774af371654bab + 736dabeca728ccf8b911d96d1b4c575b4d0db7d2 - + 
https://github.com/dotnet/xharness - 06fd591dc00862e415eddbec734ae9ea77d35486 + 2b1d423ce08e1ed78c0a821d0850e0f5ab3b193a - + https://github.com/dotnet/xharness - 06fd591dc00862e415eddbec734ae9ea77d35486 + 2b1d423ce08e1ed78c0a821d0850e0f5ab3b193a - + https://github.com/dotnet/xharness - 06fd591dc00862e415eddbec734ae9ea77d35486 + 2b1d423ce08e1ed78c0a821d0850e0f5ab3b193a https://github.com/dotnet/arcade 1d451c32dda2314c721adbf8829e1c0cd4e681ff - + https://dev.azure.com/dnceng/internal/_git/dotnet-optimization - ee166d79f3a269d2a1c6b7d400df7e284b1aa67b + 492f7464d31d9599531fab2a67bc2422046f5133 - + https://dev.azure.com/dnceng/internal/_git/dotnet-optimization - ee166d79f3a269d2a1c6b7d400df7e284b1aa67b + 492f7464d31d9599531fab2a67bc2422046f5133 - + https://dev.azure.com/dnceng/internal/_git/dotnet-optimization - ee166d79f3a269d2a1c6b7d400df7e284b1aa67b + 492f7464d31d9599531fab2a67bc2422046f5133 - + https://dev.azure.com/dnceng/internal/_git/dotnet-optimization - ee166d79f3a269d2a1c6b7d400df7e284b1aa67b + 492f7464d31d9599531fab2a67bc2422046f5133 - + https://github.com/dotnet/hotreload-utils - 821048c1587d0384826230cdddf292570b8e430f + 6c20c1d568568b9b2da84f878ac9cb4a48aaa4e5 https://github.com/dotnet/runtime-assets b7d8e946c831e79435054f65e49c2eebb74b55c6 - + https://github.com/dotnet/roslyn - 232f7afa4966411958759c880de3a1765bdb28a0 + 0d735148bbb4cb511be547fbc1db63a2c81a821d - + https://github.com/dotnet/roslyn - 232f7afa4966411958759c880de3a1765bdb28a0 + 0d735148bbb4cb511be547fbc1db63a2c81a821d - + https://github.com/dotnet/roslyn - 232f7afa4966411958759c880de3a1765bdb28a0 + 0d735148bbb4cb511be547fbc1db63a2c81a821d - + https://github.com/dotnet/roslyn-analyzers - 39ccb5b7570c179a82aa604ab7c3712af94ef119 + 84fb81c27e0554eadf6b12f97eb52c7cd2803c7e - + https://github.com/dotnet/roslyn-analyzers - 39ccb5b7570c179a82aa604ab7c3712af94ef119 + 84fb81c27e0554eadf6b12f97eb52c7cd2803c7e - + https://github.com/dotnet/sdk - fda3911e6ad2b3e616765d07b7b8a5e5f1ee23d6 + 
bd9fe1f506ec4b133c9cf85df6b3e6cf55b9450f - + https://dev.azure.com/dnceng/internal/_git/dotnet-optimization - ee166d79f3a269d2a1c6b7d400df7e284b1aa67b + 492f7464d31d9599531fab2a67bc2422046f5133 - + https://dev.azure.com/dnceng/internal/_git/dotnet-optimization - ee166d79f3a269d2a1c6b7d400df7e284b1aa67b + 492f7464d31d9599531fab2a67bc2422046f5133 @@ -402,9 +402,9 @@ https://github.com/NuGet/NuGet.Client 8fef55f5a55a3b4f2c96cd1a9b5ddc51d4b927f8 - + https://github.com/dotnet/installer - 59c97b5166cefc312d1220437a6ec3e4f7bf2ef0 + ee7ccfc83a91c0acc385a45d15460efd1c1f44c1 diff --git a/eng/Versions.props b/eng/Versions.props index 484697313f018..3de78378cf4b6 100644 --- a/eng/Versions.props +++ b/eng/Versions.props @@ -7,19 +7,19 @@ 0 0 9.0.100 - 8.0.0-rc.1.23414.4 + 8.0.0-rc.2.23469.22 7.0.8 6.0.$([MSBuild]::Add($([System.Version]::Parse('$(PackageVersionNet7)').Build),11)) alpha 1 - -$(PreReleaseVersionLabel).$(PreReleaseVersionIteration) + + false + release + -$(PreReleaseVersionLabel).$(PreReleaseVersionIteration) $(SdkBandVersion)$(WorkloadVersionSuffix) $(MajorVersion).$(MinorVersion).0.0 - - false - release false false @@ -31,17 +31,17 @@ - 3.11.0-beta1.23472.2 - 9.0.0-preview.23472.2 + 3.11.0-beta1.23478.1 + 9.0.0-preview.23478.1 - 4.8.0-3.23474.1 - 4.8.0-3.23474.1 - 4.8.0-3.23474.1 + 4.8.0-3.23501.1 + 4.8.0-3.23501.1 + 4.8.0-3.23501.1 - 9.0.100-alpha.1.23473.2 + 9.0.100-alpha.1.23480.1 8.0.0-beta.23463.1 8.0.0-beta.23463.1 @@ -101,18 +101,18 @@ 6.0.0-preview.1.102 - 9.0.0-alpha.1.23466.6 + 9.0.0-alpha.1.23470.17 6.0.0 - 9.0.0-alpha.1.23466.6 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 + 9.0.0-alpha.1.23470.17 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 6.0.0 
1.1.1 @@ -131,12 +131,12 @@ 5.0.0 5.0.0 7.0.0 - 9.0.0-alpha.1.23466.6 + 9.0.0-alpha.1.23470.17 6.0.0 7.0.0 4.5.4 4.5.0 - 9.0.0-alpha.1.23466.6 + 9.0.0-alpha.1.23470.17 8.0.0-beta.23456.1 8.0.0-beta.23456.1 @@ -153,12 +153,12 @@ 8.0.0-beta.23456.1 8.0.0-beta.23456.1 - 1.0.0-prerelease.23471.3 - 1.0.0-prerelease.23471.3 - 1.0.0-prerelease.23471.3 - 1.0.0-prerelease.23471.3 - 1.0.0-prerelease.23471.3 - 1.0.0-prerelease.23471.3 + 1.0.0-prerelease.23478.3 + 1.0.0-prerelease.23478.3 + 1.0.0-prerelease.23478.3 + 1.0.0-prerelease.23478.3 + 1.0.0-prerelease.23478.3 + 1.0.0-prerelease.23478.3 16.11.27-beta1.23180.1 2.0.0-beta4.23307.1 @@ -178,10 +178,10 @@ 1.1.0 17.4.0-preview-20220707-01 - 8.0.0-prerelease.23471.1 - 8.0.0-prerelease.23471.1 - 8.0.0-prerelease.23471.1 - 8.0.0-alpha.0.23461.1 + 8.0.0-prerelease.23477.1 + 8.0.0-prerelease.23477.1 + 8.0.0-prerelease.23477.1 + 8.0.0-alpha.0.23475.1 2.4.2 1.0.0 2.4.5 @@ -206,54 +206,54 @@ 8.0.0-preview-20230918.1 - 9.0.0-alpha.1.23466.6 + 9.0.0-alpha.1.23470.17 - 0.11.4-alpha.23468.2 + 0.11.4-alpha.23476.1 - 9.0.0-alpha.1.23466.6 + 9.0.0-alpha.1.23470.17 9.0.0-alpha.1.23454.1 2.2.2 8.0.0-alpha.1.23180.2 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 - 9.0.0-alpha.1.23465.3 + 9.0.0-alpha.1.23478.1 $(MicrosoftNETWorkloadEmscriptenCurrentManifest90100TransportVersion) 1.1.87-gba258badda 
1.0.0-v3.14.0.5722 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 - 16.0.5-alpha.1.23452.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 + 16.0.5-alpha.1.23472.1 3.1.7 1.0.406601 - 9.0.100-alpha.1.23474.1 + 9.0.100-alpha.1.23502.7 $(MicrosoftDotnetSdkInternalVersion) diff --git a/eng/codeOptimization.targets b/eng/codeOptimization.targets index e1d81e46c0b98..d42a39e3dfd2a 100644 --- a/eng/codeOptimization.targets +++ b/eng/codeOptimization.targets @@ -27,4 +27,17 @@ + + + + true + + + + + + + $(PublishReadyToRunCrossgen2ExtraArgs);--embed-pgo-data + + diff --git a/eng/generators.targets b/eng/generators.targets index d1e13147ed12c..b5ca3ca89ee42 100644 --- a/eng/generators.targets +++ b/eng/generators.targets @@ -8,14 +8,18 @@ $(ToolsILLinkDir)$(NetCoreAppToolCurrent)\ILLink.Tasks.dll + $(ToolsILLinkDir)$(NetFrameworkToolCurrent)\ILLink.Tasks.dll _EmbedILLinkXmls;$(PrepareResourcesDependsOn) $(TargetsTriggeredByCompilation);ILLinkTrimAssembly @@ -119,7 +120,10 @@ - + - + diff --git a/eng/intellisense.targets b/eng/intellisense.targets index fc439383eb38b..4123e0140ed32 100644 --- a/eng/intellisense.targets +++ b/eng/intellisense.targets @@ -18,9 +18,12 @@ - the intellisense package xml file is used or - the assembly is private (i.e. System.Private.Uri) or - the assembly is a PNSE assembly. 
--> - $(NoWarn);1591 + $(NoWarn);1591 $([MSBuild]::NormalizeDirectory('$(CoreCLRArtifactsPath)', 'sharedFramework')) - $([MSBuild]::NormalizeDirectory('$(CoreCLRArtifactsPath)', 'crossgen2')) $([MSBuild]::NormalizeDirectory('$(CoreCLRArtifactsPath)', 'ilc-published')) $([MSBuild]::NormalizeDirectory('$(CoreCLRArtifactsPath)', '$(BuildArchitecture)', 'ilc')) $([MSBuild]::NormalizeDirectory('$(CoreCLRArtifactsPath)', 'aotsdk')) $([MSBuild]::NormalizeDirectory('$(CoreCLRArtifactsPath)', 'build')) + $([MSBuild]::NormalizeDirectory('$(CoreCLRArtifactsPath)', '$(BuildArchitecture)', 'crossgen2')) + $([MSBuild]::NormalizeDirectory('$(ArtifactsBinDir)', 'ILLink.Tasks', '$(ToolsConfiguration)')) $([MSBuild]::NormalizeDirectory('$(MonoArtifactsPath)', 'cross', $(TargetOS)-$(TargetArchitecture.ToLowerInvariant()))) diff --git a/eng/native/ijw/IJW.cmake b/eng/native/ijw/IJW.cmake index 4b580aff599c8..5782babe75549 100644 --- a/eng/native/ijw/IJW.cmake +++ b/eng/native/ijw/IJW.cmake @@ -45,7 +45,7 @@ if (CLR_CMAKE_HOST_WIN32) # 4365 - signed/unsigned mismatch # 4679 - Could not import member. This is an issue with IJW and static abstract methods in interfaces. 
- add_compile_options(/wd4365 /wd4679) + add_compile_options(/wd4365 /wd4679 /wd5271) # IJW add_compile_options(/clr:netcore) diff --git a/eng/pipelines/common/evaluate-default-paths.yml b/eng/pipelines/common/evaluate-default-paths.yml index c5bebbf8aeb4a..ac43f54499e9b 100644 --- a/eng/pipelines/common/evaluate-default-paths.yml +++ b/eng/pipelines/common/evaluate-default-paths.yml @@ -52,6 +52,15 @@ parameters: eng/testing/bump-chrome-version.proj eng/testing/ChromeVersions.props ] + _perf_pipeline_specific_only: [ + eng/pipelines/runtime-wasm-perf.yml + eng/pipelines/coreclr/perf*.yml + eng/pipelines/coreclr/templates/perf-job.yml + eng/pipelines/coreclr/templates/*-perf-* + eng/pipelines/coreclr/templates/run-perf* + eng/pipelines/coreclr/templates/run-scenarios-job.yml + eng/testing/performance/* + ] # src/workloads is only used in runtime-official builds # where evaluate-paths is not used @@ -90,6 +99,7 @@ jobs: - ${{ parameters._const_paths._wasm_specific_only }} - ${{ parameters._const_paths._wasm_pipelines }} - ${{ parameters._const_paths._always_exclude }} + - ${{ parameters._const_paths._perf_pipeline_specific_only }} - subset: mono_excluding_wasm include: @@ -101,6 +111,7 @@ jobs: - ${{ parameters._const_paths._wasm_specific_only }} - ${{ parameters._const_paths._wasm_pipelines }} - ${{ parameters._const_paths._always_exclude }} + - ${{ parameters._const_paths._perf_pipeline_specific_only }} - eng/Version.Details.xml - docs/* @@ -130,6 +141,7 @@ jobs: - ${{ parameters._const_paths._wasm_specific_only }} - ${{ parameters._const_paths._wasm_pipelines }} - ${{ parameters._const_paths._always_exclude }} + - ${{ parameters._const_paths._perf_pipeline_specific_only }} - subset: runtimetests combined: true @@ -137,6 +149,7 @@ jobs: - src/tests/* exclude: - ${{ parameters._const_paths._wasm_specific_only }} + - ${{ parameters._const_paths._perf_pipeline_specific_only }} - subset: tools_illink include: @@ -160,6 +173,7 @@ jobs: - ${{ 
parameters._const_paths._wasm_specific_only }} - ${{ parameters._const_paths._wasm_pipelines }} - ${{ parameters._const_paths._always_exclude }} + - ${{ parameters._const_paths._perf_pipeline_specific_only }} # We have limited Apple Silicon testing capacity # We want PR testing on a narrower set of changes @@ -189,6 +203,8 @@ jobs: - eng/Version.Details.xml - eng/Versions.props - eng/testing/scenarios/BuildWasmAppsJobsList.txt + - eng/testing/tests.browser.targets + - eng/testing/tests.was*.targets - eng/testing/workloads-testing.targets - src/installer/pkg/sfx/Microsoft.NETCore.App/* - src/libraries/sendtohelix* @@ -217,6 +233,7 @@ jobs: - ${{ parameters._const_paths._wasm_pipelines }} exclude: - ${{ parameters._const_paths._always_exclude }} + - ${{ parameters._const_paths._perf_pipeline_specific_only }} - subset: wasmdebuggertests combined: true @@ -232,6 +249,7 @@ jobs: exclude: - src/mono/nuget/* - ${{ parameters._const_paths._always_exclude }} + - ${{ parameters._const_paths._perf_pipeline_specific_only }} # wasm/runtimetests need to be run - subset: wasm_runtimetests @@ -247,9 +265,11 @@ jobs: - src/mono/tools/* - src/mono/wasi/* - src/mono/wasm/debugger/* + - src/mono/wasm/host/* - src/mono/wasm/Wasm.Build.Tests/* - ${{ parameters._const_paths._wasm_pipelines }} - ${{ parameters._const_paths._always_exclude }} + - ${{ parameters._const_paths._perf_pipeline_specific_only }} # Wasm except Wasm.build.Tests, and debugger - subset: wasm_specific_except_wbt_dbg @@ -264,12 +284,18 @@ jobs: - eng/testing/workloads-testing.targets - src/mono/mono/component/mini-wasm-debugger.c - src/mono/wasm/debugger/* + - src/mono/wasm/host/* - src/mono/wasm/Wasm.Build.Tests/* - src/mono/nuget/Microsoft.NET.Runtime* src/mono/nuget/Microsoft.NET.Sdk.WebAssembly.Pack/* - src/mono/nuget/Microsoft.NET.Workload* - src/mono/nuget/Microsoft.NETCore.BrowserDebugHost.Transport/* - ${{ parameters._const_paths._always_exclude }} + - ${{ parameters._const_paths._perf_pipeline_specific_only }} 
+ + - subset: wasm_chrome + include: + - ${{ parameters._const_paths._wasm_chrome }} # anything other than mono, or wasm specific paths - subset: non_mono_and_wasm @@ -278,6 +304,7 @@ jobs: - ${{ parameters._const_paths._wasm_specific_only }} - ${{ parameters._const_paths._wasm_pipelines }} - ${{ parameters._const_paths._always_exclude }} + - ${{ parameters._const_paths._perf_pipeline_specific_only }} - eng/testing/tests.mobile.targets - src/mono/* - src/tasks/AndroidAppBuilder/* diff --git a/eng/pipelines/common/global-build-job.yml b/eng/pipelines/common/global-build-job.yml index a13548956ff60..cdda8c99dc519 100644 --- a/eng/pipelines/common/global-build-job.yml +++ b/eng/pipelines/common/global-build-job.yml @@ -26,8 +26,7 @@ parameters: helixQueues: '' enablePublishTestResults: false testResultsFormat: '' - extraStepsTemplate: '' - extraStepsParameters: {} + postBuildSteps: [] extraVariablesTemplates: [] isManualCodeQLBuild: false preBuildSteps: [] @@ -209,7 +208,28 @@ jobs: - ${{ if ne(parameters.preBuildSteps,'') }}: - ${{ each preBuildStep in parameters.preBuildSteps }}: - - ${{ preBuildStep }} + - ${{ if ne(preBuildStep.template, '') }}: + - template: ${{ preBuildStep.template }} + parameters: + osGroup: ${{ parameters.osGroup }} + osSubgroup: ${{ parameters.osSubgroup }} + archType: ${{ parameters.archType }} + buildConfig: ${{ parameters.buildConfig }} + runtimeFlavor: ${{ parameters.runtimeFlavor }} + runtimeVariant: ${{ parameters.runtimeVariant }} + helixQueues: ${{ parameters.helixQueues }} + targetRid: ${{ parameters.targetRid }} + nameSuffix: ${{ parameters.nameSuffix }} + platform: ${{ parameters.platform }} + pgoType: ${{ parameters.pgoType }} + shouldContinueOnError: ${{ parameters.shouldContinueOnError }} + ${{ if ne(preBuildStep.forwardedParameters, '') }}: + ${{ each parameter in preBuildStep.forwardedParameters }}: + ${{ parameter }}: ${{ parameters[parameter] }} + ${{ if ne(preBuildStep.parameters, '') }}: + ${{ insert }}: ${{ 
preBuildStep.parameters }} + - ${{ else }}: + - ${{ preBuildStep }} # Build - ${{ if eq(parameters.isSourceBuild, false) }}: @@ -235,21 +255,29 @@ jobs: condition: always() # If intended to send extra steps after regular build add them here. - - ${{ if ne(parameters.extraStepsTemplate, '') }}: - - template: ${{ parameters.extraStepsTemplate }} - parameters: - osGroup: ${{ parameters.osGroup }} - osSubgroup: ${{ parameters.osSubgroup }} - archType: ${{ parameters.archType }} - buildConfig: ${{ parameters.buildConfig }} - runtimeFlavor: ${{ parameters.runtimeFlavor }} - runtimeVariant: ${{ parameters.runtimeVariant }} - helixQueues: ${{ parameters.helixQueues }} - targetRid: ${{ parameters.targetRid }} - nameSuffix: ${{ parameters.nameSuffix }} - platform: ${{ parameters.platform }} - shouldContinueOnError: ${{ parameters.shouldContinueOnError }} - ${{ insert }}: ${{ parameters.extraStepsParameters }} + - ${{ if ne(parameters.postBuildSteps,'') }}: + - ${{ each postBuildStep in parameters.postBuildSteps }}: + - ${{ if ne(postBuildStep.template, '') }}: + - template: ${{ postBuildStep.template }} + parameters: + osGroup: ${{ parameters.osGroup }} + osSubgroup: ${{ parameters.osSubgroup }} + archType: ${{ parameters.archType }} + buildConfig: ${{ parameters.buildConfig }} + runtimeFlavor: ${{ parameters.runtimeFlavor }} + runtimeVariant: ${{ parameters.runtimeVariant }} + helixQueues: ${{ parameters.helixQueues }} + targetRid: ${{ parameters.targetRid }} + nameSuffix: ${{ parameters.nameSuffix }} + platform: ${{ parameters.platform }} + shouldContinueOnError: ${{ parameters.shouldContinueOnError }} + ${{ if ne(postBuildStep.forwardedParameters, '') }}: + ${{ each parameter in postBuildStep.forwardedParameters }}: + ${{ parameter }}: ${{ parameters[parameter] }} + ${{ if ne(postBuildStep.parameters, '') }}: + ${{ insert }}: ${{ postBuildStep.parameters }} + - ${{ else }}: + - ${{ postBuildStep }} - ${{ if and(eq(parameters.isOfficialBuild, true), eq(parameters.osGroup, 
'windows')) }}: - powershell: ./eng/collect_vsinfo.ps1 -ArchiveRunName postbuild_log diff --git a/eng/pipelines/common/templates/browser-wasm-build-tests.yml b/eng/pipelines/common/templates/browser-wasm-build-tests.yml index 28d659e1607f4..dabdf608f4e1b 100644 --- a/eng/pipelines/common/templates/browser-wasm-build-tests.yml +++ b/eng/pipelines/common/templates/browser-wasm-build-tests.yml @@ -117,10 +117,11 @@ jobs: eq(variables['isDefaultPipeline'], variables['shouldRunWasmBuildTestsOnDefaultPipeline'])) # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_$(_BuildConfig)_$(_hostedOs) - extraHelixArguments: /p:BrowserHost=$(_hostedOs) - scenarios: - - buildwasmapps + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_$(_BuildConfig)_$(_hostedOs) + extraHelixArguments: /p:BrowserHost=$(_hostedOs) + scenarios: + - buildwasmapps diff --git a/eng/pipelines/common/templates/simple-wasm-build-tests.yml b/eng/pipelines/common/templates/simple-wasm-build-tests.yml index dcba5522f441b..7a593f4f9be20 100644 --- a/eng/pipelines/common/templates/simple-wasm-build-tests.yml +++ b/eng/pipelines/common/templates/simple-wasm-build-tests.yml @@ -41,11 +41,12 @@ jobs: eq(variables['alwaysRunVar'], true), eq(variables['isDefaultPipeline'], variables['shouldRunWasmBuildTestsOnDefaultPipeline'])) # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_$(_BuildConfig)_$(_hostedOs) - extraHelixArguments: /p:BrowserHost=$(_hostedOs) - scenarios: - - buildwasmapps + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_$(_BuildConfig)_$(_hostedOs) + extraHelixArguments: /p:BrowserHost=$(_hostedOs) + scenarios: + - 
buildwasmapps diff --git a/eng/pipelines/common/templates/wasm-build-only.yml b/eng/pipelines/common/templates/wasm-build-only.yml index 9e9b0cb332c8f..4f2588e498569 100644 --- a/eng/pipelines/common/templates/wasm-build-only.yml +++ b/eng/pipelines/common/templates/wasm-build-only.yml @@ -38,7 +38,8 @@ jobs: buildArgs: -s mono+libs+packs+libs.tests$(workloadSubsetArg) -c $(_BuildConfig) /p:BrowserHost=$(_hostedOs) ${{ parameters.extraBuildArgs }} /p:TestAssemblies=false $(extraBuildArgs) timeoutInMinutes: 120 condition: ${{ parameters.condition }} - extraStepsTemplate: /eng/pipelines/common/wasm-post-build-steps.yml - extraStepsParameters: - publishArtifactsForWorkload: ${{ parameters.publishArtifactsForWorkload }} - publishWBT: ${{ parameters.publishWBT }} + postBuildSteps: + - template: /eng/pipelines/common/wasm-post-build-steps.yml + parameters: + publishArtifactsForWorkload: ${{ parameters.publishArtifactsForWorkload }} + publishWBT: ${{ parameters.publishWBT }} diff --git a/eng/pipelines/common/templates/wasm-debugger-tests.yml b/eng/pipelines/common/templates/wasm-debugger-tests.yml index fd19fe5385cbb..17c0f415cf1b4 100644 --- a/eng/pipelines/common/templates/wasm-debugger-tests.yml +++ b/eng/pipelines/common/templates/wasm-debugger-tests.yml @@ -52,10 +52,11 @@ jobs: and( eq(variables['isDefaultPipeline'], variables['shouldRunOnDefaultPipelines']), eq(${{ parameters.isWasmOnlyBuild }}, ${{ parameters.runOnlyOnWasmOnlyPipelines }}))) - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_${{ parameters.browser }}_$(_BuildConfig) - extraHelixArguments: /p:BrowserHost=$(_hostedOs) /p:_DebuggerHosts=${{ parameters.browser }} - scenarios: - - wasmdebuggertests + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_${{ parameters.browser }}_$(_BuildConfig) + extraHelixArguments: 
/p:BrowserHost=$(_hostedOs) /p:_DebuggerHosts=${{ parameters.browser }} + scenarios: + - wasmdebuggertests diff --git a/eng/pipelines/common/templates/wasm-library-tests.yml b/eng/pipelines/common/templates/wasm-library-tests.yml index a848e25e1a28a..87fe49c793939 100644 --- a/eng/pipelines/common/templates/wasm-library-tests.yml +++ b/eng/pipelines/common/templates/wasm-library-tests.yml @@ -36,6 +36,7 @@ jobs: eq(variables['wasmDarcDependenciesChanged'], true), eq(dependencies.evaluate_paths.outputs['SetPathVars_tools_illink.containsChange'], true), eq(dependencies.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true), + eq(dependencies.evaluate_paths.outputs['SetPathVars_wasm_chrome.containsChange'], true), eq(dependencies.evaluate_paths.outputs['SetPathVars_wasm_specific_except_wbt_dbg.containsChange'], true)) ] - name: _wasmRunSmokeTestsOnlyArg @@ -45,12 +46,17 @@ jobs: value: /p:InstallChromeForTests=true ${{ else }}: value: '' + - name: v8InstallArg + ${{ if containsValue(parameters.scenarios, 'normal') }}: + value: /p:InstallV8ForTests=true + ${{ else }}: + value: '' jobParameters: isExtraPlatforms: ${{ parameters.isExtraPlatformsBuild }} testGroup: innerloop nameSuffix: LibraryTests${{ parameters.nameSuffix }} - buildArgs: -s mono+libs+host+packs+libs.tests -c $(_BuildConfig) /p:ArchiveTests=true /p:BrowserHost=$(_hostedOs) $(_wasmRunSmokeTestsOnlyArg) $(chromeInstallArg) ${{ parameters.extraBuildArgs }} + buildArgs: -s mono+libs+host+packs+libs.tests -c $(_BuildConfig) /p:ArchiveTests=true /p:BrowserHost=$(_hostedOs) $(_wasmRunSmokeTestsOnlyArg) $(chromeInstallArg) $(v8InstallArg) ${{ parameters.extraBuildArgs }} timeoutInMinutes: 240 # if !alwaysRun, then: # if this is runtime-wasm (isWasmOnlyBuild): @@ -62,9 +68,10 @@ jobs: eq(variables['alwaysRunVar'], true), eq(variables['isDefaultPipeline'], variables['shouldRunOnDefaultPipelines'])) # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - 
extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_$(_BuildConfig) - extraHelixArguments: /p:BrowserHost=$(_hostedOs) $(_wasmRunSmokeTestsOnlyArg) ${{ parameters.extraHelixArgs }} - scenarios: ${{ parameters.scenarios }} + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_$(_BuildConfig) + extraHelixArguments: /p:BrowserHost=$(_hostedOs) $(_wasmRunSmokeTestsOnlyArg) ${{ parameters.extraHelixArgs }} + scenarios: ${{ parameters.scenarios }} diff --git a/eng/pipelines/common/templates/wasm-runtime-tests.yml b/eng/pipelines/common/templates/wasm-runtime-tests.yml index 43671a546ba00..d8a4b76b54b59 100644 --- a/eng/pipelines/common/templates/wasm-runtime-tests.yml +++ b/eng/pipelines/common/templates/wasm-runtime-tests.yml @@ -36,15 +36,17 @@ jobs: isExtraPlatforms: ${{ parameters.isExtraPlatformsBuild }} nameSuffix: AllSubsets_Mono_RuntimeTests runtimeVariant: monointerpreter - buildArgs: -s mono+libs -c $(_BuildConfig) ${{ parameters.extraBuildArgs }} + buildArgs: -s mono+libs -c $(_BuildConfig) /p:InstallV8ForTests=false ${{ parameters.extraBuildArgs }} timeoutInMinutes: 180 condition: >- or( eq(variables['alwaysRunVar'], true), eq(variables['isDefaultPipeline'], variables['shouldRunOnDefaultPipelines'])) - extraStepsTemplate: //eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_$(_BuildConfig) + testBuildArgs: /p:InstallV8ForTests=false extraVariablesTemplates: - template: /eng/pipelines/common/templates/runtimes/test-variables.yml diff --git a/eng/pipelines/coreclr/ci.yml b/eng/pipelines/coreclr/ci.yml index e277bf0f483fd..84b0f569d42b8 100644 --- 
a/eng/pipelines/coreclr/ci.yml +++ b/eng/pipelines/coreclr/ci.yml @@ -184,4 +184,5 @@ extends: jobParameters: buildArgs: -s clr.paltests+clr.paltestlist nameSuffix: PALTests - extraStepsTemplate: /eng/pipelines/coreclr/templates/run-paltests-step.yml + postBuildSteps: + - template: /eng/pipelines/coreclr/templates/run-paltests-step.yml diff --git a/eng/pipelines/coreclr/perf-non-wasm-jobs.yml b/eng/pipelines/coreclr/perf-non-wasm-jobs.yml index 7c409fb265e41..738db29021c5f 100644 --- a/eng/pipelines/coreclr/perf-non-wasm-jobs.yml +++ b/eng/pipelines/coreclr/perf-non-wasm-jobs.yml @@ -2,24 +2,159 @@ jobs: - ${{ if and(ne(variables['System.TeamProject'], 'public'), in(variables['Build.Reason'], 'Schedule')) }}: - # build mono + # build mono iOS scenarios HybridGlobalization - template: /eng/pipelines/common/platform-matrix.yml parameters: - jobTemplate: /eng/pipelines/mono/templates/build-job.yml + jobTemplate: /eng/pipelines/common/global-build-job.yml + buildConfig: release runtimeFlavor: mono + platforms: + - ios_arm64 + jobParameters: + buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) + nameSuffix: iOSMono + isOfficialBuild: false + extraStepsTemplate: /eng/pipelines/coreclr/templates/build-perf-sample-apps.yml + extraStepsParameters: + rootFolder: '$(Build.SourcesDirectory)/artifacts/' + includeRootFolder: true + displayName: iOS Mono Artifacts + artifactName: iOSMonoarm64 + archiveExtension: '.tar.gz' + archiveType: tar + tarCompression: gz + hybridGlobalization: True + + # build NativeAOT iOS scenarios HybridGlobalization + - template: /eng/pipelines/common/platform-matrix.yml + parameters: + jobTemplate: /eng/pipelines/common/global-build-job.yml buildConfig: release + runtimeFlavor: coreclr platforms: - - linux_x64 + - ios_arm64 + jobParameters: + buildArgs: --cross -s clr.alljits+clr.tools+clr.nativeaotruntime+clr.nativeaotlibs+libs -c $(_BuildConfig) + nameSuffix: iOSNativeAOT + isOfficialBuild: false + extraStepsTemplate: 
/eng/pipelines/coreclr/templates/build-perf-sample-apps.yml + extraStepsParameters: + rootFolder: '$(Build.SourcesDirectory)/artifacts/' + includeRootFolder: true + displayName: iOS NativeAOT Artifacts + artifactName: iOSNativeAOTarm64 + archiveExtension: '.tar.gz' + archiveType: tar + tarCompression: gz + hybridGlobalization: True - # build coreclr and libraries + # run mono iOS scenarios scenarios HybridGlobalization - template: /eng/pipelines/common/platform-matrix.yml parameters: - jobTemplate: /eng/pipelines/common/build-coreclr-and-libraries-job.yml + jobTemplate: /eng/pipelines/coreclr/templates/perf-job.yml buildConfig: release + runtimeFlavor: mono platforms: - - linux_x64 + - osx_x64 jobParameters: testGroup: perf + runtimeType: iOSMono + projectFile: ios_scenarios.proj + runKind: ios_scenarios + runJobTemplate: /eng/pipelines/coreclr/templates/run-scenarios-job.yml + logicalmachine: 'perfiphone12mini' + iOSLlvmBuild: False + iOSStripSymbols: False + hybridGlobalization: True + + - template: /eng/pipelines/common/platform-matrix.yml + parameters: + jobTemplate: /eng/pipelines/coreclr/templates/perf-job.yml + buildConfig: release + runtimeFlavor: mono + platforms: + - osx_x64 + jobParameters: + testGroup: perf + runtimeType: iOSMono + projectFile: ios_scenarios.proj + runKind: ios_scenarios + runJobTemplate: /eng/pipelines/coreclr/templates/run-scenarios-job.yml + logicalmachine: 'perfiphone12mini' + iOSLlvmBuild: False + iOSStripSymbols: True + hybridGlobalization: True + + - template: /eng/pipelines/common/platform-matrix.yml + parameters: + jobTemplate: /eng/pipelines/coreclr/templates/perf-job.yml + buildConfig: release + runtimeFlavor: mono + platforms: + - osx_x64 + jobParameters: + testGroup: perf + runtimeType: iOSMono + projectFile: ios_scenarios.proj + runKind: ios_scenarios + runJobTemplate: /eng/pipelines/coreclr/templates/run-scenarios-job.yml + logicalmachine: 'perfiphone12mini' + iOSLlvmBuild: True + iOSStripSymbols: False + 
hybridGlobalization: True + + - template: /eng/pipelines/common/platform-matrix.yml + parameters: + jobTemplate: /eng/pipelines/coreclr/templates/perf-job.yml + buildConfig: release + runtimeFlavor: mono + platforms: + - osx_x64 + jobParameters: + testGroup: perf + runtimeType: iOSMono + projectFile: ios_scenarios.proj + runKind: ios_scenarios + runJobTemplate: /eng/pipelines/coreclr/templates/run-scenarios-job.yml + logicalmachine: 'perfiphone12mini' + iOSLlvmBuild: True + iOSStripSymbols: True + hybridGlobalization: True + + # run NativeAOT iOS scenarios HybridGlobalization + - template: /eng/pipelines/common/platform-matrix.yml + parameters: + jobTemplate: /eng/pipelines/coreclr/templates/perf-job.yml + buildConfig: release + runtimeFlavor: coreclr + platforms: + - osx_x64 + jobParameters: + testGroup: perf + runtimeType: iOSNativeAOT + projectFile: ios_scenarios.proj + runKind: ios_scenarios + runJobTemplate: /eng/pipelines/coreclr/templates/run-scenarios-job.yml + logicalmachine: 'perfiphone12mini' + iOSStripSymbols: False + hybridGlobalization: True + + - template: /eng/pipelines/common/platform-matrix.yml + parameters: + jobTemplate: /eng/pipelines/coreclr/templates/perf-job.yml + buildConfig: release + runtimeFlavor: coreclr + platforms: + - osx_x64 + jobParameters: + testGroup: perf + runtimeType: iOSNativeAOT + projectFile: ios_scenarios.proj + runKind: ios_scenarios + runJobTemplate: /eng/pipelines/coreclr/templates/run-scenarios-job.yml + logicalmachine: 'perfiphone12mini' + iOSStripSymbols: True + hybridGlobalization: True - ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'Schedule')) }}: @@ -48,15 +183,16 @@ jobs: buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) /p:BuildMonoAOTCrossCompiler=true /p:MonoLibClang="/usr/local/lib/libclang.so.16" /p:AotHostArchitecture=x64 /p:AotHostOS=linux nameSuffix: AOT isOfficialBuild: false - extraStepsTemplate: /eng/pipelines/common/upload-artifact-step.yml - 
extraStepsParameters: - rootFolder: '$(Build.SourcesDirectory)/artifacts/' - includeRootFolder: true - displayName: AOT Mono Artifacts - artifactName: LinuxMonoAOTx64 - archiveExtension: '.tar.gz' - archiveType: tar - tarCompression: gz + postBuildSteps: + - template: /eng/pipelines/common/upload-artifact-step.yml + parameters: + rootFolder: '$(Build.SourcesDirectory)/artifacts/' + includeRootFolder: true + displayName: AOT Mono Artifacts + artifactName: LinuxMonoAOTx64 + archiveExtension: '.tar.gz' + archiveType: tar + tarCompression: gz # build mono Android scenarios - template: /eng/pipelines/common/platform-matrix.yml @@ -70,15 +206,16 @@ jobs: buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) nameSuffix: AndroidMono isOfficialBuild: false - extraStepsTemplate: /eng/pipelines/coreclr/templates/build-perf-sample-apps.yml - extraStepsParameters: - rootFolder: '$(Build.SourcesDirectory)/artifacts/' - includeRootFolder: true - displayName: Android Mono Artifacts - artifactName: AndroidMonoarm64 - archiveExtension: '.tar.gz' - archiveType: tar - tarCompression: gz + postBuildSteps: + - template: /eng/pipelines/coreclr/templates/build-perf-sample-apps.yml + parameters: + rootFolder: '$(Build.SourcesDirectory)/artifacts/' + includeRootFolder: true + displayName: Android Mono Artifacts + artifactName: AndroidMonoarm64 + archiveExtension: '.tar.gz' + archiveType: tar + tarCompression: gz # build mono iOS scenarios - template: /eng/pipelines/common/platform-matrix.yml @@ -92,15 +229,16 @@ jobs: buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) nameSuffix: iOSMono isOfficialBuild: false - extraStepsTemplate: /eng/pipelines/coreclr/templates/build-perf-sample-apps.yml - extraStepsParameters: - rootFolder: '$(Build.SourcesDirectory)/artifacts/' - includeRootFolder: true - displayName: iOS Mono Artifacts - artifactName: iOSMonoarm64 - archiveExtension: '.tar.gz' - archiveType: tar - tarCompression: gz + postBuildSteps: + - template: 
/eng/pipelines/coreclr/templates/build-perf-sample-apps.yml + parameters: + rootFolder: '$(Build.SourcesDirectory)/artifacts/' + includeRootFolder: true + displayName: iOS Mono Artifacts + artifactName: iOSMonoarm64 + archiveExtension: '.tar.gz' + archiveType: tar + tarCompression: gz # build NativeAOT iOS scenarios - template: /eng/pipelines/common/platform-matrix.yml @@ -114,15 +252,16 @@ jobs: buildArgs: --cross -s clr.alljits+clr.tools+clr.nativeaotruntime+clr.nativeaotlibs+libs -c $(_BuildConfig) nameSuffix: iOSNativeAOT isOfficialBuild: false - extraStepsTemplate: /eng/pipelines/coreclr/templates/build-perf-sample-apps.yml - extraStepsParameters: - rootFolder: '$(Build.SourcesDirectory)/artifacts/' - includeRootFolder: true - displayName: iOS NativeAOT Artifacts - artifactName: iOSNativeAOTarm64 - archiveExtension: '.tar.gz' - archiveType: tar - tarCompression: gz + postBuildSteps: + - template: /eng/pipelines/coreclr/templates/build-perf-sample-apps.yml + parameters: + rootFolder: '$(Build.SourcesDirectory)/artifacts/' + includeRootFolder: true + displayName: iOS NativeAOT Artifacts + artifactName: iOSNativeAOTarm64 + archiveExtension: '.tar.gz' + archiveType: tar + tarCompression: gz # build mono - template: /eng/pipelines/common/platform-matrix.yml @@ -403,9 +542,10 @@ jobs: buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) nameSuffix: Mono_Packs isOfficialBuild: false - extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml - extraStepsParameters: - name: MonoRuntimePacks + postBuildSteps: + - template: /eng/pipelines/common/upload-intermediate-artifacts-step.yml + parameters: + name: MonoRuntimePacks # build PerfBDN app - template: /eng/pipelines/common/platform-matrix.yml @@ -423,12 +563,13 @@ jobs: isOfficialBuild: false pool: vmImage: 'macos-12' - extraStepsTemplate: /eng/pipelines/coreclr/templates/build-perf-bdn-app.yml - extraStepsParameters: - rootFolder: '$(Build.SourcesDirectory)/artifacts/' - includeRootFolder: 
true - displayName: Android BDN App Artifacts - artifactName: PerfBDNAppArm - archiveExtension: '.tar.gz' - archiveType: tar - tarCompression: gz + postBuildSteps: + - template: /eng/pipelines/coreclr/templates/build-perf-bdn-app.yml + parameters: + rootFolder: '$(Build.SourcesDirectory)/artifacts/' + includeRootFolder: true + displayName: Android BDN App Artifacts + artifactName: PerfBDNAppArm + archiveExtension: '.tar.gz' + archiveType: tar + tarCompression: gz diff --git a/eng/pipelines/coreclr/perf-wasm-jobs.yml b/eng/pipelines/coreclr/perf-wasm-jobs.yml index 9bed189cab67e..ee36c3e41cff3 100644 --- a/eng/pipelines/coreclr/perf-wasm-jobs.yml +++ b/eng/pipelines/coreclr/perf-wasm-jobs.yml @@ -25,9 +25,10 @@ jobs: buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) /p:AotHostArchitecture=x64 /p:AotHostOS=$(_hostedOS) nameSuffix: wasm isOfficialBuild: false - extraStepsTemplate: /eng/pipelines/coreclr/perf-wasm-prepare-artifacts-steps.yml - extraStepsParameters: - configForBuild: Release + postBuildSteps: + - template: /eng/pipelines/coreclr/perf-wasm-prepare-artifacts-steps.yml + parameters: + configForBuild: Release #run mono wasm microbenchmarks perf job - template: /eng/pipelines/common/platform-matrix.yml @@ -92,9 +93,10 @@ jobs: buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) /p:AotHostArchitecture=x64 /p:AotHostOS=$(_hostedOS) nameSuffix: wasm isOfficialBuild: false - extraStepsTemplate: /eng/pipelines/coreclr/perf-wasm-prepare-artifacts-steps.yml - extraStepsParameters: - configForBuild: Release + postBuildSteps: + - template: /eng/pipelines/coreclr/perf-wasm-prepare-artifacts-steps.yml + parameters: + configForBuild: Release # run mono wasm interpreter (default) microbenchmarks perf job - template: /eng/pipelines/common/platform-matrix.yml @@ -171,3 +173,27 @@ jobs: logicalmachine: 'perftiger' downloadSpecificBuild: ${{ parameters.downloadSpecificBuild }} perfForkToUse: ${{ parameters.perfForkToUse }} + +- ${{if 
or(and(ne(variables['System.TeamProject'], 'public'), in(variables['Build.Reason'], 'Schedule')), in(variables['Build.DefinitionName'], 'runtime-wasm-perf')) }}: + # run mono wasm blazor perf job + - template: /eng/pipelines/common/platform-matrix.yml + parameters: + jobTemplate: /eng/pipelines/coreclr/templates/perf-job.yml + buildConfig: release + runtimeFlavor: wasm + platforms: + - linux_x64 + jobParameters: + testGroup: perf + liveLibrariesBuildConfig: Release + skipLiveLibrariesDownload: true + runtimeType: wasm + projectFile: blazor_perf.proj + runKind: blazor_scenarios + runJobTemplate: /eng/pipelines/coreclr/templates/run-scenarios-job.yml + # For working with a newer sdk, and previous tfm (eg. 9.0 sdk, and net8.0 tfm) + additionalSetupParameters: '--dotnetversions 8.0.0' # passed to performance-setup.sh + logicalmachine: 'perftiger' + downloadSpecificBuild: ${{ parameters.downloadSpecificBuild }} + perfForkToUse: ${{ parameters.perfForkToUse }} + hybridGlobalization: True diff --git a/eng/pipelines/coreclr/perf.yml b/eng/pipelines/coreclr/perf.yml index edaadde3e511e..969d660218cfc 100644 --- a/eng/pipelines/coreclr/perf.yml +++ b/eng/pipelines/coreclr/perf.yml @@ -27,13 +27,13 @@ variables: # Since, we are not running *any* perf jobs, none of these builds are needed, # thus the whole scheduled run can be disabled. 
# -#schedules: -#- cron: "30 2 * * *" - #displayName: Every night at 2:30AM - #branches: - #include: - #- main - #always: true +schedules: +- cron: "0 0 * * 1" + displayName: Weekly Monday 12am UTC Build + branches: + include: + - main + always: true extends: template: /eng/pipelines/common/templates/pipeline-with-resources.yml @@ -47,7 +47,7 @@ extends: collectHelixLogsScript: ${{ variables._wasmCollectHelixLogsScript }} #${{ and(ne(variables['System.TeamProject'], 'public'), in(variables['Build.Reason'], 'Schedule')) }}: # runProfile: 'non-v8' - ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'Schedule')) }}: + ${{ if ne(variables['System.TeamProject'], 'public') }}: runProfile: 'v8' - template: /eng/pipelines/coreclr/perf-non-wasm-jobs.yml diff --git a/eng/pipelines/coreclr/perf_slow.yml b/eng/pipelines/coreclr/perf_slow.yml index 318bc25915d55..8a7bd972ba250 100644 --- a/eng/pipelines/coreclr/perf_slow.yml +++ b/eng/pipelines/coreclr/perf_slow.yml @@ -101,15 +101,16 @@ extends: buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) /p:MonoAOTEnableLLVM=true /p:MonoBundleLLVMOptimizer=true /p:BuildMonoAOTCrossCompiler=true /p:MonoLibClang="/usr/local/lib/libclang.so.16" /p:AotHostArchitecture=arm64 /p:AotHostOS=linux nameSuffix: AOT isOfficialBuild: false - extraStepsTemplate: /eng/pipelines/common/upload-artifact-step.yml - extraStepsParameters: - rootFolder: '$(Build.SourcesDirectory)/artifacts/' - includeRootFolder: true - displayName: AOT Mono Artifacts - artifactName: LinuxMonoAOTarm64 - archiveExtension: '.tar.gz' - archiveType: tar - tarCompression: gz + postBuildSteps: + - template: /eng/pipelines/common/upload-artifact-step.yml + parameters: + rootFolder: '$(Build.SourcesDirectory)/artifacts/' + includeRootFolder: true + displayName: AOT Mono Artifacts + artifactName: LinuxMonoAOTarm64 + archiveExtension: '.tar.gz' + archiveType: tar + tarCompression: gz # run mono aot microbenchmarks perf job - template: 
/eng/pipelines/common/platform-matrix.yml diff --git a/eng/pipelines/coreclr/runtime-nativeaot-outerloop.yml b/eng/pipelines/coreclr/runtime-nativeaot-outerloop.yml index d7e0443561d83..3067f44c3e97c 100644 --- a/eng/pipelines/coreclr/runtime-nativeaot-outerloop.yml +++ b/eng/pipelines/coreclr/runtime-nativeaot-outerloop.yml @@ -67,10 +67,11 @@ extends: buildArgs: -s clr.aot+host.native+libs+libs.tests -c $(_BuildConfig) /p:TestNativeAot=true /p:ArchiveTests=true timeoutInMinutes: 300 # doesn't normally take this long, but I've seen Helix queues backed up for 160 minutes # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig) # # CoreCLR NativeAOT release build (checked runtime) and libraries tests @@ -91,10 +92,11 @@ extends: buildArgs: -s clr.aot+host.native+libs+libs.tests -c $(_BuildConfig) -rc Checked /p:TestNativeAot=true /p:ArchiveTests=true timeoutInMinutes: 360 # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: NativeAOT_Checked_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: NativeAOT_Checked_$(_BuildConfig) # # CoreCLR NativeAOT release build (checked runtime) - SizeOpt and libraries tests @@ -115,10 +117,11 @@ extends: buildArgs: -s clr.aot+host.native+libs+libs.tests -c $(_BuildConfig) -rc Checked /p:TestNativeAot=true /p:ArchiveTests=true /p:OptimizationPreference=Size timeoutInMinutes: 240 # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: 
NativeAOT_Checked_SizeOpt_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: NativeAOT_Checked_SizeOpt_$(_BuildConfig) # # CoreCLR NativeAOT release build (checked runtime) - SpeedOpt and libraries tests @@ -139,10 +142,11 @@ extends: buildArgs: -s clr.aot+host.native+libs+libs.tests -c $(_BuildConfig) -rc Checked /p:TestNativeAot=true /p:ArchiveTests=true /p:OptimizationPreference=Speed timeoutInMinutes: 240 # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: NativeAOT_Checked_SpeedOpt_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: NativeAOT_Checked_SpeedOpt_$(_BuildConfig) # # CoreCLR NativeAOT checked build and Pri0 tests @@ -165,11 +169,12 @@ extends: timeoutInMinutes: 240 nameSuffix: NativeAOT_Pri0 buildArgs: -s clr.aot+host.native+libs -rc $(_BuildConfig) -lc Release -hc Release - extraStepsTemplate: /eng/pipelines/coreclr/nativeaot-post-build-steps.yml - extraStepsParameters: - creator: dotnet-bot - testBuildArgs: 'nativeaot /p:IlcUseServerGc=false' - liveLibrariesBuildConfig: Release + postBuildSteps: + - template: /eng/pipelines/coreclr/nativeaot-post-build-steps.yml + parameters: + creator: dotnet-bot + testBuildArgs: 'nativeaot /p:IlcUseServerGc=false' + liveLibrariesBuildConfig: Release testRunNamePrefixSuffix: NativeAOT_Pri0_$(_BuildConfig) extraVariablesTemplates: - template: /eng/pipelines/common/templates/runtimes/test-variables.yml diff --git a/eng/pipelines/coreclr/superpmi-collect.yml b/eng/pipelines/coreclr/superpmi-collect.yml index 0ebf400f83cc6..820d8f1a531f2 100644 --- a/eng/pipelines/coreclr/superpmi-collect.yml +++ b/eng/pipelines/coreclr/superpmi-collect.yml @@ -15,6 +15,11 @@ trigger: # and should not be triggerable from a PR. 
pr: none +variables: +# disable CodeQL here, we have a separate pipeline for it +- name: Codeql.Enabled + value: False + schedules: - cron: "0 17 * * 0" displayName: Sun at 9:00 AM (UTC-8:00) diff --git a/eng/pipelines/coreclr/templates/build-perf-sample-apps.yml b/eng/pipelines/coreclr/templates/build-perf-sample-apps.yml index b74f22d62690a..fd55342e1a055 100644 --- a/eng/pipelines/coreclr/templates/build-perf-sample-apps.yml +++ b/eng/pipelines/coreclr/templates/build-perf-sample-apps.yml @@ -16,6 +16,7 @@ parameters: archiveExtension: '' archiveType: '' tarCompression: '' + hybridGlobalization: False steps: # Build Android sample app @@ -37,11 +38,11 @@ steps: displayName: clean bindir - ${{ if and(eq(parameters.osGroup, 'ios'), eq(parameters.nameSuffix, 'iOSMono')) }}: - - script: make build-appbundle TARGET=ios MONO_ARCH=arm64 MONO_CONFIG=Release AOT=True USE_LLVM=False DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=false + - script: make build-appbundle TARGET=ios MONO_ARCH=arm64 MONO_CONFIG=Release AOT=True USE_LLVM=False DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=false HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }} env: DevTeamProvisioning: '-' workingDirectory: $(Build.SourcesDirectory)/src/mono/sample/iOS - displayName: Build HelloiOS AOT sample app LLVM=False STRIP_SYMBOLS=False + displayName: Build HelloiOS AOT sample app LLVM=False STRIP_SYMBOLS=False HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }} - task: PublishBuildArtifacts@1 condition: succeededOrFailed() displayName: 'Publish binlog' @@ -53,17 +54,17 @@ steps: rootFolder: $(Build.SourcesDirectory)/src/mono/sample/iOS/bin/ios-arm64/Bundle/HelloiOS/Release-iphoneos/HelloiOS.app includeRootFolder: true displayName: iOS Sample App NoLLVM - artifactName: iOSSampleAppNoLLVMSymbols + artifactName: iOSSampleAppNoLLVMSymbolsHybridGlobalization${{parameters.hybridGlobalization}} archiveExtension: '.zip' archiveType: zip - script: rm -r -f $(Build.SourcesDirectory)/src/mono/sample/iOS/bin 
workingDirectory: $(Build.SourcesDirectory)/src/mono/sample/iOS displayName: Clean bindir - - script: make build-appbundle TARGET=ios MONO_ARCH=arm64 MONO_CONFIG=Release AOT=True USE_LLVM=False DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=true + - script: make build-appbundle TARGET=ios MONO_ARCH=arm64 MONO_CONFIG=Release AOT=True USE_LLVM=False DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=true HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }} env: DevTeamProvisioning: '-' workingDirectory: $(Build.SourcesDirectory)/src/mono/sample/iOS - displayName: Build HelloiOS AOT sample app LLVM=False STRIP_SYMBOLS=True + displayName: Build HelloiOS AOT sample app LLVM=False STRIP_SYMBOLS=True HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }} - task: PublishBuildArtifacts@1 condition: succeededOrFailed() displayName: 'Publish binlog' @@ -75,17 +76,17 @@ steps: rootFolder: $(Build.SourcesDirectory)/src/mono/sample/iOS/bin/ios-arm64/Bundle/HelloiOS/Release-iphoneos/HelloiOS.app includeRootFolder: true displayName: iOS Sample App NoLLVM NoSymbols - artifactName: iOSSampleAppNoLLVMNoSymbols + artifactName: iOSSampleAppNoLLVMNoSymbolsHybridGlobalization${{parameters.hybridGlobalization}} archiveExtension: '.zip' archiveType: zip - script: rm -r -f $(Build.SourcesDirectory)/src/mono/sample/iOS/bin workingDirectory: $(Build.SourcesDirectory)/src/mono/sample/iOS displayName: Clean bindir - - script: make build-appbundle TARGET=ios MONO_ARCH=arm64 MONO_CONFIG=Release AOT=True USE_LLVM=True DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=false + - script: make build-appbundle TARGET=ios MONO_ARCH=arm64 MONO_CONFIG=Release AOT=True USE_LLVM=True DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=false HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }} env: DevTeamProvisioning: '-' workingDirectory: $(Build.SourcesDirectory)/src/mono/sample/iOS - displayName: Build HelloiOS AOT sample app LLVM=True STRIP_SYMBOLS=False + displayName: Build HelloiOS AOT sample app LLVM=True 
STRIP_SYMBOLS=False HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }} - task: PublishBuildArtifacts@1 condition: succeededOrFailed() displayName: 'Publish binlog' @@ -97,17 +98,17 @@ steps: rootFolder: $(Build.SourcesDirectory)/src/mono/sample/iOS/bin/ios-arm64/Bundle/HelloiOS/Release-iphoneos/HelloiOS.app includeRootFolder: true displayName: iOS Sample App LLVM - artifactName: iOSSampleAppLLVMSymbols + artifactName: iOSSampleAppLLVMSymbolsHybridGlobalization${{parameters.hybridGlobalization}} archiveExtension: '.zip' archiveType: zip - script: rm -r -f $(Build.SourcesDirectory)/src/mono/sample/iOS/bin workingDirectory: $(Build.SourcesDirectory)/src/mono/sample/iOS displayName: Clean bindir - - script: make build-appbundle TARGET=ios MONO_ARCH=arm64 MONO_CONFIG=Release AOT=True USE_LLVM=True DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=true + - script: make build-appbundle TARGET=ios MONO_ARCH=arm64 MONO_CONFIG=Release AOT=True USE_LLVM=True DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=true HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }} env: DevTeamProvisioning: '-' workingDirectory: $(Build.SourcesDirectory)/src/mono/sample/iOS - displayName: Build HelloiOS AOT sample app LLVM=True STRIP_SYMBOLS=True + displayName: Build HelloiOS AOT sample app LLVM=True STRIP_SYMBOLS=True HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }} - task: PublishBuildArtifacts@1 condition: succeededOrFailed() displayName: 'Publish binlog' @@ -119,16 +120,16 @@ steps: rootFolder: $(Build.SourcesDirectory)/src/mono/sample/iOS/bin/ios-arm64/Bundle/HelloiOS/Release-iphoneos/HelloiOS.app includeRootFolder: true displayName: iOS Sample App LLVM NoSymbols - artifactName: iOSSampleAppLLVMNoSymbols + artifactName: iOSSampleAppLLVMNoSymbolsHybridGlobalization${{parameters.hybridGlobalization}} archiveExtension: '.zip' archiveType: zip - ${{ if and(eq(parameters.osGroup, 'ios'), eq(parameters.nameSuffix, 'iOSNativeAOT')) }}: - - script: make hello-app TARGET_OS=ios 
TARGET_ARCH=arm64 BUILD_CONFIG=Release DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=false + - script: make hello-app TARGET_OS=ios TARGET_ARCH=arm64 BUILD_CONFIG=Release DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=false HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }} env: DevTeamProvisioning: '-' workingDirectory: $(Build.SourcesDirectory)/src/mono/sample/iOS-NativeAOT - displayName: Build HelloiOS Native AOT sample app STRIP_SYMBOLS=False + displayName: Build HelloiOS Native AOT sample app STRIP_SYMBOLS=False HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }} - task: PublishBuildArtifacts@1 condition: succeededOrFailed() displayName: 'Publish binlog' @@ -140,17 +141,17 @@ steps: rootFolder: $(Build.SourcesDirectory)/src/mono/sample/iOS-NativeAOT/bin/ios-arm64/Bundle/HelloiOS/Release-iphoneos/HelloiOS.app includeRootFolder: true displayName: iOS Sample App Symbols - artifactName: iOSSampleAppSymbols + artifactName: iOSSampleAppSymbolsHybridGlobalization${{parameters.hybridGlobalization}} archiveExtension: '.zip' archiveType: zip - script: rm -r -f $(Build.SourcesDirectory)/src/mono/sample/iOS-NativeAOT/bin workingDirectory: $(Build.SourcesDirectory)/src/mono/sample/iOS-NativeAOT displayName: Clean bindir - - script: make hello-app TARGET_OS=ios TARGET_ARCH=arm64 BUILD_CONFIG=Release DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=true + - script: make hello-app TARGET_OS=ios TARGET_ARCH=arm64 BUILD_CONFIG=Release DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=true HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }} env: DevTeamProvisioning: '-' workingDirectory: $(Build.SourcesDirectory)/src/mono/sample/iOS-NativeAOT - displayName: Build HelloiOS Native AOT sample app STRIP_SYMBOLS=True + displayName: Build HelloiOS Native AOT sample app STRIP_SYMBOLS=True HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }} - task: PublishBuildArtifacts@1 condition: succeededOrFailed() displayName: 'Publish binlog' @@ -162,6 +163,6 @@ steps: rootFolder: 
$(Build.SourcesDirectory)/src/mono/sample/iOS-NativeAOT/bin/ios-arm64/Bundle/HelloiOS/Release-iphoneos/HelloiOS.app includeRootFolder: true displayName: iOS Sample App NoSymbols - artifactName: iOSSampleAppNoSymbols + artifactName: iOSSampleAppNoSymbolsHybridGlobalization${{parameters.hybridGlobalization}} archiveExtension: '.zip' archiveType: zip diff --git a/eng/pipelines/coreclr/templates/format-job.yml b/eng/pipelines/coreclr/templates/format-job.yml index 2417000204c01..88e5184db032c 100644 --- a/eng/pipelines/coreclr/templates/format-job.yml +++ b/eng/pipelines/coreclr/templates/format-job.yml @@ -35,13 +35,21 @@ jobs: - ${{ if eq(parameters.osGroup, 'windows') }}: - name: PythonScript value: 'py -3' - - ${{ if ne(parameters.osGroup, 'windows') }}: - name: PythonScript value: 'python3' + - ${{ if eq(parameters.osGroup, 'linux') }}: + - name: LinuxCrossArg + value: '--cross' + - ${{ if ne(parameters.osGroup, 'linux') }}: + - name: LinuxCrossArg + value: '' + condition: ${{ parameters.condition }} + steps: + - task: UseDotNet@2 # This should match what jitutils YML uses to build. 
displayName: 'Install .NET SDK' @@ -50,12 +58,14 @@ jobs: version: '6.x' includePreviewVersions: true installationPath: $(Agent.ToolsDirectory)/dotnet - - script: $(PythonScript) $(Build.SourcesDirectory)/src/coreclr/scripts/jitformat.py -c $(Build.SourcesDirectory)/src/coreclr -o $(osGroup) -a $(archType) + + - script: $(PythonScript) $(Build.SourcesDirectory)/src/coreclr/scripts/jitformat.py -r $(Build.SourcesDirectory) -o $(osGroup) -a $(archType) $(LinuxCrossArg) displayName: Run jitformat.py + - task: PublishBuildArtifacts@1 displayName: Publish format.patch inputs: - PathtoPublish: '$(Build.SourcesDirectory)/src/coreclr/format.patch' + PathtoPublish: '$(Build.SourcesDirectory)/format.patch' ArtifactName: format.$(osGroup).$(archType).patch continueOnError: true condition: failed() diff --git a/eng/pipelines/coreclr/templates/perf-job.yml b/eng/pipelines/coreclr/templates/perf-job.yml index de548b608a34f..53074d556c009 100644 --- a/eng/pipelines/coreclr/templates/perf-job.yml +++ b/eng/pipelines/coreclr/templates/perf-job.yml @@ -21,6 +21,7 @@ parameters: javascriptEngine: 'NoJS' iOSLlvmBuild: 'False' iOSStripSymbols: 'False' + hybridGlobalization: 'False' skipLiveLibrariesDownload: false collectHelixLogsScript: '' timeoutInMinutes: 320 @@ -39,8 +40,8 @@ jobs: - template: ${{ parameters.runJobTemplate }} parameters: # Compute job name from template parameters - jobName: ${{ format('perfbuild_{0}{1}_{2}_{3}_{4}_{5}_{6}_{7}_{8}_{9}_{10}_{11}_{12}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig, parameters.runtimeType, parameters.codeGenType, parameters.runKind, parameters.logicalMachine, parameters.javascriptEngine, parameters.pgoRunType, parameters.physicalPromotionRunType, parameters.iosLlvmBuild, parameters.iosStripSymbols) }} - displayName: ${{ format('Performance {0}{1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig, 
parameters.runtimeType, parameters.codeGenType, parameters.runKind, parameters.logicalMachine, parameters.javascriptEngine, parameters.pgoRunType, parameters.physicalPromotionRunType, parameters.iosLlvmBuild, parameters.iosStripSymbols) }} + jobName: ${{ format('perfbuild_{0}{1}_{2}_{3}_{4}_{5}_{6}_{7}_{8}_{9}_{10}_{11}_{12}_{13}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig, parameters.runtimeType, parameters.codeGenType, parameters.runKind, parameters.logicalMachine, parameters.javascriptEngine, parameters.pgoRunType, parameters.physicalPromotionRunType, parameters.iosLlvmBuild, parameters.iosStripSymbols, parameters.hybridGlobalization) }} + displayName: ${{ format('Performance {0}{1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {11} {12} {13}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig, parameters.runtimeType, parameters.codeGenType, parameters.runKind, parameters.logicalMachine, parameters.javascriptEngine, parameters.pgoRunType, parameters.physicalPromotionRunType, parameters.iosLlvmBuild, parameters.iosStripSymbols, parameters.hybridGlobalization) }} pool: ${{ parameters.pool }} buildConfig: ${{ parameters.buildConfig }} archType: ${{ parameters.archType }} @@ -60,6 +61,7 @@ jobs: javascriptEngine: ${{ parameters.javascriptEngine }} iosLlvmBuild: ${{ parameters.iosLlvmBuild }} iosStripSymbols: ${{ parameters.iosStripSymbols }} + hybridGlobalization: ${{ parameters.hybridGlobalization }} timeoutInMinutes: ${{ parameters.timeoutInMinutes }} ${{ if and(eq(parameters.runtimeType, 'wasm'), eq(parameters.codeGenType, 'aot')) }}: @@ -99,7 +101,7 @@ jobs: ${{ if and(eq(parameters.runtimeType, 'mono'), ne(parameters.codeGenType, 'AOT')) }}: extraSetupParameters: --architecture ${{ parameters.archType }} --monodotnet $(Build.SourcesDirectory)/.dotnet-mono ${{ if and(eq(parameters.runtimeType, 'wasm'), ne(parameters.codeGenType, 'AOT')) }}: - extraSetupParameters: --architecture ${{ 
parameters.archType }} --wasmbundle $(librariesDownloadDir)/bin/wasm --javascriptengine ${{ parameters.javascriptEngine }} $(extraSetupParametersSuffix) + extraSetupParameters: --architecture ${{ parameters.archType }} --wasmbundle $(librariesDownloadDir)/bin/wasm --javascriptengine ${{ parameters.javascriptEngine }} --hybridglobalization ${{ parameters.hybridGlobalization }} $(extraSetupParametersSuffix) ${{ if and(eq(parameters.runtimeType, 'wasm'), eq(parameters.codeGenType, 'AOT')) }}: extraSetupParameters: --architecture ${{ parameters.archType }} --wasmbundle $(librariesDownloadDir)/bin/wasm --wasmaot --javascriptengine ${{ parameters.javascriptEngine }} $(extraSetupParametersSuffix) ${{ if and(eq(parameters.codeGenType, 'AOT'), ne(parameters.runtimeType, 'wasm')) }}: @@ -111,9 +113,9 @@ jobs: ${{ if in(parameters.runtimeType, 'AndroidMono') }}: extraSetupParameters: -Architecture ${{ parameters.archType }} -AndroidMono ${{ if in(parameters.runtimeType, 'iOSMono') }}: - extraSetupParameters: --architecture ${{ parameters.archType }} --iosmono --iosllvmbuild ${{ parameters.iOSLlvmBuild }} --iosstripsymbols ${{ parameters.iOSStripSymbols }} + extraSetupParameters: --architecture ${{ parameters.archType }} --iosmono --iosllvmbuild ${{ parameters.iOSLlvmBuild }} --iosstripsymbols ${{ parameters.iOSStripSymbols }} --hybridglobalization ${{ parameters.hybridGlobalization }} ${{ if in(parameters.runtimeType, 'iOSNativeAOT') }}: - extraSetupParameters: --architecture ${{ parameters.archType }} --iosnativeaot --iosllvmbuild ${{ parameters.iOSLlvmBuild }} --iosstripsymbols ${{ parameters.iOSStripSymbols }} + extraSetupParameters: --architecture ${{ parameters.archType }} --iosnativeaot --iosllvmbuild ${{ parameters.iOSLlvmBuild }} --iosstripsymbols ${{ parameters.iOSStripSymbols }} --hybridglobalization ${{ parameters.hybridGlobalization }} variables: - ${{ each variable in parameters.variables }}: @@ -237,23 +239,23 @@ jobs: unpackFolder: 
$(Build.SourcesDirectory)/iosHelloWorld cleanUnpackFolder: false ${{ if and(eq(parameters.runtimeType, 'iOSMono'), eq(parameters.iOSLlvmBuild, 'False'), eq(parameters.iOSStripSymbols, 'False')) }}: - artifactName: 'iOSSampleAppNoLLVMSymbols' - artifactFileName: 'iOSSampleAppNoLLVMSymbols.zip' + artifactName: 'iOSSampleAppNoLLVMSymbolsHybridGlobalization${{parameters.hybridGlobalization}}' + artifactFileName: 'iOSSampleAppNoLLVMSymbolsHybridGlobalization${{parameters.hybridGlobalization}}.zip' ${{ if and(eq(parameters.runtimeType, 'iOSMono'), eq(parameters.iOSLlvmBuild, 'False'), eq(parameters.iOSStripSymbols, 'True')) }}: - artifactName: 'iOSSampleAppNoLLVMNoSymbols' - artifactFileName: 'iOSSampleAppNoLLVMNoSymbols.zip' + artifactName: 'iOSSampleAppNoLLVMNoSymbolsHybridGlobalization${{parameters.hybridGlobalization}}' + artifactFileName: 'iOSSampleAppNoLLVMNoSymbolsHybridGlobalization${{parameters.hybridGlobalization}}.zip' ${{ if and(eq(parameters.runtimeType, 'iOSMono'), eq(parameters.iOSLlvmBuild, 'True'), eq(parameters.iOSStripSymbols, 'False')) }}: - artifactName: 'iOSSampleAppLLVMSymbols' - artifactFileName: 'iOSSampleAppLLVMSymbols.zip' + artifactName: 'iOSSampleAppLLVMSymbolsHybridGlobalization${{parameters.hybridGlobalization}}' + artifactFileName: 'iOSSampleAppLLVMSymbolsHybridGlobalization${{parameters.hybridGlobalization}}.zip' ${{ if and(eq(parameters.runtimeType, 'iOSMono'), eq(parameters.iOSLlvmBuild, 'True'), eq(parameters.iOSStripSymbols, 'True')) }}: - artifactName: 'iOSSampleAppLLVMNoSymbols' - artifactFileName: 'iOSSampleAppLLVMNoSymbols.zip' + artifactName: 'iOSSampleAppLLVMNoSymbolsHybridGlobalization${{parameters.hybridGlobalization}}' + artifactFileName: 'iOSSampleAppLLVMNoSymbolsHybridGlobalization${{parameters.hybridGlobalization}}.zip' ${{ if and(eq(parameters.runtimeType, 'iOSNativeAOT'), eq(parameters.iOSStripSymbols, 'False')) }}: - artifactName: 'iOSSampleAppSymbols' - artifactFileName: 'iOSSampleAppSymbols.zip' + artifactName: 
'iOSSampleAppSymbolsHybridGlobalization${{parameters.hybridGlobalization}}' + artifactFileName: 'iOSSampleAppSymbolsHybridGlobalization${{parameters.hybridGlobalization}}.zip' ${{ if and(eq(parameters.runtimeType, 'iOSNativeAOT'), eq(parameters.iOSStripSymbols, 'True')) }}: - artifactName: 'iOSSampleAppNoSymbols' - artifactFileName: 'iOSSampleAppNoSymbols.zip' + artifactName: 'iOSSampleAppNoSymbolsHybridGlobalization${{parameters.hybridGlobalization}}' + artifactFileName: 'iOSSampleAppNoSymbolsHybridGlobalization${{parameters.hybridGlobalization}}.zip' displayName: 'iOS Sample App' # same artifact as above but don't extract .zip - task: DownloadBuildArtifacts@0 @@ -263,17 +265,17 @@ jobs: downloadType: single downloadPath: '$(Build.SourcesDirectory)/iosHelloWorldZip' ${{ if and(eq(parameters.runtimeType, 'iOSMono'), eq(parameters.iOSLlvmBuild, 'False'), eq(parameters.iOSStripSymbols, 'False')) }}: - artifactName: 'iOSSampleAppNoLLVMSymbols' + artifactName: 'iOSSampleAppNoLLVMSymbolsHybridGlobalization${{parameters.hybridGlobalization}}' ${{ if and(eq(parameters.runtimeType, 'iOSMono'), eq(parameters.iOSLlvmBuild, 'False'), eq(parameters.iOSStripSymbols, 'True')) }}: - artifactName: 'iOSSampleAppNoLLVMNoSymbols' + artifactName: 'iOSSampleAppNoLLVMNoSymbolsHybridGlobalization${{parameters.hybridGlobalization}}' ${{ if and(eq(parameters.runtimeType, 'iOSMono'), eq(parameters.iOSLlvmBuild, 'True'), eq(parameters.iOSStripSymbols, 'False')) }}: - artifactName: 'iOSSampleAppLLVMSymbols' + artifactName: 'iOSSampleAppLLVMSymbolsHybridGlobalization${{parameters.hybridGlobalization}}' ${{ if and(eq(parameters.runtimeType, 'iOSMono'), eq(parameters.iOSLlvmBuild, 'True'), eq(parameters.iOSStripSymbols, 'True')) }}: - artifactName: 'iOSSampleAppLLVMNoSymbols' + artifactName: 'iOSSampleAppLLVMNoSymbolsHybridGlobalization${{parameters.hybridGlobalization}}' ${{ if and(eq(parameters.runtimeType, 'iOSNativeAOT'), eq(parameters.iOSStripSymbols, 'False')) }}: - artifactName: 
'iOSSampleAppSymbols' + artifactName: 'iOSSampleAppSymbolsHybridGlobalization${{parameters.hybridGlobalization}}' ${{ if and(eq(parameters.runtimeType, 'iOSNativeAOT'), eq(parameters.iOSStripSymbols, 'True')) }}: - artifactName: 'iOSSampleAppNoSymbols' + artifactName: 'iOSSampleAppNoSymbolsHybridGlobalization${{parameters.hybridGlobalization}}' checkDownloadedFiles: true # Create Core_Root diff --git a/eng/pipelines/coreclr/templates/run-scenarios-job.yml b/eng/pipelines/coreclr/templates/run-scenarios-job.yml index bc142530cc52d..fe3a9b1e9ba9d 100644 --- a/eng/pipelines/coreclr/templates/run-scenarios-job.yml +++ b/eng/pipelines/coreclr/templates/run-scenarios-job.yml @@ -216,6 +216,6 @@ jobs: displayName: Publish Logs inputs: targetPath: $(Build.SourcesDirectory)/artifacts/log - artifactName: 'Performance_Run_$(osGroup)$(osSubgroup)_$(archType)_$(buildConfig)_${{ parameters.runtimeType }}_${{ parameters.codeGenType }}_${{ parameters.runKind }}_$(iOSLlvmBuild)_$(iOSStripSymbols)' + artifactName: 'Performance_Run_$(osGroup)$(osSubgroup)_$(archType)_$(buildConfig)_${{ parameters.runtimeType }}_${{ parameters.codeGenType }}_${{ parameters.runKind }}_$(iOSLlvmBuild)_$(iOSStripSymbols)_$(hybridGlobalization)' continueOnError: true condition: always() diff --git a/eng/pipelines/extra-platforms/runtime-extra-platforms-android.yml b/eng/pipelines/extra-platforms/runtime-extra-platforms-android.yml index 23c57b87fe538..9fd1769fe18fa 100644 --- a/eng/pipelines/extra-platforms/runtime-extra-platforms-android.yml +++ b/eng/pipelines/extra-platforms/runtime-extra-platforms-android.yml @@ -44,10 +44,11 @@ jobs: # Turn off the testing for now, until https://github.com/dotnet/runtime/issues/60128 gets resolved # ${{ if eq(variables['isRollingBuild'], true) }}: # # extra steps, run tests - # extraStepsTemplate: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml - # extraStepsParameters: - # creator: dotnet-bot - # testRunNamePrefixSuffix: 
Mono_$(_BuildConfig) + # postBuildSteps: + # - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml + # parameters: + # creator: dotnet-bot + # testRunNamePrefixSuffix: Mono_$(_BuildConfig) # extraVariablesTemplates: # - template: /eng/pipelines/common/templates/runtimes/test-variables.yml @@ -77,7 +78,8 @@ jobs: buildArgs: -s mono+libs+libs.tests -c $(_BuildConfig) /p:ArchiveTests=true $(_runSmokeTestsOnlyArg) /p:EnableAdditionalTimezoneChecks=true timeoutInMinutes: 480 # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_$(_BuildConfig) diff --git a/eng/pipelines/extra-platforms/runtime-extra-platforms-androidemulator.yml b/eng/pipelines/extra-platforms/runtime-extra-platforms-androidemulator.yml index 65a890976e739..a114b1b744a13 100644 --- a/eng/pipelines/extra-platforms/runtime-extra-platforms-androidemulator.yml +++ b/eng/pipelines/extra-platforms/runtime-extra-platforms-androidemulator.yml @@ -40,10 +40,11 @@ jobs: buildArgs: -s mono+libs -c $(_BuildConfig) timeoutInMinutes: 240 # extra steps, run tests - extraStepsTemplate: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_$(_BuildConfig) extraVariablesTemplates: - template: /eng/pipelines/common/templates/runtimes/test-variables.yml @@ -78,10 +79,11 @@ jobs: buildArgs: -s mono+libs -c $(_BuildConfig) timeoutInMinutes: 240 # extra steps, run tests - extraStepsTemplate: 
/eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_$(_BuildConfig) extraVariablesTemplates: - template: /eng/pipelines/common/templates/runtimes/test-variables.yml @@ -111,7 +113,8 @@ jobs: buildArgs: -s mono+libs+libs.tests -c $(_BuildConfig) /p:ArchiveTests=true $(_runSmokeTestsOnlyArg) timeoutInMinutes: 180 # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_$(_BuildConfig) diff --git a/eng/pipelines/extra-platforms/runtime-extra-platforms-ioslike.yml b/eng/pipelines/extra-platforms/runtime-extra-platforms-ioslike.yml index 84477d2c25a8a..ef0425042b5e6 100644 --- a/eng/pipelines/extra-platforms/runtime-extra-platforms-ioslike.yml +++ b/eng/pipelines/extra-platforms/runtime-extra-platforms-ioslike.yml @@ -38,11 +38,12 @@ jobs: buildArgs: -s mono+libs+libs.tests -c $(_BuildConfig) /p:ArchiveTests=true /p:DevTeamProvisioning=- /p:RunAOTCompilation=true $(_runSmokeTestsOnlyArg) /p:BuildTestsOnHelix=true /p:EnableAdditionalTimezoneChecks=true /p:UsePortableRuntimePack=true /p:BuildDarwinFrameworks=true /p:IsManualOrRollingBuild=true timeoutInMinutes: 480 # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_$(_BuildConfig) - extraHelixArguments: /p:NeedsToBuildAppsOnHelix=true + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: 
Mono_$(_BuildConfig) + extraHelixArguments: /p:NeedsToBuildAppsOnHelix=true # # iOS/tvOS devices @@ -80,14 +81,15 @@ jobs: - template: /eng/pipelines/common/templates/runtimes/test-variables.yml parameters: testGroup: innerloop - extraStepsTemplate: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml - extraStepsParameters: - creator: dotnet-bot - compileOnHelix: true - interpreter: true - testBuildArgs: /p:ArchiveTests=true /p:DevTeamProvisioning=- /p:RunAOTCompilation=true /p:MonoForceInterpreter=true /p:BuildTestsOnHelix=true - testRunNamePrefixSuffix: Mono_$(_BuildConfig) - extraHelixArguments: /p:NeedsToBuildAppsOnHelix=true + postBuildSteps: + - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml + parameters: + creator: dotnet-bot + compileOnHelix: true + interpreter: true + testBuildArgs: /p:ArchiveTests=true /p:DevTeamProvisioning=- /p:RunAOTCompilation=true /p:MonoForceInterpreter=true /p:BuildTestsOnHelix=true + testRunNamePrefixSuffix: Mono_$(_BuildConfig) + extraHelixArguments: /p:NeedsToBuildAppsOnHelix=true # # iOS/tvOS devices @@ -116,11 +118,12 @@ jobs: buildArgs: --cross -s clr.alljits+clr.tools+clr.nativeaotruntime+clr.nativeaotlibs+libs+libs.tests -c $(_BuildConfig) /p:ArchiveTests=true /p:RunSmokeTestsOnly=true /p:DevTeamProvisioning=- /p:BuildTestsOnHelix=true /p:UseNativeAOTRuntime=true /p:RunAOTCompilation=false /p:ContinuousIntegrationBuild=true timeoutInMinutes: 180 # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig) - extraHelixArguments: /p:NeedsToBuildAppsOnHelix=true + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig) + extraHelixArguments: /p:NeedsToBuildAppsOnHelix=true # # Build the whole product using NativeAOT for iOS/tvOS and run 
runtime tests with iOS/tvOS devices @@ -151,8 +154,9 @@ jobs: - template: /eng/pipelines/common/templates/runtimes/test-variables.yml parameters: testGroup: innerloop - extraStepsTemplate: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml - extraStepsParameters: - creator: dotnet-bot - testBuildArgs: tree nativeaot/SmokeTests /p:BuildNativeAOTRuntimePack=true - testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml + parameters: + creator: dotnet-bot + testBuildArgs: tree nativeaot/SmokeTests /p:BuildNativeAOTRuntimePack=true + testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig) diff --git a/eng/pipelines/extra-platforms/runtime-extra-platforms-ioslikesimulator.yml b/eng/pipelines/extra-platforms/runtime-extra-platforms-ioslikesimulator.yml index c7bcfc15e132b..b11b4be72ed6e 100644 --- a/eng/pipelines/extra-platforms/runtime-extra-platforms-ioslikesimulator.yml +++ b/eng/pipelines/extra-platforms/runtime-extra-platforms-ioslikesimulator.yml @@ -40,11 +40,12 @@ jobs: buildArgs: -s mono+libs+host+packs+libs.tests -c $(_BuildConfig) /p:ArchiveTests=true $(_runSmokeTestsOnlyArg) /p:RunAOTCompilation=true /p:MonoForceInterpreter=true /p:BuildDarwinFrameworks=true timeoutInMinutes: 180 # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - interpreter: true - testRunNamePrefixSuffix: Mono_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + interpreter: true + testRunNamePrefixSuffix: Mono_$(_BuildConfig) # # Build the whole product using Mono for iOSSimulator/tvOSSimulator and run runtime tests with iOS/tvOS simulators @@ -84,14 +85,15 @@ jobs: - template: /eng/pipelines/common/templates/runtimes/test-variables.yml parameters: testGroup: innerloop - extraStepsTemplate: 
/eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml - extraStepsParameters: - creator: dotnet-bot - compileOnHelix: true - interpreter: true - testBuildArgs: /p:ArchiveTests=true /p:DevTeamProvisioning=- /p:RunAOTCompilation=true /p:MonoForceInterpreter=true /p:BuildTestsOnHelix=true - testRunNamePrefixSuffix: Mono_$(_BuildConfig) - extraHelixArguments: /p:NeedsToBuildAppsOnHelix=true + postBuildSteps: + - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml + parameters: + creator: dotnet-bot + compileOnHelix: true + interpreter: true + testBuildArgs: /p:ArchiveTests=true /p:DevTeamProvisioning=- /p:RunAOTCompilation=true /p:MonoForceInterpreter=true /p:BuildTestsOnHelix=true + testRunNamePrefixSuffix: Mono_$(_BuildConfig) + extraHelixArguments: /p:NeedsToBuildAppsOnHelix=true # # Build the whole product using Native AOT for iOSSimulator/tvOSSimulator and run runtime tests with iOS/tvOS simulators @@ -131,8 +133,9 @@ jobs: - template: /eng/pipelines/common/templates/runtimes/test-variables.yml parameters: testGroup: innerloop - extraStepsTemplate: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml - extraStepsParameters: - creator: dotnet-bot - testBuildArgs: tree nativeaot/SmokeTests /p:BuildNativeAOTRuntimePack=true - testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml + parameters: + creator: dotnet-bot + testBuildArgs: tree nativeaot/SmokeTests /p:BuildNativeAOTRuntimePack=true + testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig) diff --git a/eng/pipelines/extra-platforms/runtime-extra-platforms-linuxbionic.yml b/eng/pipelines/extra-platforms/runtime-extra-platforms-linuxbionic.yml index 352eabe9d22d4..7eb5fa47f8082 100644 --- a/eng/pipelines/extra-platforms/runtime-extra-platforms-linuxbionic.yml +++ 
b/eng/pipelines/extra-platforms/runtime-extra-platforms-linuxbionic.yml @@ -42,7 +42,8 @@ jobs: buildArgs: -s mono+libs+host+packs+libs.tests -c $(_BuildConfig) /p:ArchiveTests=true timeoutInMinutes: 480 # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_$(_BuildConfig)_LinuxBionic + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_$(_BuildConfig)_LinuxBionic diff --git a/eng/pipelines/extra-platforms/runtime-extra-platforms-maccatalyst.yml b/eng/pipelines/extra-platforms/runtime-extra-platforms-maccatalyst.yml index 736bf0516dfdf..936fe60bb4833 100644 --- a/eng/pipelines/extra-platforms/runtime-extra-platforms-maccatalyst.yml +++ b/eng/pipelines/extra-platforms/runtime-extra-platforms-maccatalyst.yml @@ -37,10 +37,11 @@ jobs: buildArgs: -s mono+libs+host+packs+libs.tests -c $(_BuildConfig) /p:ArchiveTests=true /p:DevTeamProvisioning=adhoc /p:RunAOTCompilation=true /p:MonoForceInterpreter=true /p:BuildDarwinFrameworks=true timeoutInMinutes: 180 # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_$(_BuildConfig) # # MacCatalyst interp - requires AOT Compilation and Interp flags @@ -70,8 +71,9 @@ jobs: buildArgs: -s mono+libs+host+packs+libs.tests -c $(_BuildConfig) /p:ArchiveTests=true $(_runSmokeTestsOnlyArg) /p:DevTeamProvisioning=adhoc /p:RunAOTCompilation=true /p:MonoForceInterpreter=true /p:BuildDarwinFrameworks=true /p:EnableAppSandbox=true timeoutInMinutes: 180 # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - interpreter: true - 
testRunNamePrefixSuffix: Mono_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + interpreter: true + testRunNamePrefixSuffix: Mono_$(_BuildConfig) diff --git a/eng/pipelines/extra-platforms/runtime-extra-platforms-other.yml b/eng/pipelines/extra-platforms/runtime-extra-platforms-other.yml index 42900f2e36ff2..e518d64988c93 100644 --- a/eng/pipelines/extra-platforms/runtime-extra-platforms-other.yml +++ b/eng/pipelines/extra-platforms/runtime-extra-platforms-other.yml @@ -120,15 +120,16 @@ jobs: eq(dependencies.evaluate_paths.outputs['SetPathVars_installer.containsChange'], true), eq(variables['isRollingBuild'], true)) # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_$(_BuildConfig) - condition: >- - or( - eq(variables['librariesContainsChange'], true), - eq(variables['monoContainsChange'], true), - eq(variables['isRollingBuild'], true)) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_$(_BuildConfig) + condition: >- + or( + eq(variables['librariesContainsChange'], true), + eq(variables['monoContainsChange'], true), + eq(variables['isRollingBuild'], true)) # # Build the whole product using Mono and run runtime tests @@ -221,11 +222,12 @@ jobs: eq(dependencies.evaluate_paths.outputs['SetPathVars_mono_excluding_wasm.containsChange'], true), eq(dependencies.evaluate_paths.outputs['SetPathVars_runtimetests.containsChange'], true), eq(variables['isRollingBuild'], true)) - extraStepsTemplate: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml - extraStepsParameters: - creator: dotnet-bot - llvmAotStepContainer: linux_x64_llvmaot - testRunNamePrefixSuffix: Mono_Release + postBuildSteps: + - template: 
/eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml + parameters: + creator: dotnet-bot + llvmAotStepContainer: linux_x64_llvmaot + testRunNamePrefixSuffix: Mono_Release extraVariablesTemplates: - template: /eng/pipelines/common/templates/runtimes/test-variables.yml diff --git a/eng/pipelines/libraries/outerloop-mono.yml b/eng/pipelines/libraries/outerloop-mono.yml index e15fc35f9d41c..34b1af3c71b56 100644 --- a/eng/pipelines/libraries/outerloop-mono.yml +++ b/eng/pipelines/libraries/outerloop-mono.yml @@ -39,11 +39,12 @@ extends: timeoutInMinutes: 180 includeAllPlatforms: ${{ variables['isRollingBuild'] }} # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - testScope: outerloop - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + testScope: outerloop + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_$(_BuildConfig) - template: /eng/pipelines/common/platform-matrix.yml parameters: @@ -60,10 +61,11 @@ extends: timeoutInMinutes: 180 includeAllPlatforms: ${{ variables['isRollingBuild'] }} # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - scenarios: - - normal - testScope: outerloop - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + scenarios: + - normal + testScope: outerloop + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_$(_BuildConfig) diff --git a/eng/pipelines/libraries/outerloop.yml b/eng/pipelines/libraries/outerloop.yml index 121d405fc7c7c..597f298c37a3e 100644 --- a/eng/pipelines/libraries/outerloop.yml +++ b/eng/pipelines/libraries/outerloop.yml @@ -45,11 +45,12 @@ extends: timeoutInMinutes: 180 includeAllPlatforms: ${{ variables['isRollingBuild'] }} # extra steps, run tests - extraStepsTemplate: 
/eng/pipelines/libraries/helix.yml - extraStepsParameters: - testScope: outerloop - creator: dotnet-bot - testRunNamePrefixSuffix: CoreCLR_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + testScope: outerloop + creator: dotnet-bot + testRunNamePrefixSuffix: CoreCLR_$(_BuildConfig) - ${{ if eq(variables['isRollingBuild'], false) }}: - template: /eng/pipelines/common/platform-matrix.yml @@ -73,11 +74,12 @@ extends: timeoutInMinutes: 180 includeAllPlatforms: ${{ variables['isRollingBuild'] }} # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - testScope: outerloop - creator: dotnet-bot - testRunNamePrefixSuffix: CoreCLR_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + testScope: outerloop + creator: dotnet-bot + testRunNamePrefixSuffix: CoreCLR_$(_BuildConfig) - ${{ if eq(variables['includeWindowsOuterloop'], true) }}: - template: /eng/pipelines/common/platform-matrix.yml @@ -97,8 +99,9 @@ extends: timeoutInMinutes: 180 includeAllPlatforms: ${{ variables['isRollingBuild'] }} # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - testScope: outerloop - creator: dotnet-bot - extraHelixArguments: /p:BuildTargetFramework=net48 + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + testScope: outerloop + creator: dotnet-bot + extraHelixArguments: /p:BuildTargetFramework=net48 diff --git a/eng/pipelines/runtime-android-grpc-client-tests.yml b/eng/pipelines/runtime-android-grpc-client-tests.yml index 00e51f766d95c..b6613db9f8b84 100644 --- a/eng/pipelines/runtime-android-grpc-client-tests.yml +++ b/eng/pipelines/runtime-android-grpc-client-tests.yml @@ -43,8 +43,9 @@ extends: buildArgs: -s mono+libs+host+packs+libs.tests -c $(_BuildConfig) /p:ArchiveTests=true /p:RunGrpcTestsOnly=true /p:BuildGrpcServerDockerImage=true timeoutInMinutes: 180 # 
extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - extraHelixArguments: /p:RunGrpcTestsOnly=true /p:BuildGrpcServerDockerImage=true - testRunNamePrefixSuffix: Mono_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + extraHelixArguments: /p:RunGrpcTestsOnly=true /p:BuildGrpcServerDockerImage=true + testRunNamePrefixSuffix: Mono_$(_BuildConfig) diff --git a/eng/pipelines/runtime-community.yml b/eng/pipelines/runtime-community.yml index bab086f75c23d..a91388e244b0b 100644 --- a/eng/pipelines/runtime-community.yml +++ b/eng/pipelines/runtime-community.yml @@ -71,15 +71,16 @@ extends: eq(dependencies.evaluate_paths.outputs['SetPathVars_mono_excluding_wasm.containsChange'], true), eq(variables['isRollingBuild'], true)) # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_$(_BuildConfig) - condition: >- - or( - eq(variables['librariesContainsChange'], true), - eq(variables['monoContainsChange'], true), - eq(variables['isRollingBuild'], true)) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_$(_BuildConfig) + condition: >- + or( + eq(variables['librariesContainsChange'], true), + eq(variables['monoContainsChange'], true), + eq(variables['isRollingBuild'], true)) # # Build the whole product using Mono @@ -138,7 +139,8 @@ extends: eq(variables['isRollingBuild'], true)) ${{ if eq(variables['isRollingBuild'], true) }}: # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_$(_BuildConfig) 
diff --git a/eng/pipelines/runtime-linker-tests.yml b/eng/pipelines/runtime-linker-tests.yml index ecba36a7b02c9..ee9c4803eb44c 100644 --- a/eng/pipelines/runtime-linker-tests.yml +++ b/eng/pipelines/runtime-linker-tests.yml @@ -108,7 +108,8 @@ extends: eq(dependencies.evaluate_paths.outputs['SetPathVars_non_mono_and_wasm.containsChange'], true), eq(variables['isRollingBuild'], true)) buildArgs: -s clr+libs+tools.illink -c $(_BuildConfig) - extraStepsTemplate: /eng/pipelines/libraries/execute-trimming-tests-steps.yml + postBuildSteps: + - template: /eng/pipelines/libraries/execute-trimming-tests-steps.yml # # Build Release config vertical for Browser-wasm @@ -131,6 +132,7 @@ extends: eq(dependencies.evaluate_paths.outputs['SetPathVars_wasm_specific_except_wbt_dbg.containsChange'], true), eq(dependencies.evaluate_paths.outputs['SetPathVars_tools_illink.containsChange'], true), eq(dependencies.evaluate_paths.outputs['DarcDependenciesChanged.Microsoft_NET_ILLink_Tasks'], true)) - extraStepsTemplate: /eng/pipelines/libraries/execute-trimming-tests-steps.yml - extraStepsParameters: - extraTestArgs: '/p:WasmBuildNative=false' + postBuildSteps: + - template: /eng/pipelines/libraries/execute-trimming-tests-steps.yml + parameters: + extraTestArgs: '/p:WasmBuildNative=false' diff --git a/eng/pipelines/runtime-llvm.yml b/eng/pipelines/runtime-llvm.yml index e31e623a0353c..9d358e5f79308 100644 --- a/eng/pipelines/runtime-llvm.yml +++ b/eng/pipelines/runtime-llvm.yml @@ -119,7 +119,7 @@ extends: testGroup: innerloop nameSuffix: AllSubsets_Mono_LLVMAOT buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) - /p:MonoEnableLLVM=true /p:MonoBundleLLVMOptimizer=true + /p:MonoEnableLLVM=true /p:MonoAOTEnableLLVM=true /p:MonoBundleLLVMOptimizer=true condition: >- or( eq(dependencies.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true), @@ -138,7 +138,7 @@ extends: testGroup: innerloop nameSuffix: AllSubsets_Mono_LLVMAOT buildArgs: -s mono+libs+host+packs -c 
$(_BuildConfig) - /p:MonoEnableLLVM=true /p:MonoBundleLLVMOptimizer=true + /p:MonoEnableLLVM=true /p:MonoAOTEnableLLVM=true /p:MonoBundleLLVMOptimizer=true condition: >- or( eq(dependencies.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true), diff --git a/eng/pipelines/runtime-official.yml b/eng/pipelines/runtime-official.yml index 0579ef0feda81..d8fd9bacc23df 100644 --- a/eng/pipelines/runtime-official.yml +++ b/eng/pipelines/runtime-official.yml @@ -124,9 +124,10 @@ extends: buildArgs: -s clr.nativeaotlibs+clr.nativeaotruntime+libs+packs -c $(_BuildConfig) /p:BuildNativeAOTRuntimePack=true /p:SkipLibrariesNativeRuntimePackages=true nameSuffix: AllSubsets_NativeAOT isOfficialBuild: ${{ variables.isOfficialBuild }} - extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml - extraStepsParameters: - name: NativeAOTRuntimePacks + postBuildSteps: + - template: /eng/pipelines/common/upload-intermediate-artifacts-step.yml + parameters: + name: NativeAOTRuntimePacks # # Build Mono runtime packs @@ -166,9 +167,10 @@ extends: buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) /p:BuildMonoAOTCrossCompiler=false nameSuffix: AllSubsets_Mono isOfficialBuild: ${{ variables.isOfficialBuild }} - extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml - extraStepsParameters: - name: MonoRuntimePacks + postBuildSteps: + - template: /eng/pipelines/common/upload-intermediate-artifacts-step.yml + parameters: + name: MonoRuntimePacks - template: /eng/pipelines/common/platform-matrix.yml parameters: @@ -182,9 +184,10 @@ extends: buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) /p:AotHostArchitecture=x64 /p:AotHostOS=$(_hostedOS) nameSuffix: AllSubsets_Mono isOfficialBuild: ${{ variables.isOfficialBuild }} - extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml - extraStepsParameters: - name: MonoRuntimePacks + postBuildSteps: + - template: 
/eng/pipelines/common/upload-intermediate-artifacts-step.yml + parameters: + name: MonoRuntimePacks - template: /eng/pipelines/common/platform-matrix.yml parameters: @@ -198,9 +201,10 @@ extends: nameSuffix: AllSubsets_Mono_multithread isOfficialBuild: ${{ variables.isOfficialBuild }} runtimeVariant: multithread - extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml - extraStepsParameters: - name: MonoRuntimePacks + postBuildSteps: + - template: /eng/pipelines/common/upload-intermediate-artifacts-step.yml + parameters: + name: MonoRuntimePacks # Build Mono AOT offset headers once, for consumption elsewhere # @@ -242,9 +246,10 @@ extends: - android - browser isOfficialBuild: ${{ variables.isOfficialBuild }} - extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml - extraStepsParameters: - name: MonoRuntimePacks + postBuildSteps: + - template: /eng/pipelines/common/upload-intermediate-artifacts-step.yml + parameters: + name: MonoRuntimePacks - template: /eng/pipelines/common/platform-matrix.yml parameters: @@ -265,9 +270,10 @@ extends: - android - browser isOfficialBuild: ${{ variables.isOfficialBuild }} - extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml - extraStepsParameters: - name: MonoRuntimePacks + postBuildSteps: + - template: /eng/pipelines/common/upload-intermediate-artifacts-step.yml + parameters: + name: MonoRuntimePacks - template: /eng/pipelines/common/platform-matrix.yml parameters: @@ -295,9 +301,10 @@ extends: - ios - maccatalyst isOfficialBuild: ${{ variables.isOfficialBuild }} - extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml - extraStepsParameters: - name: MonoRuntimePacks + postBuildSteps: + - template: /eng/pipelines/common/upload-intermediate-artifacts-step.yml + parameters: + name: MonoRuntimePacks # # Build Mono LLVM runtime packs @@ -325,22 +332,24 @@ extends: nameSuffix: AllSubsets_Mono_LLVMJIT runtimeVariant: LLVMJIT 
isOfficialBuild: ${{ variables.isOfficialBuild }} - extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml - extraStepsParameters: - name: MonoRuntimePacks + postBuildSteps: + - template: /eng/pipelines/common/upload-intermediate-artifacts-step.yml + parameters: + name: MonoRuntimePacks #LLVMAOT - jobTemplate: /eng/pipelines/common/global-build-job.yml buildConfig: release runtimeFlavor: mono jobParameters: buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) - /p:MonoEnableLLVM=true /p:MonoBundleLLVMOptimizer=true + /p:MonoEnableLLVM=true /p:MonoAOTEnableLLVM=true /p:MonoBundleLLVMOptimizer=true nameSuffix: AllSubsets_Mono_LLVMAOT runtimeVariant: LLVMAOT isOfficialBuild: ${{ variables.isOfficialBuild }} - extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml - extraStepsParameters: - name: MonoRuntimePacks + postBuildSteps: + - template: /eng/pipelines/common/upload-intermediate-artifacts-step.yml + parameters: + name: MonoRuntimePacks # # Build libraries using live CoreLib from CoreCLR @@ -395,9 +404,10 @@ extends: - SourceBuild_linux_x64 jobParameters: nameSuffix: PortableSourceBuild - extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml - extraStepsParameters: - name: SourceBuildPackages + postBuildSteps: + - template: /eng/pipelines/common/upload-intermediate-artifacts-step.yml + parameters: + name: SourceBuildPackages timeoutInMinutes: 95 # @@ -439,12 +449,13 @@ extends: - windows_arm64 - linux_arm64 jobParameters: - buildArgs: -s clr.native+clr.corelib+clr.tools+clr.nativecorelib+libs+host+packs -c $(_BuildConfig) -pgoinstrument + buildArgs: -s clr.native+clr.corelib+clr.tools+clr.nativecorelib+libs+host+packs -c $(_BuildConfig) -pgoinstrument /p:SkipLibrariesNativeRuntimePackages=true isOfficialBuild: ${{ variables.isOfficialBuild }} nameSuffix: PGO - extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml - extraStepsParameters: - name: PGO + 
postBuildSteps: + - template: /eng/pipelines/common/upload-intermediate-artifacts-step.yml + parameters: + name: PGO timeoutInMinutes: 95 # diff --git a/eng/pipelines/runtime-sanitized.yml b/eng/pipelines/runtime-sanitized.yml index 3bc49fec69083..5db421762ed0d 100644 --- a/eng/pipelines/runtime-sanitized.yml +++ b/eng/pipelines/runtime-sanitized.yml @@ -38,13 +38,14 @@ extends: buildArgs: -s clr+libs -c $(_BuildConfig) $(_nativeSanitizersArg) timeoutInMinutes: 300 # extra steps, run tests - extraStepsTemplate: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: CoreCLR_$(_BuildConfig) - scenarios: - - normal - - no_tiered_compilation + postBuildSteps: + - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: CoreCLR_$(_BuildConfig) + scenarios: + - normal + - no_tiered_compilation extraVariablesTemplates: - template: /eng/pipelines/common/templates/runtimes/test-variables.yml parameters: @@ -72,12 +73,13 @@ extends: buildArgs: -s clr+libs+libs.tests -c $(_BuildConfig) -rc Checked $(_nativeSanitizersArg) /p:ArchiveTests=true timeoutInMinutes: 180 # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Libraries_$(_BuildConfig) - scenarios: - - normal + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: Libraries_$(_BuildConfig) + scenarios: + - normal # # NativeAOT release build and smoke tests with AddressSanitizer @@ -98,11 +100,12 @@ extends: timeoutInMinutes: 120 nameSuffix: NativeAOT buildArgs: -s clr.aot+host.native+libs -rc $(_BuildConfig) -lc Release -hc Release $(_nativeSanitizersArg) - extraStepsTemplate: /eng/pipelines/coreclr/nativeaot-post-build-steps.yml - 
extraStepsParameters: - creator: dotnet-bot - testBuildArgs: nativeaot tree nativeaot - liveLibrariesBuildConfig: Release + postBuildSteps: + - template: /eng/pipelines/coreclr/nativeaot-post-build-steps.yml + parameters: + creator: dotnet-bot + testBuildArgs: nativeaot tree nativeaot + liveLibrariesBuildConfig: Release extraVariablesTemplates: - template: /eng/pipelines/common/templates/runtimes/test-variables.yml parameters: diff --git a/eng/pipelines/runtime-wasm-perf.yml b/eng/pipelines/runtime-wasm-perf.yml index bd6a6d979e3e4..120cc2df85307 100644 --- a/eng/pipelines/runtime-wasm-perf.yml +++ b/eng/pipelines/runtime-wasm-perf.yml @@ -4,6 +4,20 @@ trigger: none +pr: + branches: + include: + - main + paths: + include: + - eng/pipelines/runtime-wasm-perf.yml + - eng/pipelines/coreclr/perf*.yml + - eng/pipelines/coreclr/templates/perf-job.yml + - eng/pipelines/coreclr/templates/run-perf* + - eng/pipelines/coreclr/templates/run-scenarios-job.yml + - eng/testing/performance/* + - eng/testing/ChromeVersions.props + variables: - template: /eng/pipelines/common/variables.yml diff --git a/eng/pipelines/runtime.yml b/eng/pipelines/runtime.yml index 9f53f28da3098..22f4ceab01136 100644 --- a/eng/pipelines/runtime.yml +++ b/eng/pipelines/runtime.yml @@ -108,9 +108,10 @@ extends: testGroup: innerloop nameSuffix: Native_GCC buildArgs: -s clr.native+libs.native+mono+host.native -c $(_BuildConfig) -gcc - extraStepsTemplate: /eng/pipelines/common/templates/runtimes/build-runtime-tests.yml - extraStepsParameters: - testBuildArgs: skipmanaged skipgeneratelayout skiprestorepackages -gcc + postBuildSteps: + - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests.yml + parameters: + testBuildArgs: skipmanaged skipgeneratelayout skiprestorepackages -gcc condition: >- or( eq(dependencies.evaluate_paths.outputs['SetPathVars_coreclr.containsChange'], true), @@ -254,12 +255,13 @@ extends: timeoutInMinutes: 120 nameSuffix: NativeAOT buildArgs: -s clr.aot+host.native+libs 
-rc $(_BuildConfig) -lc Release -hc Release - extraStepsTemplate: /eng/pipelines/coreclr/nativeaot-post-build-steps.yml - extraStepsParameters: - creator: dotnet-bot - testBuildArgs: nativeaot tree nativeaot - liveLibrariesBuildConfig: Release - testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/coreclr/nativeaot-post-build-steps.yml + parameters: + creator: dotnet-bot + testBuildArgs: nativeaot tree nativeaot + liveLibrariesBuildConfig: Release + testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig) extraVariablesTemplates: - template: /eng/pipelines/common/templates/runtimes/test-variables.yml parameters: @@ -291,12 +293,13 @@ extends: timeoutInMinutes: 180 nameSuffix: NativeAOT buildArgs: -s clr.aot+host.native+libs -rc $(_BuildConfig) -lc Release -hc Release - extraStepsTemplate: /eng/pipelines/coreclr/nativeaot-post-build-steps.yml - extraStepsParameters: - creator: dotnet-bot - testBuildArgs: 'nativeaot tree ";nativeaot;Loader;Interop;tracing;" /p:BuildNativeAotFrameworkObjects=true' - liveLibrariesBuildConfig: Release - testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/coreclr/nativeaot-post-build-steps.yml + parameters: + creator: dotnet-bot + testBuildArgs: 'nativeaot tree ";nativeaot;Loader;Interop;" /p:BuildNativeAotFrameworkObjects=true' + liveLibrariesBuildConfig: Release + testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig) extraVariablesTemplates: - template: /eng/pipelines/common/templates/runtimes/test-variables.yml parameters: @@ -334,12 +337,13 @@ extends: timeoutInMinutes: 120 nameSuffix: NativeAOT buildArgs: -s clr.aot+host.native+libs+tools.illink -c $(_BuildConfig) -rc $(_BuildConfig) -lc Release -hc Release - extraStepsTemplate: /eng/pipelines/coreclr/nativeaot-post-build-steps.yml - extraStepsParameters: - creator: dotnet-bot - testBuildArgs: 'nativeaot tree ";nativeaot;tracing/eventpipe/providervalidation;"' - liveLibrariesBuildConfig: Release - 
testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/coreclr/nativeaot-post-build-steps.yml + parameters: + creator: dotnet-bot + testBuildArgs: 'nativeaot tree ";nativeaot;tracing/eventpipe/providervalidation;"' + liveLibrariesBuildConfig: Release + testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig) extraVariablesTemplates: - template: /eng/pipelines/common/templates/runtimes/test-variables.yml parameters: @@ -371,10 +375,11 @@ extends: buildArgs: -s clr.aot+host.native+libs+libs.tests -c $(_BuildConfig) /p:TestNativeAot=true /p:RunSmokeTestsOnly=true /p:ArchiveTests=true timeoutInMinutes: 240 # Doesn't actually take long, but we've seen the ARM64 Helix queue often get backlogged for 2+ hours # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig) condition: >- or( eq(dependencies.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true), @@ -673,16 +678,17 @@ extends: eq(dependencies.evaluate_paths.outputs['SetPathVars_installer.containsChange'], true), eq(variables['isRollingBuild'], true)) # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_$(_BuildConfig) - extraHelixArguments: /p:NeedsToBuildAppsOnHelix=true - condition: >- - or( - eq(variables['librariesContainsChange'], true), - eq(variables['monoContainsChange'], true), - eq(variables['isRollingBuild'], true)) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_$(_BuildConfig) + extraHelixArguments: /p:NeedsToBuildAppsOnHelix=true + condition: >- + or( + 
eq(variables['librariesContainsChange'], true), + eq(variables['monoContainsChange'], true), + eq(variables['isRollingBuild'], true)) # # iOS/tvOS devices @@ -715,16 +721,17 @@ extends: eq(dependencies.evaluate_paths.outputs['SetPathVars_installer.containsChange'], true), eq(variables['isRollingBuild'], true)) # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig) - extraHelixArguments: /p:NeedsToBuildAppsOnHelix=true - condition: >- - or( - eq(variables['librariesContainsChange'], true), - eq(variables['coreclrContainsChange'], true), - eq(variables['isRollingBuild'], true)) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig) + extraHelixArguments: /p:NeedsToBuildAppsOnHelix=true + condition: >- + or( + eq(variables['librariesContainsChange'], true), + eq(variables['coreclrContainsChange'], true), + eq(variables['isRollingBuild'], true)) # # MacCatalyst interp - requires AOT Compilation and Interp flags @@ -758,15 +765,16 @@ extends: eq(dependencies.evaluate_paths.outputs['SetPathVars_installer.containsChange'], true), eq(variables['isRollingBuild'], true)) # extra steps, run tests - extraStepsTemplate: /eng/pipelines/libraries/helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_$(_BuildConfig) - condition: >- - or( - eq(variables['librariesContainsChange'], true), - eq(variables['monoContainsChange'], true), - eq(variables['isRollingBuild'], true)) + postBuildSteps: + - template: /eng/pipelines/libraries/helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_$(_BuildConfig) + condition: >- + or( + eq(variables['librariesContainsChange'], true), + eq(variables['monoContainsChange'], true), + eq(variables['isRollingBuild'], true)) # # Build Mono and Installer on LLVMJIT mode @@ 
-825,7 +833,7 @@ extends: testGroup: innerloop nameSuffix: AllSubsets_Mono_LLVMAOT buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) - /p:MonoEnableLLVM=true /p:MonoBundleLLVMOptimizer=true + /p:MonoEnableLLVM=true /p:MonoAOTEnableLLVM=true /p:MonoBundleLLVMOptimizer=true condition: >- or( eq(dependencies.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true), @@ -844,7 +852,7 @@ extends: testGroup: innerloop nameSuffix: AllSubsets_Mono_LLVMAOT buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) - /p:MonoEnableLLVM=true /p:MonoBundleLLVMOptimizer=true + /p:MonoEnableLLVM=true /p:MonoAOTEnableLLVM=true /p:MonoBundleLLVMOptimizer=true condition: >- or( eq(dependencies.evaluate_paths.outputs['SetPathVars_libraries.containsChange'], true), @@ -1299,10 +1307,11 @@ extends: eq(dependencies.evaluate_paths.outputs['SetPathVars_runtimetests.containsChange'], true), eq(variables['isRollingBuild'], true)) - extraStepsTemplate: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_Release + postBuildSteps: + - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml + parameters: + creator: dotnet-bot + testRunNamePrefixSuffix: Mono_Release extraVariablesTemplates: - template: /eng/pipelines/common/templates/runtimes/test-variables.yml @@ -1334,10 +1343,11 @@ extends: eq(dependencies.evaluate_paths.outputs['SetPathVars_mono_excluding_wasm.containsChange'], true), eq(dependencies.evaluate_paths.outputs['SetPathVars_runtimetests.containsChange'], true), eq(variables['isRollingBuild'], true)) - extraStepsTemplate: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml - extraStepsParameters: - creator: dotnet-bot - testRunNamePrefixSuffix: Mono_Release + postBuildSteps: + - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml + parameters: + creator: 
dotnet-bot + testRunNamePrefixSuffix: Mono_Release extraVariablesTemplates: - template: /eng/pipelines/common/templates/runtimes/test-variables.yml # @@ -1363,7 +1373,7 @@ extends: testGroup: innerloop nameSuffix: AllSubsets_Mono_LLVMAot_RuntimeTests runtimeVariant: llvmaot - buildArgs: -s mono+libs+clr.hosts+clr.iltools -c Release /p:MonoEnableLLVM=true /p:MonoBundleLLVMOptimizer=true + buildArgs: -s mono+libs+clr.hosts+clr.iltools -c Release /p:MonoEnableLLVM=true /p:MonoAOTEnableLLVM=true /p:MonoBundleLLVMOptimizer=true timeoutInMinutes: 180 condition: >- @@ -1371,11 +1381,12 @@ extends: eq(dependencies.evaluate_paths.outputs['SetPathVars_mono_excluding_wasm.containsChange'], true), eq(dependencies.evaluate_paths.outputs['SetPathVars_runtimetests.containsChange'], true), eq(variables['isRollingBuild'], true)) - extraStepsTemplate: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml - extraStepsParameters: - creator: dotnet-bot - llvmAotStepContainer: linux_x64_llvmaot - testRunNamePrefixSuffix: Mono_Release + postBuildSteps: + - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml + parameters: + creator: dotnet-bot + llvmAotStepContainer: linux_x64_llvmaot + testRunNamePrefixSuffix: Mono_Release extraVariablesTemplates: - template: /eng/pipelines/common/templates/runtimes/test-variables.yml diff --git a/eng/pipelines/runtimelab.yml b/eng/pipelines/runtimelab.yml index a5c4e03343311..7c34126757d73 100644 --- a/eng/pipelines/runtimelab.yml +++ b/eng/pipelines/runtimelab.yml @@ -65,9 +65,10 @@ extends: timeoutInMinutes: 100 testGroup: innerloop buildArgs: -s clr+libs+host+packs -c debug -runtimeConfiguration Checked - extraStepsTemplate: /eng/pipelines/runtimelab/runtimelab-post-build-steps.yml - extraStepsParameters: - uploadRuntimeTests: true + postBuildSteps: + - template: /eng/pipelines/runtimelab/runtimelab-post-build-steps.yml + parameters: + uploadRuntimeTests: true # # Build with 
Release config and Release runtimeConfiguration @@ -83,10 +84,11 @@ extends: timeoutInMinutes: 100 isOfficialBuild: ${{ variables.isOfficialBuild }} testGroup: innerloop - extraStepsTemplate: /eng/pipelines/runtimelab/runtimelab-post-build-steps.yml - extraStepsParameters: - uploadLibrariesTests: ${{ eq(variables.isOfficialBuild, false) }} - uploadIntermediateArtifacts: false + postBuildSteps: + - template: /eng/pipelines/runtimelab/runtimelab-post-build-steps.yml + parameters: + uploadLibrariesTests: ${{ eq(variables.isOfficialBuild, false) }} + uploadIntermediateArtifacts: false ${{ if eq(variables.isOfficialBuild, false) }}: buildArgs: -s clr+libs+libs.tests+host+packs -c $(_BuildConfig) /p:ArchiveTests=true ${{ if eq(variables.isOfficialBuild, true) }}: @@ -107,11 +109,12 @@ extends: nameSuffix: AllConfigurations buildArgs: -s libs -c $(_BuildConfig) -allConfigurations ${{ if eq(variables.isOfficialBuild, true) }}: - extraStepsTemplate: /eng/pipelines/runtimelab/runtimelab-post-build-steps.yml - extraStepsParameters: - uploadIntermediateArtifacts: true - isOfficialBuild: true - librariesBinArtifactName: libraries_bin_official_allconfigurations + postBuildSteps: + - template: /eng/pipelines/runtimelab/runtimelab-post-build-steps.yml + parameters: + uploadIntermediateArtifacts: true + isOfficialBuild: true + librariesBinArtifactName: libraries_bin_official_allconfigurations # Installer official builds need to build installers and need the libraries all configurations build - ${{ if eq(variables.isOfficialBuild, true) }}: diff --git a/eng/targetingpacks.targets b/eng/targetingpacks.targets index 2c34b5817b995..853a4857c4d6d 100644 --- a/eng/targetingpacks.targets +++ b/eng/targetingpacks.targets @@ -7,6 +7,7 @@ - MicrosoftNetCoreAppRefPackDir - optional: MicrosoftNetCoreAppRuntimePackDir - optional: AppHostSourcePath & SingleFileHostSourcePath + - optional: Crossgen2Dir --> @@ -19,57 +20,69 @@ '$(TargetFrameworkIdentifier)' == '.NETCoreApp' and 
'$(TargetFrameworkVersion)' == 'v$(NetCoreAppCurrentVersion)'"> true - $(UseLocalTargetingRuntimePack) + + + false - false + + $(UseLocalTargetingRuntimePack) + false + + $(UseLocalTargetingRuntimePack) + $(UseLocalTargetingRuntimePack) + + - + + Condition="'$(UseLocalTargetingRuntimePack)' == 'true' and '@(KnownFrameworkReference->AnyHaveMetadataValue('TargetFramework', '$(NetCoreAppCurrent)'))' != 'true'" /> - + Condition="'$(UseLocalTargetingRuntimePack)' == 'true' and '@(KnownRuntimePack->AnyHaveMetadataValue('TargetFramework', '$(NetCoreAppCurrent)')->AnyHaveMetadataValue('RuntimePackLabels', 'Mono'))' != 'true'" /> + - - - + Condition="'$(UseLocalILCompilerPack)' == 'true' and '@(KnownILCompilerPack->AnyHaveMetadataValue('TargetFramework', '$(NetCoreAppCurrent)'))' != 'true'" /> + + Condition="'$(UseLocalAppHostPack)' == 'true' and '@(KnownAppHostPack->AnyHaveMetadataValue('TargetFramework', '$(NetCoreAppCurrent)'))' != 'true'" /> @@ -100,36 +113,15 @@ - - - - - - - - - - - - - + + @@ -156,6 +148,16 @@ + + + + + + + + + + + + + + + + diff --git a/eng/testing/ChromeVersions.props b/eng/testing/ChromeVersions.props index 2b8afda96b421..e184f534e535a 100644 --- a/eng/testing/ChromeVersions.props +++ b/eng/testing/ChromeVersions.props @@ -1,11 +1,11 @@ - 117.0.5938.92 + 117.0.5938.132 1181205 https://storage.googleapis.com/chromium-browser-snapshots/Linux_x64/1181205 11.7.439 - 117.0.5938.92 + 117.0.5938.132 1181205 https://storage.googleapis.com/chromium-browser-snapshots/Win_x64/1181217 11.7.439 diff --git a/eng/testing/WasmRunnerAOTTemplate.sh b/eng/testing/WasmRunnerAOTTemplate.sh index 856fb7ebbea1b..24b23c501f211 100644 --- a/eng/testing/WasmRunnerAOTTemplate.sh +++ b/eng/testing/WasmRunnerAOTTemplate.sh @@ -34,14 +34,6 @@ if [[ -z "$XHARNESS_COMMAND" ]]; then fi if [[ "$XHARNESS_COMMAND" == "test" ]]; then - if [[ -z "$JS_ENGINE" ]]; then - if [[ "$SCENARIO" == "WasmTestOnNodeJS" || "$SCENARIO" == "wasmtestonnodejs" ]]; then - JS_ENGINE="--engine=NodeJS" - 
else - JS_ENGINE="--engine=V8" - fi - fi - if [[ -z "$JS_ENGINE_ARGS" ]]; then JS_ENGINE_ARGS="--engine-arg=--stack-trace-limit=1000" if [[ "$SCENARIO" != "WasmTestOnNodeJS" && "$SCENARIO" != "wasmtestonnodejs" ]]; then @@ -55,6 +47,17 @@ if [[ "$XHARNESS_COMMAND" == "test" ]]; then if [[ -z "$MAIN_JS" ]]; then MAIN_JS="--js-file=test-main.js" fi + + if [[ -z "$JS_ENGINE" ]]; then + if [[ "$SCENARIO" == "WasmTestOnNodeJS" || "$SCENARIO" == "wasmtestonnodejs" ]]; then + JS_ENGINE="--engine=NodeJS" + else + JS_ENGINE="--engine=V8" + if [[ -n "$V8_PATH_FOR_TESTS" ]]; then + JS_ENGINE_ARGS="$JS_ENGINE_ARGS --js-engine-path=$V8_PATH_FOR_TESTS" + fi + fi + fi fi if [[ -z "$XHARNESS_ARGS" ]]; then @@ -123,4 +126,4 @@ echo ----- end $(date) ----- exit code $_exitCode ------------------------------ echo "XHarness artifacts: $XHARNESS_OUT" -exit $_exitCode \ No newline at end of file +exit $_exitCode diff --git a/eng/testing/WasmRunnerTemplate.sh b/eng/testing/WasmRunnerTemplate.sh index 8b4e9adb10c2e..71347666cde80 100644 --- a/eng/testing/WasmRunnerTemplate.sh +++ b/eng/testing/WasmRunnerTemplate.sh @@ -34,14 +34,6 @@ if [[ -z "$XHARNESS_COMMAND" ]]; then fi if [[ "$XHARNESS_COMMAND" == "test" ]]; then - if [[ -z "$JS_ENGINE" ]]; then - if [[ "$SCENARIO" == "WasmTestOnNodeJS" || "$SCENARIO" == "wasmtestonnodejs" ]]; then - JS_ENGINE="--engine=NodeJS" - else - JS_ENGINE="--engine=V8" - fi - fi - if [[ -z "$MAIN_JS" ]]; then MAIN_JS="--js-file=test-main.js" fi @@ -55,6 +47,17 @@ if [[ "$XHARNESS_COMMAND" == "test" ]]; then JS_ENGINE_ARGS="$JS_ENGINE_ARGS --engine-arg=--experimental-wasm-eh" fi fi + + if [[ -z "$JS_ENGINE" ]]; then + if [[ "$SCENARIO" == "WasmTestOnNodeJS" || "$SCENARIO" == "wasmtestonnodejs" ]]; then + JS_ENGINE="--engine=NodeJS" + else + JS_ENGINE="--engine=V8" + if [[ -n "$V8_PATH_FOR_TESTS" ]]; then + JS_ENGINE_ARGS="$JS_ENGINE_ARGS --js-engine-path=$V8_PATH_FOR_TESTS" + fi + fi + fi fi if [[ -z "$XHARNESS_ARGS" ]]; then diff --git 
a/eng/testing/performance/blazor_perf.proj b/eng/testing/performance/blazor_perf.proj index a526a8cc3b5de..9ae6177f99251 100644 --- a/eng/testing/performance/blazor_perf.proj +++ b/eng/testing/performance/blazor_perf.proj @@ -9,8 +9,13 @@ python3 $(HelixPreCommands);chmod +x $HELIX_WORKITEM_PAYLOAD/SOD/SizeOnDisk - --has-workload --readonly-dotnet --msbuild "/p:_TrimmerDumpDependencies=true" --msbuild /warnaserror:NU1602,NU1604 --msbuild-static AdditionalMonoLinkerOptions=%27"%24(AdditionalMonoLinkerOptions) --dump-dependencies"%27 --binlog $(LogDirectory)blazor_publish.binlog + --has-workload --readonly-dotnet --msbuild "/p:_TrimmerDumpDependencies=true;/p:HybridGlobalization=$(hybridGlobalization);/warnaserror:NU1602,NU1604" --msbuild-static AdditionalMonoLinkerOptions=%27"%24(AdditionalMonoLinkerOptions) --dump-dependencies"%27 --binlog $(LogDirectory)blazor_publish.binlog $(EnvVars) $(Python) pre.py publish $(PublishArgs) + + + - HybridGlobalization + $(HybridGlobalizationPath) @@ -43,35 +48,35 @@ - + $(WorkItemDirectory) cd $(BlazorMinDirectory) && $(PublishCommand) && $(Python) test.py sod --scenario-name "%(Identity)" - + $(WorkItemDirectory) cd $(BlazorMinAOTDirectory) && $(PublishCommand) && $(Python) test.py sod --scenario-name "%(Identity)" 00:30 - + $(WorkItemDirectory) cd $(BlazorDirectory) && $(PublishCommand) && $(Python) test.py sod --scenario-name "%(Identity)" $(Python) post.py --readonly-dotnet - + $(WorkItemDirectory) cd $(BlazorAOTDirectory) && $(PublishCommand) && $(Python) test.py sod --scenario-name "%(Identity)" $(Python) post.py --readonly-dotnet 00:30 - + $(WorkItemDirectory) cd $(BlazorPizzaDirectory) && $(PublishCommand) -f $(PerflabTargetFrameworks) && $(Python) test.py sod --scenario-name "%(Identity)" --dirs $(PizzaAppPubLocation) $(Python) post.py --readonly-dotnet - + $(WorkItemDirectory) cd $(BlazorPizzaAOTDirectory) && $(PublishCommand) -f $(PerflabTargetFrameworks) && $(Python) test.py sod --scenario-name "%(Identity)" --dirs 
$(PizzaAppPubLocation) diff --git a/eng/testing/performance/ios_scenarios.proj b/eng/testing/performance/ios_scenarios.proj index 3801fd935d96f..b9efdefad6a36 100644 --- a/eng/testing/performance/ios_scenarios.proj +++ b/eng/testing/performance/ios_scenarios.proj @@ -15,10 +15,15 @@ - nollvm - llvm - symbols - nosymbols + nollvm + llvm + symbols + nosymbols + + + HybridGlobalization + $(LlvmPath)$(SymbolsPath)$(HybridGlobalizationPath) @@ -30,19 +35,19 @@ - + $(WorkItemDirectory) cd $(ScenarioDirectory)helloios;cp -rf $HELIX_CORRELATION_PAYLOAD/iosHelloWorld ./app;$(Python) pre.py --name app $(Python) test.py sod --scenario-name "%(Identity)" $(Python) post.py - + $(WorkItemDirectory) cd $(ScenarioDirectory)helloios;cp -v $HELIX_CORRELATION_PAYLOAD/iosHelloWorldZip/iOSSampleApp.zip .;$(Python) pre.py --name iOSSampleApp.zip $(Python) test.py sod --scenario-name "%(Identity)" $(Python) post.py - + $(WorkItemDirectory).zip 00:15:00 ios-device @@ -61,7 +66,7 @@ # Testing commands $(Python) test.py devicestartup --device-type ios --package-path HelloiOS.app --package-name net.dot.HelloiOS --scenario-name "%(Identity)" ((result=$?)) - + # Post commands $(Python) post.py exit $result diff --git a/eng/testing/performance/performance-setup.ps1 b/eng/testing/performance/performance-setup.ps1 index 8caea345a893d..30b0271b0b61e 100644 --- a/eng/testing/performance/performance-setup.ps1 +++ b/eng/testing/performance/performance-setup.ps1 @@ -14,7 +14,7 @@ Param( [string] $Kind="micro", [switch] $LLVM, [switch] $MonoInterpreter, - [switch] $MonoAOT, + [switch] $MonoAOT, [switch] $Internal, [switch] $Compare, [string] $MonoDotnet="", @@ -27,6 +27,7 @@ Param( [switch] $PhysicalPromotion, [switch] $iOSLlvmBuild, [switch] $iOSStripSymbols, + [switch] $HybridGlobalization, [string] $MauiVersion, [switch] $UseLocalCommitTime ) @@ -100,6 +101,10 @@ if ($iOSNativeAOT) { $Configurations += " iOSStripSymbols=$iOSStripSymbols" } +if ($HybridGlobalization -eq "True") { + $Configurations += 
" HybridGlobalization=True" +} + # FIX ME: This is a workaround until we get this from the actual pipeline $CleanedBranchName = "main" if($Branch.Contains("refs/heads/release")) @@ -124,7 +129,7 @@ if ($UseLocalCommitTime) { if ($RunFromPerformanceRepo) { $SetupArguments = "--perf-hash $CommitSha $CommonSetupArguments" - + robocopy $SourceDirectory $PerformanceDirectory /E /XD $PayloadDirectory $SourceDirectory\artifacts $SourceDirectory\.git } else { @@ -189,6 +194,7 @@ Write-PipelineSetVariable -Name 'Compare' -Value "$Compare" -IsMultiJobVariable Write-PipelineSetVariable -Name 'MonoDotnet' -Value "$UsingMono" -IsMultiJobVariable $false Write-PipelineSetVariable -Name 'iOSLlvmBuild' -Value "$iOSLlvmBuild" -IsMultiJobVariable $false Write-PipelineSetVariable -Name 'iOSStripSymbols' -Value "$iOSStripSymbols" -IsMultiJobVariable $false +Write-PipelineSetVariable -Name 'hybridGlobalization' -Value "$HybridGlobalization" -IsMultiJobVariable $false # Helix Arguments Write-PipelineSetVariable -Name 'Creator' -Value "$Creator" -IsMultiJobVariable $false diff --git a/eng/testing/performance/performance-setup.sh b/eng/testing/performance/performance-setup.sh index 1e25a46c39368..cb4b2af97f7ba 100755 --- a/eng/testing/performance/performance-setup.sh +++ b/eng/testing/performance/performance-setup.sh @@ -41,6 +41,7 @@ iosnativeaot=false runtimetype="" iosllvmbuild="" iosstripsymbols="" +hybridglobalization="" maui_version="" use_local_commit_time=false only_sanity=false @@ -188,6 +189,10 @@ while (($# > 0)); do iosstripsymbols=$2 shift 2 ;; + --hybridglobalization) + hybridglobalization=$2 + shift 2 + ;; --mauiversion) maui_version=$2 shift 2 @@ -238,6 +243,7 @@ while (($# > 0)); do echo " --iosnativeaot Set for ios Native AOT runs" echo " --iosllvmbuild Set LLVM for iOS Mono/Maui runs" echo " --iosstripsymbols Set STRIP_DEBUG_SYMBOLS for iOS Mono/Maui runs" + echo " --hybridglobalization Set hybrid globalization for iOS Mono/Maui/Wasm runs" echo " --mauiversion Set the 
maui version for Mono/Maui runs" echo " --uselocalcommittime Pass local runtime commit time to the setup script" echo " --nodynamicpgo Set for No dynamic PGO runs" @@ -364,6 +370,10 @@ if [[ "$physicalpromotion" == "true" ]]; then configurations="$configurations PhysicalPromotionType=physicalpromotion" fi +if [[ "${hybridglobalization,,}" == "true" ]]; then # convert to lowercase to test + configurations="$configurations HybridGlobalization=True" # Force True for consistency +fi + cleaned_branch_name="main" @@ -512,6 +522,7 @@ Write-PipelineSetVariable -name "MonoDotnet" -value "$using_mono" -is_multi_job_ Write-PipelineSetVariable -name "WasmDotnet" -value "$using_wasm" -is_multi_job_variable false Write-PipelineSetVariable -Name 'iOSLlvmBuild' -Value "$iosllvmbuild" -is_multi_job_variable false Write-PipelineSetVariable -Name 'iOSStripSymbols' -Value "$iosstripsymbols" -is_multi_job_variable false +Write-PipelineSetVariable -Name 'hybridGlobalization' -Value "$hybridglobalization" -is_multi_job_variable false Write-PipelineSetVariable -Name 'RuntimeType' -Value "$runtimetype" -is_multi_job_variable false Write-PipelineSetVariable -name "OnlySanityCheck" -value "$only_sanity" -is_multi_job_variable false Write-PipelineSetVariable -name "V8Version" -value "$v8_version" -is_multi_job_variable false diff --git a/eng/testing/tests.browser.targets b/eng/testing/tests.browser.targets index 8c4b3e46c9eac..2ce5f332cb8ea 100644 --- a/eng/testing/tests.browser.targets +++ b/eng/testing/tests.browser.targets @@ -24,9 +24,14 @@ false _GetWorkloadsToInstall;$(GetWorkloadInputsDependsOn) + true + true _GetRuntimePackNuGetsToBuild;_GetNugetsForAOT;$(GetNuGetsToBuildForWorkloadTestingDependsOn) <_BundleAOTTestWasmAppForHelixDependsOn>$(_BundleAOTTestWasmAppForHelixDependsOn);PrepareForWasmBuildApp;_PrepareForAOTOnHelix @@ -59,11 +64,14 @@ + Condition="'$(ContinuousIntegrationBuild)' != 'true' or '$(IsBrowserWasmProject)' != 'true'" /> <_WasmBrowserPathForTests 
Condition="'$(BROWSER_PATH_FOR_TESTS)' != ''">$(BROWSER_PATH_FOR_TESTS) <_WasmBrowserPathForTests Condition="'$(_WasmBrowserPathForTests)' == '' and '$(InstallChromeForTests)' == 'true'">$(ChromeBinaryPath) + + <_WasmJSEnginePathForTests Condition="'$(V8_PATH_FOR_TESTS)' != ''">$(V8_PATH_FOR_TESTS) + <_WasmJSEnginePathForTests Condition="'$(_WasmJSEnginePathForTests)' == '' and '$(InstallV8ForTests)' == 'true'">$(V8BinaryPath) @@ -264,6 +273,11 @@ Variant="latest" Version="$(PackageVersionForWorkloadManifests)" /> + + - + diff --git a/eng/testing/tests.singlefile.targets b/eng/testing/tests.singlefile.targets index 0bb1f4ec5fd0a..8a01f084400db 100644 --- a/eng/testing/tests.singlefile.targets +++ b/eng/testing/tests.singlefile.targets @@ -142,19 +142,6 @@ - - - $(CoreCLRCrossgen2Dir)crossgen2$(ExeSuffix) - - - - - - - - - - + + + diff --git a/eng/testing/wasm-provisioning.targets b/eng/testing/wasm-provisioning.targets index d1c9bda6653fe..4852fd5ecc2ec 100644 --- a/eng/testing/wasm-provisioning.targets +++ b/eng/testing/wasm-provisioning.targets @@ -11,10 +11,13 @@ false + false $(ArtifactsBinDir)firefox\ $([MSBuild]::NormalizePath($(FirefoxDir), '.install-firefox-$(FirefoxRevision).stamp')) <_BrowserStampDir>$(ArtifactsBinDir)\ + + Build @@ -38,6 +41,11 @@ $(linux_ChromeBaseSnapshotUrl)/chrome-linux.zip $(linux_ChromeBaseSnapshotUrl)/chromedriver_linux64.zip + + $(linux_V8Version) + v8-$(linux_V8Version) + $(V8DirName).sh + <_V8PlatformId>linux64 @@ -53,6 +61,11 @@ $(win_ChromeBaseSnapshotUrl)/chrome-win.zip $(win_ChromeBaseSnapshotUrl)/chromedriver_win32.zip + + $(win_V8Version) + v8-$(win_V8Version) + $(V8DirName).cmd + <_V8PlatformId>win32 @@ -65,6 +78,12 @@ $([MSBuild]::NormalizePath($(ChromeDriverDir), $(ChromeDriverDirName), $(ChromeDriverBinaryName))) + + $(ArtifactsBinDir)$(V8DirName)\ + $([MSBuild]::NormalizePath('$(V8Dir)', '.install-$(V8Version).stamp')) + $([MSBuild]::NormalizePath($(V8Dir), $(V8BinaryName))) + + 108.0.1 
https://ftp.mozilla.org/pub/firefox/releases/$(FirefoxRevision)/linux-x86_64/en-US/firefox-$(FirefoxRevision).tar.bz2 @@ -72,9 +91,11 @@ + @@ -87,6 +108,7 @@ + @@ -97,9 +119,11 @@ + @@ -121,8 +145,41 @@ + + + + + + + + <_V8SnapshotUrl>https://storage.googleapis.com/chromium-v8/official/canary/v8-$(_V8PlatformId)-rel-$(V8Version).zip + + <_V8Script Condition="$([MSBuild]::IsOSPlatform('linux'))">#!/usr/bin/env bash +export __SCRIPT_DIR=%24( cd -- "%24( dirname -- "%24{BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +"$__SCRIPT_DIR/d8" --snapshot_blob="$__SCRIPT_DIR/snapshot_blob.bin" "$@" + + <_V8Script Condition="$([MSBuild]::IsOSPlatform('windows'))">@echo off +"%~dp0\d8.exe --snapshot_blob="%~dp0\snapshot_blob.bin" %* + + + + + + + + + + + + + + + <_StampFile Include="$(_BrowserStampDir).install-firefox*.stamp" /> diff --git a/global.json b/global.json index d9233ff3dc9fd..7f724a4285d93 100644 --- a/global.json +++ b/global.json @@ -13,6 +13,6 @@ "Microsoft.DotNet.SharedFramework.Sdk": "8.0.0-beta.23463.1", "Microsoft.Build.NoTargets": "3.7.0", "Microsoft.Build.Traversal": "3.4.0", - "Microsoft.NET.Sdk.IL": "9.0.0-alpha.1.23466.6" + "Microsoft.NET.Sdk.IL": "9.0.0-alpha.1.23470.17" } } diff --git a/src/coreclr/System.Private.CoreLib/System.Private.CoreLib.csproj b/src/coreclr/System.Private.CoreLib/System.Private.CoreLib.csproj index 888f3f8b1498e..20073739b2bbd 100644 --- a/src/coreclr/System.Private.CoreLib/System.Private.CoreLib.csproj +++ b/src/coreclr/System.Private.CoreLib/System.Private.CoreLib.csproj @@ -290,6 +290,7 @@ + diff --git a/src/coreclr/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.CoreCLR.cs index 1c1b9111bf4a1..a931499ac7e2d 100644 --- a/src/coreclr/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.CoreCLR.cs +++ 
b/src/coreclr/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.CoreCLR.cs @@ -493,7 +493,7 @@ internal unsafe struct MethodTable private const uint enum_flag_NonTrivialInterfaceCast = 0x00080000 // enum_flag_Category_Array | 0x40000000 // enum_flag_ComObject | 0x00400000 // enum_flag_ICastable; - | 0x00200000 // enum_flag_IDynamicInterfaceCastable; + | 0x10000000 // enum_flag_IDynamicInterfaceCastable; | 0x00040000; // enum_flag_Category_ValueType private const int DebugClassNamePtr = // adjust for debug_m_szClassName diff --git a/src/coreclr/System.Private.CoreLib/src/System/Threading/Mutex.CoreCLR.Unix.cs b/src/coreclr/System.Private.CoreLib/src/System/Threading/Mutex.CoreCLR.Unix.cs new file mode 100644 index 0000000000000..52233ffc583b1 --- /dev/null +++ b/src/coreclr/System.Private.CoreLib/src/System/Threading/Mutex.CoreCLR.Unix.cs @@ -0,0 +1,135 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.IO; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using System.Text; +using Microsoft.Win32.SafeHandles; + +namespace System.Threading +{ + /// + /// Synchronization primitive that can also be used for interprocess synchronization + /// + public sealed partial class Mutex : WaitHandle + { + private void CreateMutexCore(bool initiallyOwned, string? name, out bool createdNew) + { + SafeWaitHandle mutexHandle = CreateMutexCore(initiallyOwned, name, out int errorCode, out string? 
errorDetails); + if (mutexHandle.IsInvalid) + { + mutexHandle.SetHandleAsInvalid(); + if (errorCode == Interop.Errors.ERROR_FILENAME_EXCED_RANGE) + // On Unix, length validation is done by CoreCLR's PAL after converting to utf-8 + throw new ArgumentException(SR.Argument_WaitHandleNameTooLong, nameof(name)); + if (errorCode == Interop.Errors.ERROR_INVALID_HANDLE) + throw new WaitHandleCannotBeOpenedException(SR.Format(SR.Threading_WaitHandleCannotBeOpenedException_InvalidHandle, name)); + + throw Win32Marshal.GetExceptionForWin32Error(errorCode, name, errorDetails); + } + + createdNew = errorCode != Interop.Errors.ERROR_ALREADY_EXISTS; + SafeWaitHandle = mutexHandle; + } + + private static OpenExistingResult OpenExistingWorker(string name, out Mutex? result) + { + ArgumentException.ThrowIfNullOrEmpty(name); + + result = null; + // To allow users to view & edit the ACL's, call OpenMutex + // with parameters to allow us to view & edit the ACL. This will + // fail if we don't have permission to view or edit the ACL's. + // If that happens, ask for less permissions. + SafeWaitHandle myHandle = OpenMutexCore(name, out int errorCode, out string? 
errorDetails); + + if (myHandle.IsInvalid) + { + myHandle.Dispose(); + + if (errorCode == Interop.Errors.ERROR_FILENAME_EXCED_RANGE) + { + // On Unix, length validation is done by CoreCLR's PAL after converting to utf-8 + throw new ArgumentException(SR.Argument_WaitHandleNameTooLong, nameof(name)); + } + if (Interop.Errors.ERROR_FILE_NOT_FOUND == errorCode || Interop.Errors.ERROR_INVALID_NAME == errorCode) + return OpenExistingResult.NameNotFound; + if (Interop.Errors.ERROR_PATH_NOT_FOUND == errorCode) + return OpenExistingResult.PathNotFound; + if (Interop.Errors.ERROR_INVALID_HANDLE == errorCode) + return OpenExistingResult.NameInvalid; + + throw Win32Marshal.GetExceptionForWin32Error(errorCode, name, errorDetails); + } + + result = new Mutex(myHandle); + return OpenExistingResult.Success; + } + + // Note: To call ReleaseMutex, you must have an ACL granting you + // MUTEX_MODIFY_STATE rights (0x0001). The other interesting value + // in a Mutex's ACL is MUTEX_ALL_ACCESS (0x1F0001). + public void ReleaseMutex() + { + if (!Interop.Kernel32.ReleaseMutex(SafeWaitHandle)) + { + throw new ApplicationException(SR.Arg_SynchronizationLockException); + } + } + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // Unix-specific implementation + + private const int SystemCallErrorsBufferSize = 256; + + private static unsafe SafeWaitHandle CreateMutexCore( + bool initialOwner, + string? name, + out int errorCode, + out string? errorDetails) + { + byte* systemCallErrors = stackalloc byte[SystemCallErrorsBufferSize]; + SafeWaitHandle mutexHandle = CreateMutex(initialOwner, name, systemCallErrors, SystemCallErrorsBufferSize); + + // Get the error code even if the handle is valid, as it could be ERROR_ALREADY_EXISTS, indicating that the mutex + // already exists and was opened + errorCode = Marshal.GetLastPInvokeError(); + + errorDetails = mutexHandle.IsInvalid ? 
GetErrorDetails(systemCallErrors) : null; + return mutexHandle; + } + + private static unsafe SafeWaitHandle OpenMutexCore(string name, out int errorCode, out string? errorDetails) + { + byte* systemCallErrors = stackalloc byte[SystemCallErrorsBufferSize]; + SafeWaitHandle mutexHandle = OpenMutex(name, systemCallErrors, SystemCallErrorsBufferSize); + errorCode = mutexHandle.IsInvalid ? Marshal.GetLastPInvokeError() : Interop.Errors.ERROR_SUCCESS; + errorDetails = mutexHandle.IsInvalid ? GetErrorDetails(systemCallErrors) : null; + return mutexHandle; + } + + private static unsafe string? GetErrorDetails(byte* systemCallErrors) + { + int systemCallErrorsLength = + new ReadOnlySpan(systemCallErrors, SystemCallErrorsBufferSize).IndexOf((byte)'\0'); + if (systemCallErrorsLength > 0) + { + try + { + return + SR.Format(SR.Unix_SystemCallErrors, Encoding.UTF8.GetString(systemCallErrors, systemCallErrorsLength)); + } + catch { } // avoid hiding the original error due to an error here + } + + return null; + } + + [LibraryImport(RuntimeHelpers.QCall, EntryPoint = "PAL_CreateMutexW", SetLastError = true, StringMarshalling = StringMarshalling.Utf16)] + private static unsafe partial SafeWaitHandle CreateMutex([MarshalAs(UnmanagedType.Bool)] bool initialOwner, string? 
name, byte* systemCallErrors, uint systemCallErrorsBufferSize); + + [LibraryImport(RuntimeHelpers.QCall, EntryPoint = "PAL_OpenMutexW", SetLastError = true, StringMarshalling = StringMarshalling.Utf16)] + private static unsafe partial SafeWaitHandle OpenMutex(string name, byte* systemCallErrors, uint systemCallErrorsBufferSize); + } +} diff --git a/src/coreclr/System.Private.CoreLib/src/System/Threading/Thread.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Threading/Thread.CoreCLR.cs index 889bb0d492c74..13d0a426df19f 100644 --- a/src/coreclr/System.Private.CoreLib/src/System/Threading/Thread.CoreCLR.cs +++ b/src/coreclr/System.Private.CoreLib/src/System/Threading/Thread.CoreCLR.cs @@ -118,15 +118,35 @@ private void StartCallback() [MethodImpl(MethodImplOptions.InternalCall)] private static extern void SleepInternal(int millisecondsTimeout); + // Max iterations to be done in SpinWait without switching GC modes. + private const int SpinWaitCoopThreshold = 1024; + + [LibraryImport(RuntimeHelpers.QCall, EntryPoint = "ThreadNative_SpinWait")] + [SuppressGCTransition] + private static partial void SpinWaitInternal(int iterations); + + [LibraryImport(RuntimeHelpers.QCall, EntryPoint = "ThreadNative_SpinWait")] + private static partial void LongSpinWaitInternal(int iterations); + + [MethodImpl(MethodImplOptions.NoInlining)] // Slow path method. Make sure that the caller frame does not pay for PInvoke overhead. + private static void LongSpinWait(int iterations) => LongSpinWaitInternal(iterations); + /// /// Wait for a length of time proportional to 'iterations'. Each iteration is should /// only take a few machine instructions. Calling this API is preferable to coding /// a explicit busy loop because the hardware can be informed that it is busy waiting. 
/// - [MethodImpl(MethodImplOptions.InternalCall)] - private static extern void SpinWaitInternal(int iterations); - - public static void SpinWait(int iterations) => SpinWaitInternal(iterations); + public static void SpinWait(int iterations) + { + if (iterations < SpinWaitCoopThreshold) + { + SpinWaitInternal(iterations); + } + else + { + LongSpinWait(iterations); + } + } [LibraryImport(RuntimeHelpers.QCall, EntryPoint = "ThreadNative_YieldThread")] private static partial Interop.BOOL YieldInternal(); diff --git a/src/coreclr/clrdefinitions.cmake b/src/coreclr/clrdefinitions.cmake index 3805833874132..03615c9106bd0 100644 --- a/src/coreclr/clrdefinitions.cmake +++ b/src/coreclr/clrdefinitions.cmake @@ -132,9 +132,12 @@ if (CLR_CMAKE_TARGET_WIN32 AND (CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ add_definitions(-DFEATURE_INTEROP_DEBUGGING) endif (CLR_CMAKE_TARGET_WIN32 AND (CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_I386 OR CLR_CMAKE_TARGET_ARCH_ARM64)) if(FEATURE_INTERPRETER) - add_compile_definitions(FEATURE_INTERPRETER) + add_compile_definitions(FEATURE_INTERPRETER) endif(FEATURE_INTERPRETER) -add_definitions(-DFEATURE_ISYM_READER) + +if (CLR_CMAKE_TARGET_WIN32) + add_definitions(-DFEATURE_ISYM_READER) +endif(CLR_CMAKE_TARGET_WIN32) if(FEATURE_MERGE_JIT_AND_ENGINE) add_compile_definitions($<$>>:FEATURE_MERGE_JIT_AND_ENGINE>) diff --git a/src/coreclr/crossgen-corelib.proj b/src/coreclr/crossgen-corelib.proj index 2dc62b8151a3c..12d2f17ddcc51 100644 --- a/src/coreclr/crossgen-corelib.proj +++ b/src/coreclr/crossgen-corelib.proj @@ -5,8 +5,7 @@ - - + diff --git a/src/coreclr/dlls/mscoree/exports.cpp b/src/coreclr/dlls/mscoree/exports.cpp index d14471f3b6bb8..f1cfb2faf5c88 100644 --- a/src/coreclr/dlls/mscoree/exports.cpp +++ b/src/coreclr/dlls/mscoree/exports.cpp @@ -138,7 +138,6 @@ static void ConvertConfigPropertiesToUnicode( LPCWSTR** propertyValuesWRef, BundleProbeFn** bundleProbe, PInvokeOverrideFn** pinvokeOverride, - bool* hostPolicyEmbedded, 
host_runtime_contract** hostContract) { LPCWSTR* propertyKeysW = new (nothrow) LPCWSTR[propertyCount]; @@ -170,11 +169,6 @@ static void ConvertConfigPropertiesToUnicode( if (*pinvokeOverride == nullptr) *pinvokeOverride = (PInvokeOverrideFn*)u16_strtoui64(propertyValuesW[propertyIndex], nullptr, 0); } - else if (strcmp(propertyKeys[propertyIndex], HOST_PROPERTY_HOSTPOLICY_EMBEDDED) == 0) - { - // The HOSTPOLICY_EMBEDDED property indicates if the executable has hostpolicy statically linked in - *hostPolicyEmbedded = (u16_strcmp(propertyValuesW[propertyIndex], W("true")) == 0); - } else if (strcmp(propertyKeys[propertyIndex], HOST_PROPERTY_RUNTIME_CONTRACT) == 0) { // Host contract is passed in as the value of HOST_RUNTIME_CONTRACT property (encoded as a string). @@ -252,7 +246,6 @@ int coreclr_initialize( LPCWSTR* propertyKeysW; LPCWSTR* propertyValuesW; BundleProbeFn* bundleProbe = nullptr; - bool hostPolicyEmbedded = false; PInvokeOverrideFn* pinvokeOverride = nullptr; host_runtime_contract* hostContract = nullptr; @@ -268,7 +261,6 @@ int coreclr_initialize( &propertyValuesW, &bundleProbe, &pinvokeOverride, - &hostPolicyEmbedded, &hostContract); #ifdef TARGET_UNIX @@ -283,8 +275,6 @@ int coreclr_initialize( } #endif - g_hostpolicy_embedded = hostPolicyEmbedded; - if (hostContract != nullptr) { HostInformation::SetContract(hostContract); diff --git a/src/coreclr/gc/env/gcenv.base.h b/src/coreclr/gc/env/gcenv.base.h index cbbf358fac3cd..dccf7e12fc7b5 100644 --- a/src/coreclr/gc/env/gcenv.base.h +++ b/src/coreclr/gc/env/gcenv.base.h @@ -54,8 +54,11 @@ typedef int BOOL; typedef uint32_t DWORD; typedef uint64_t DWORD64; +#ifdef _MSC_VER +typedef unsigned long ULONG; +#else typedef uint32_t ULONG; - +#endif // ----------------------------------------------------------------------------------------------------------- // HRESULT subset. 
diff --git a/src/coreclr/gc/env/gcenv.object.h b/src/coreclr/gc/env/gcenv.object.h index d04445ea36037..ff0dbb343ed1d 100644 --- a/src/coreclr/gc/env/gcenv.object.h +++ b/src/coreclr/gc/env/gcenv.object.h @@ -4,6 +4,10 @@ #ifndef __GCENV_OBJECT_H__ #define __GCENV_OBJECT_H__ +#ifdef BUILD_AS_STANDALONE +extern bool g_oldMethodTableFlags; +#endif + // ARM requires that 64-bit primitive types are aligned at 64-bit boundaries for interlocked-like operations. // Additionally the platform ABI requires these types and composite type containing them to be similarly // aligned when passed as arguments. @@ -42,15 +46,15 @@ class ObjHeader static_assert(sizeof(ObjHeader) == sizeof(uintptr_t), "this assumption is made by the VM!"); -#define MTFlag_RequireAlign8 0x00001000 -#define MTFlag_Category_ValueType 0x00040000 -#define MTFlag_Category_ValueType_Mask 0x000C0000 -#define MTFlag_ContainsPointers 0x01000000 -#define MTFlag_HasCriticalFinalizer 0x08000000 -#define MTFlag_HasFinalizer 0x00100000 -#define MTFlag_IsArray 0x00080000 -#define MTFlag_Collectible 0x10000000 -#define MTFlag_HasComponentSize 0x80000000 +#define MTFlag_RequiresAlign8 0x00001000 // enum_flag_RequiresAlign8 +#define MTFlag_Category_ValueType 0x00040000 // enum_flag_Category_ValueType +#define MTFlag_Category_ValueType_Mask 0x000C0000 // enum_flag_Category_ValueType_Mask +#define MTFlag_ContainsPointers 0x01000000 // enum_flag_ContainsPointers +#define MTFlag_HasCriticalFinalizer 0x00000002 // enum_flag_HasCriticalFinalizer +#define MTFlag_HasFinalizer 0x00100000 // enum_flag_HasFinalizer +#define MTFlag_IsArray 0x00080000 // enum_flag_Category_Array +#define MTFlag_Collectible 0x00200000 // enum_flag_Collectible +#define MTFlag_HasComponentSize 0x80000000 // enum_flag_HasComponentSize class MethodTable { @@ -85,6 +89,14 @@ class MethodTable bool Collectible() { +#ifdef BUILD_AS_STANDALONE + if (g_oldMethodTableFlags) + { + // This flag is used for .NET 8 or below + const int Old_MTFlag_Collectible = 
0x10000000; + return (m_flags & Old_MTFlag_Collectible) != 0; + } +#endif return (m_flags & MTFlag_Collectible) != 0; } @@ -100,7 +112,7 @@ class MethodTable bool RequiresAlign8() { - return (m_flags & MTFlag_RequireAlign8) != 0; + return (m_flags & MTFlag_RequiresAlign8) != 0; } bool IsValueType() @@ -127,18 +139,15 @@ class MethodTable bool HasCriticalFinalizer() { - return (m_flags & MTFlag_HasCriticalFinalizer) != 0; - } - - bool IsArray() - { - return (m_flags & MTFlag_IsArray) != 0; - } - - MethodTable * GetParent() - { - _ASSERTE(!IsArray()); - return m_pRelatedType; +#ifdef BUILD_AS_STANDALONE + if (g_oldMethodTableFlags) + { + // This flag is used for .NET 8 or below + const int Old_MTFlag_HasCriticalFinalizer = 0x08000000; + return (m_flags & Old_MTFlag_HasCriticalFinalizer) != 0; + } +#endif + return !HasComponentSize() && (m_flags & MTFlag_HasCriticalFinalizer); } bool SanityCheck() diff --git a/src/coreclr/gc/gccommon.cpp b/src/coreclr/gc/gccommon.cpp index 413075246fd5c..2986ce4c2105d 100644 --- a/src/coreclr/gc/gccommon.cpp +++ b/src/coreclr/gc/gccommon.cpp @@ -19,6 +19,7 @@ IGCHandleManager* g_theGCHandleManager; #ifdef BUILD_AS_STANDALONE IGCToCLR* g_theGCToCLR; VersionInfo g_runtimeSupportedVersion; +bool g_oldMethodTableFlags; #endif // BUILD_AS_STANDALONE #ifdef GC_CONFIG_DRIVEN diff --git a/src/coreclr/gc/gcenv.ee.standalone.inl b/src/coreclr/gc/gcenv.ee.standalone.inl index 24ca20f5837bd..1751e069c3e43 100644 --- a/src/coreclr/gc/gcenv.ee.standalone.inl +++ b/src/coreclr/gc/gcenv.ee.standalone.inl @@ -14,6 +14,9 @@ extern IGCToCLR* g_theGCToCLR; // GC version that the current runtime supports extern VersionInfo g_runtimeSupportedVersion; +// Does the runtime use the old method table flags +extern bool g_oldMethodTableFlags; + struct StressLogMsg; // When we are building the GC in a standalone environment, we diff --git a/src/coreclr/gc/gcinterface.h b/src/coreclr/gc/gcinterface.h index e992082b78ff6..3cf40f920ec9b 100644 --- 
a/src/coreclr/gc/gcinterface.h +++ b/src/coreclr/gc/gcinterface.h @@ -15,7 +15,7 @@ // The major version of the IGCToCLR interface. Breaking changes to this interface // require bumps in the major version number. -#define EE_INTERFACE_MAJOR_VERSION 1 +#define EE_INTERFACE_MAJOR_VERSION 2 struct ScanContext; struct gc_alloc_context; diff --git a/src/coreclr/gc/gcload.cpp b/src/coreclr/gc/gcload.cpp index 48c1715020d83..05f297f2cb0ea 100644 --- a/src/coreclr/gc/gcload.cpp +++ b/src/coreclr/gc/gcload.cpp @@ -51,6 +51,7 @@ GC_VersionInfo(/* InOut */ VersionInfo* info) // For example, GC would only call functions on g_theGCToCLR interface that the runtime // supports. g_runtimeSupportedVersion = *info; + g_oldMethodTableFlags = g_runtimeSupportedVersion.MajorVersion < 2; #endif info->MajorVersion = GC_INTERFACE_MAJOR_VERSION; info->MinorVersion = GC_INTERFACE_MINOR_VERSION; diff --git a/src/coreclr/gc/unix/gcenv.unix.cpp b/src/coreclr/gc/unix/gcenv.unix.cpp index ab99940a0151c..62680cf1428e2 100644 --- a/src/coreclr/gc/unix/gcenv.unix.cpp +++ b/src/coreclr/gc/unix/gcenv.unix.cpp @@ -168,6 +168,17 @@ enum membarrier_cmd bool CanFlushUsingMembarrier() { + +#ifdef TARGET_ANDROID + // Avoid calling membarrier on older Android versions where membarrier + // may be barred by seccomp causing the process to be killed. + int apiLevel = android_get_device_api_level(); + if (apiLevel < __ANDROID_API_Q__) + { + return false; + } +#endif + // Starting with Linux kernel 4.14, process memory barriers can be generated // using MEMBARRIER_CMD_PRIVATE_EXPEDITED. 
diff --git a/src/coreclr/inc/crosscomp.h b/src/coreclr/inc/crosscomp.h index d5e4ca3004b8d..aeb061ca5ba47 100644 --- a/src/coreclr/inc/crosscomp.h +++ b/src/coreclr/inc/crosscomp.h @@ -282,7 +282,8 @@ typedef struct DECLSPEC_ALIGN(16) _T_CONTEXT { } T_CONTEXT, *PT_CONTEXT; // _IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY (see ExternalAPIs\Win9CoreSystem\inc\winnt.h) -typedef struct _T_RUNTIME_FUNCTION { +#ifdef HOST_UNIX +typedef struct _IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY { DWORD BeginAddress; union { DWORD UnwindData; @@ -294,12 +295,11 @@ typedef struct _T_RUNTIME_FUNCTION { DWORD H : 1; DWORD CR : 2; DWORD FrameSize : 9; - } PackedUnwindData; + }; }; -} T_RUNTIME_FUNCTION, *PT_RUNTIME_FUNCTION; +} IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY, * PIMAGE_ARM64_RUNTIME_FUNCTION_ENTRY; -#ifdef HOST_UNIX typedef EXCEPTION_DISPOSITION @@ -310,6 +310,8 @@ EXCEPTION_DISPOSITION PVOID DispatcherContext ); #endif + +typedef IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY T_RUNTIME_FUNCTION, * PT_RUNTIME_FUNCTION; // // Define exception dispatch context structure. 
// diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp index 6c353a6a238ef..c9670fc316c3a 100644 --- a/src/coreclr/jit/assertionprop.cpp +++ b/src/coreclr/jit/assertionprop.cpp @@ -4450,7 +4450,7 @@ bool Compiler::optNonNullAssertionProp_Ind(ASSERT_VALARG_TP assertions, GenTree* indir->gtFlags |= GTF_IND_NONFAULTING; // Set this flag to prevent reordering - indir->gtFlags |= GTF_ORDER_SIDEEFF; + indir->SetHasOrderingSideEffect(); return true; } diff --git a/src/coreclr/jit/codegen.h b/src/coreclr/jit/codegen.h index e00dff0f7bacd..7e15e7f652615 100644 --- a/src/coreclr/jit/codegen.h +++ b/src/coreclr/jit/codegen.h @@ -1051,11 +1051,11 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #endif // !defined(TARGET_64BIT) //------------------------------------------------------------------------- - // genUpdateLifeStore: Do liveness udpate after tree store instructions + // genUpdateLifeStore: Do liveness update after tree store instructions // were emitted, update result var's home if it was stored on stack. // // Arguments: - // tree - Gentree node + // tree - GenTree node // targetReg - of the tree // varDsc - result value's variable // diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index 092a031f27048..ed198e37a7328 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -2160,6 +2160,8 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) } GetEmitter()->emitIns_J(INS_bl_local, block->bbJumpDest); + BasicBlock* const nextBlock = block->bbNext; + if (block->bbFlags & BBF_RETLESS_CALL) { // We have a retless call, and the last instruction generated was a call. @@ -2167,7 +2169,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) // block), then we need to generate a breakpoint here (since it will never // get executed) to get proper unwind behavior. 
- if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext)) + if ((nextBlock == nullptr) || !BasicBlock::sameEHRegion(block, nextBlock)) { instGen(INS_BREAKPOINT); // This should never get executed } @@ -2179,8 +2181,10 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) // handler. So turn off GC reporting for this single instruction. GetEmitter()->emitDisableGC(); + BasicBlock* const jumpDest = nextBlock->bbJumpDest; + // Now go to where the finally funclet needs to return to. - if (block->bbNext->bbJumpDest == block->bbNext->bbNext) + if ((jumpDest == nextBlock->bbNext) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) { // Fall-through. // TODO-ARM64-CQ: Can we get rid of this instruction, and just have the call return directly @@ -2190,7 +2194,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) } else { - inst_JMP(EJ_jmp, block->bbNext->bbJumpDest); + inst_JMP(EJ_jmp, jumpDest); } GetEmitter()->emitEnableGC(); @@ -2203,7 +2207,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) if (!(block->bbFlags & BBF_RETLESS_CALL)) { assert(block->isBBCallAlwaysPair()); - block = block->bbNext; + block = nextBlock; } return block; } diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp index a4af8db11ad1e..a0a9967b24e04 100644 --- a/src/coreclr/jit/codegenarmarch.cpp +++ b/src/coreclr/jit/codegenarmarch.cpp @@ -4610,7 +4610,7 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode* lea) // addressing mode instruction. Currently we're 'cheating' by producing one or more // instructions to generate the addressing mode so we need to modify lowering to // produce LEAs that are a 1:1 relationship to the ARM64 architecture. 
- if (lea->Base() && lea->Index()) + if (lea->HasBase() && lea->HasIndex()) { GenTree* memBase = lea->Base(); GenTree* index = lea->Index(); @@ -4687,7 +4687,7 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode* lea) genScaledAdd(size, lea->GetRegNum(), memBase->GetRegNum(), index->GetRegNum(), scale); } } - else if (lea->Base()) + else if (lea->HasBase()) { GenTree* memBase = lea->Base(); @@ -4715,7 +4715,7 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode* lea) emit->emitIns_R_R_R(INS_add, size, lea->GetRegNum(), memBase->GetRegNum(), tmpReg); } } - else if (lea->Index()) + else if (lea->HasIndex()) { // If we encounter a GT_LEA node without a base it means it came out // when attempting to optimize an arbitrary arithmetic expression during lower. diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index c364b5c353852..022294c810ec3 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -664,7 +664,7 @@ regMaskTP Compiler::compHelperCallKillSet(CorInfoHelpFunc helper) // intervals when calling "siStartVariableLiveRange" and "siEndVariableLiveRange". // // Notes: -// If "ForCodeGen" is false, only "compCurLife" set (and no mask) will be setted. +// If "ForCodeGen" is false, only "compCurLife" set (and no mask) will be updated. // template void Compiler::compChangeLife(VARSET_VALARG_TP newLife) @@ -2919,6 +2919,8 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere bool writeThru; // true if the argument gets homed to both stack and register bool processed; // true after we've processed the argument (and it is in its final location) bool circular; // true if this register participates in a circular dependency loop. 
+ bool hfaConflict; // arg is part of an HFA that will end up in the same register + // but in a different slot (eg arg in s3 = v3.s[0], needs to end up in v3.s[3]) } regArgTab[max(MAX_REG_ARG + 1, MAX_FLOAT_REG_ARG)] = {}; unsigned varNum; @@ -3284,7 +3286,8 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere * A circular dependency is a set of registers R1, R2, ..., Rn * such that R1->R2 (that is, R1 needs to be moved to R2), R2->R3, ..., Rn->R1 */ - bool change = true; + bool change = true; + bool hasHfaConflict = false; if (regArgMaskLive) { /* Possible circular dependencies still exist; the previous pass was not enough @@ -3337,10 +3340,32 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere { // This must be a SIMD type that's fully enregistered, but is passed as an HFA. // Each field will be inserted into the same destination register. + // assert(varTypeIsSIMD(varDsc)); assert(regArgTab[argNum].slot <= (int)varDsc->lvHfaSlots()); assert(argNum > 0); assert(regArgTab[argNum - 1].varNum == varNum); + + // If the field is passed in the same register as the destination, + // but is in the wrong part of the register, mark it specially so later + // we make sure to move it to the right spot before "freeing" the destination. + // + destRegNum = varDsc->GetRegNum(); + if (regNum == destRegNum) + { + // We only get here if the HFA part is not already in the right slot in + // the destination. That is, it is not slot-1. 
+ // + const int slot = regArgTab[argNum].slot; + assert(slot != 1); + JITDUMP("HFA conflict; arg num %u needs to move from %s[%u] to %s[%u]\n", argNum, + getRegName(regNum), 0, getRegName(destRegNum), slot - 1); + regArgTab[argNum].hfaConflict = true; + + // We'll need to do a special pass later to resolve these + // + hasHfaConflict = true; + } regArgMaskLive &= ~genRegMask(regNum); regArgTab[argNum].circular = false; change = true; @@ -3736,13 +3761,13 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere { size = EA_4BYTE; } + // HVA types...? /* move the dest reg (begReg) in the extra reg */ assert(xtraReg != REG_NA); regNumber begRegNum = genMapRegArgNumToRegNum(begReg, destMemType); - GetEmitter()->emitIns_Mov(insCopy, size, xtraReg, begRegNum, /* canSkip */ false); regSet.verifyRegUsed(xtraReg); @@ -3823,6 +3848,75 @@ void CodeGen::genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbere } } +#if defined(TARGET_ARM64) && defined(FEATURE_SIMD) + // If we saw any hfa conflicts, handle those now. + // + if (hasHfaConflict) + { + // Up above we noticed that there was at least one non-slot-1 HFA arg whose + // destination register was the same as the arg register. + // + // For example, say an HFA was passed as s0-s3 and the destination was v3. + // s3 is in the right register, but not in the right slot in the register. + // + // We handle this by first moving the conflicting part to the right slot + // in the destination (via pass 0 below), and then moving the remaining parts + // to their respective slots (via pass 1). + // + // Note the slot index in the register is one less than value of + // regArgTab[argNum].slot, so a slot-1 hfa arg goes into slot 0 of the destination). + // + // So for the above example, we'd first move the "slot-4" s3 (== v3.s[0]) to v3.s[3]. + // Then we can insert s0 to v3.s[0]) and so on. 
+ // + // We can exempt slot-1 cases as the conflicting part is already in the + // right slot, and code lower down correctly handles populating the remaining slots. + // + for (argNum = 0; argNum < argMax; argNum++) + { + if (!regArgTab[argNum].hfaConflict) + { + continue; + } + + varNum = regArgTab[argNum].varNum; + varDsc = compiler->lvaGetDesc(varNum); + const regNumber destRegNum = varDsc->GetRegNum(); + const var_types regType = regArgTab[argNum].type; + const unsigned firstArgNum = argNum - (regArgTab[argNum].slot - 1); + const unsigned lastArgNum = firstArgNum + varDsc->lvHfaSlots() - 1; + + assert(varDsc->lvIsHfa()); + assert((argNum >= firstArgNum) && (argNum <= lastArgNum)); + assert(destRegNum == genMapRegArgNumToRegNum(argNum, regType)); + + // Pass 0: move the conflicting part; Pass1: insert everything else + // + for (int pass = 0; pass <= 1; pass++) + { + for (unsigned currentArgNum = firstArgNum; currentArgNum <= lastArgNum; currentArgNum++) + { + const regNumber regNum = genMapRegArgNumToRegNum(currentArgNum, regType); + bool insertArg = + ((pass == 0) && (currentArgNum == argNum)) || ((pass == 1) && (currentArgNum != argNum)); + + if (insertArg) + { + assert(!regArgTab[currentArgNum].processed); + + // EA_4BYTE is probably wrong here (and below) + // todo -- suppress self move + GetEmitter()->emitIns_R_R_I_I(INS_mov, EA_4BYTE, destRegNum, regNum, + regArgTab[currentArgNum].slot - 1, 0); + regArgTab[currentArgNum].processed = true; + regArgMaskLive &= ~genRegMask(regNum); + } + } + } + } + } +#endif // defined(TARGET_ARM64) && defined(FEATURE_SIMD) + /* Finally take care of the remaining arguments that must be enregistered */ while (regArgMaskLive) { @@ -6772,317 +6866,6 @@ void CodeGen::genPopRegs(regMaskTP regs, regMaskTP byrefRegs, regMaskTP noRefReg #endif // FEATURE_FIXED_OUT_ARGS } -/***************************************************************************** - * genSetScopeInfo - * - * This function should be called only after the sizes of 
the emitter blocks - * have been finalized. - */ - -void CodeGen::genSetScopeInfo() -{ - if (!compiler->opts.compScopeInfo) - { - return; - } - -#ifdef DEBUG - if (verbose) - { - printf("*************** In genSetScopeInfo()\n"); - } -#endif - - unsigned varsLocationsCount = 0; - - varsLocationsCount = (unsigned int)varLiveKeeper->getLiveRangesCount(); - - if (varsLocationsCount == 0) - { - // No variable home to report - compiler->eeSetLVcount(0); - compiler->eeSetLVdone(); - return; - } - - noway_assert(compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0)); - - // Initialize the table where the reported variables' home will be placed. - compiler->eeSetLVcount(varsLocationsCount); - -#ifdef DEBUG - genTrnslLocalVarCount = varsLocationsCount; - if (varsLocationsCount) - { - genTrnslLocalVarInfo = new (compiler, CMK_DebugOnly) TrnslLocalVarInfo[varsLocationsCount]; - } -#endif - - // We can have one of both flags defined, both, or none. Specially if we need to compare both - // both results. But we cannot report both to the debugger, since there would be overlapping - // intervals, and may not indicate the same variable location. - - genSetScopeInfoUsingVariableRanges(); - - compiler->eeSetLVdone(); -} - -//------------------------------------------------------------------------ -// genSetScopeInfoUsingVariableRanges: Call "genSetScopeInfo" with the -// "VariableLiveRanges" created for the arguments, special arguments and -// IL local variables. -// -// Notes: -// This function is called from "genSetScopeInfo" once the code is generated -// and we want to send debug info to the debugger. 
-// -void CodeGen::genSetScopeInfoUsingVariableRanges() -{ - unsigned int liveRangeIndex = 0; - - for (unsigned int varNum = 0; varNum < compiler->info.compLocalsCount; varNum++) - { - LclVarDsc* varDsc = compiler->lvaGetDesc(varNum); - - if (compiler->compMap2ILvarNum(varNum) == (unsigned int)ICorDebugInfo::UNKNOWN_ILNUM) - { - continue; - } - - auto reportRange = [this, varDsc, varNum, &liveRangeIndex](siVarLoc* loc, UNATIVE_OFFSET start, - UNATIVE_OFFSET end) { - if (varDsc->lvIsParam && (start == end)) - { - // If the length is zero, it means that the prolog is empty. In that case, - // CodeGen::genSetScopeInfo will report the liveness of all arguments - // as spanning the first instruction in the method, so that they can - // at least be inspected on entry to the method. - end++; - } - - if (start < end) - { - genSetScopeInfo(liveRangeIndex, start, end - start, varNum, varNum, true, loc); - liveRangeIndex++; - } - }; - - siVarLoc* curLoc = nullptr; - UNATIVE_OFFSET curStart = 0; - UNATIVE_OFFSET curEnd = 0; - - for (int rangeIndex = 0; rangeIndex < 2; rangeIndex++) - { - VariableLiveKeeper::LiveRangeList* liveRanges; - if (rangeIndex == 0) - { - liveRanges = varLiveKeeper->getLiveRangesForVarForProlog(varNum); - } - else - { - liveRanges = varLiveKeeper->getLiveRangesForVarForBody(varNum); - } - - for (VariableLiveKeeper::VariableLiveRange& liveRange : *liveRanges) - { - UNATIVE_OFFSET startOffs = liveRange.m_StartEmitLocation.CodeOffset(GetEmitter()); - UNATIVE_OFFSET endOffs = liveRange.m_EndEmitLocation.CodeOffset(GetEmitter()); - - assert(startOffs <= endOffs); - assert(startOffs >= curEnd); - if ((curLoc != nullptr) && (startOffs == curEnd) && siVarLoc::Equals(curLoc, &liveRange.m_VarLocation)) - { - // Extend current range. - curEnd = endOffs; - continue; - } - - // Report old range if any. - if (curLoc != nullptr) - { - reportRange(curLoc, curStart, curEnd); - } - - // Start a new range. 
- curLoc = &liveRange.m_VarLocation; - curStart = startOffs; - curEnd = endOffs; - } - } - - // Report last range - if (curLoc != nullptr) - { - reportRange(curLoc, curStart, curEnd); - } - } - - compiler->eeVarsCount = liveRangeIndex; -} - -//------------------------------------------------------------------------ -// genSetScopeInfo: Record scope information for debug info -// -// Arguments: -// which -// startOffs - the starting offset for this scope -// length - the length of this scope -// varNum - the lclVar for this scope info -// LVnum -// avail - a bool indicating if it has a home -// varLoc - the position (reg or stack) of the variable -// -// Notes: -// Called for every scope info piece to record by the main genSetScopeInfo() - -void CodeGen::genSetScopeInfo(unsigned which, - UNATIVE_OFFSET startOffs, - UNATIVE_OFFSET length, - unsigned varNum, - unsigned LVnum, - bool avail, - siVarLoc* varLoc) -{ - // We need to do some mapping while reporting back these variables. - - unsigned ilVarNum = compiler->compMap2ILvarNum(varNum); - noway_assert((int)ilVarNum != ICorDebugInfo::UNKNOWN_ILNUM); - -#ifdef TARGET_X86 - // Non-x86 platforms are allowed to access all arguments directly - // so we don't need this code. - - // Is this a varargs function? - if (compiler->info.compIsVarArgs && varNum != compiler->lvaVarargsHandleArg && - varNum < compiler->info.compArgsCount && !compiler->lvaGetDesc(varNum)->lvIsRegArg) - { - noway_assert(varLoc->vlType == VLT_STK || varLoc->vlType == VLT_STK2); - - // All stack arguments (except the varargs handle) have to be - // accessed via the varargs cookie. 
Discard generated info, - // and just find its position relative to the varargs handle - - PREFIX_ASSUME(compiler->lvaVarargsHandleArg < compiler->info.compArgsCount); - if (!compiler->lvaGetDesc(compiler->lvaVarargsHandleArg)->lvOnFrame) - { - noway_assert(!compiler->opts.compDbgCode); - return; - } - - // Can't check compiler->lvaTable[varNum].lvOnFrame as we don't set it for - // arguments of vararg functions to avoid reporting them to GC. - noway_assert(!compiler->lvaGetDesc(varNum)->lvRegister); - unsigned cookieOffset = compiler->lvaGetDesc(compiler->lvaVarargsHandleArg)->GetStackOffset(); - unsigned varOffset = compiler->lvaGetDesc(varNum)->GetStackOffset(); - - noway_assert(cookieOffset < varOffset); - unsigned offset = varOffset - cookieOffset; - unsigned stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * REGSIZE_BYTES; - noway_assert(offset < stkArgSize); - offset = stkArgSize - offset; - - varLoc->vlType = VLT_FIXED_VA; - varLoc->vlFixedVarArg.vlfvOffset = offset; - } - -#endif // TARGET_X86 - - VarName name = nullptr; - -#ifdef DEBUG - - for (unsigned scopeNum = 0; scopeNum < compiler->info.compVarScopesCount; scopeNum++) - { - if (LVnum == compiler->info.compVarScopes[scopeNum].vsdLVnum) - { - name = compiler->info.compVarScopes[scopeNum].vsdName; - } - } - - // Hang on to this compiler->info. 
- - TrnslLocalVarInfo& tlvi = genTrnslLocalVarInfo[which]; - - tlvi.tlviVarNum = ilVarNum; - tlvi.tlviLVnum = LVnum; - tlvi.tlviName = name; - tlvi.tlviStartPC = startOffs; - tlvi.tlviLength = length; - tlvi.tlviAvailable = avail; - tlvi.tlviVarLoc = *varLoc; - -#endif // DEBUG - - compiler->eeSetLVinfo(which, startOffs, length, ilVarNum, *varLoc); -} - -/*****************************************************************************/ -#ifdef LATE_DISASM -#if defined(DEBUG) -/***************************************************************************** - * CompilerRegName - * - * Can be called only after lviSetLocalVarInfo() has been called - */ - -/* virtual */ -const char* CodeGen::siRegVarName(size_t offs, size_t size, unsigned reg) -{ - if (!compiler->opts.compScopeInfo) - return nullptr; - - if (compiler->info.compVarScopesCount == 0) - return nullptr; - - noway_assert(genTrnslLocalVarCount == 0 || genTrnslLocalVarInfo); - - for (unsigned i = 0; i < genTrnslLocalVarCount; i++) - { - if ((genTrnslLocalVarInfo[i].tlviVarLoc.vlIsInReg((regNumber)reg)) && - (genTrnslLocalVarInfo[i].tlviAvailable == true) && (genTrnslLocalVarInfo[i].tlviStartPC <= offs + size) && - (genTrnslLocalVarInfo[i].tlviStartPC + genTrnslLocalVarInfo[i].tlviLength > offs)) - { - return genTrnslLocalVarInfo[i].tlviName ? 
compiler->VarNameToStr(genTrnslLocalVarInfo[i].tlviName) : NULL; - } - } - - return NULL; -} - -/***************************************************************************** - * CompilerStkName - * - * Can be called only after lviSetLocalVarInfo() has been called - */ - -/* virtual */ -const char* CodeGen::siStackVarName(size_t offs, size_t size, unsigned reg, unsigned stkOffs) -{ - if (!compiler->opts.compScopeInfo) - return nullptr; - - if (compiler->info.compVarScopesCount == 0) - return nullptr; - - noway_assert(genTrnslLocalVarCount == 0 || genTrnslLocalVarInfo); - - for (unsigned i = 0; i < genTrnslLocalVarCount; i++) - { - if ((genTrnslLocalVarInfo[i].tlviVarLoc.vlIsOnStack((regNumber)reg, stkOffs)) && - (genTrnslLocalVarInfo[i].tlviAvailable == true) && (genTrnslLocalVarInfo[i].tlviStartPC <= offs + size) && - (genTrnslLocalVarInfo[i].tlviStartPC + genTrnslLocalVarInfo[i].tlviLength > offs)) - { - return genTrnslLocalVarInfo[i].tlviName ? compiler->VarNameToStr(genTrnslLocalVarInfo[i].tlviName) : NULL; - } - } - - return NULL; -} - -/*****************************************************************************/ -#endif // defined(DEBUG) -#endif // LATE_DISASM - #ifdef DEBUG /***************************************************************************** @@ -8457,814 +8240,6 @@ unsigned CodeGenInterface::getCurrentStackLevel() const return genStackLevel; } -#ifdef DEBUG -//------------------------------------------------------------------------ -// VariableLiveRanges dumpers -//------------------------------------------------------------------------ - -// Dump "VariableLiveRange" when code has not been generated and we don't have so the assembly native offset -// but at least "emitLocation"s and "siVarLoc" -void CodeGenInterface::VariableLiveKeeper::VariableLiveRange::dumpVariableLiveRange( - const CodeGenInterface* codeGen) const -{ - codeGen->dumpSiVarLoc(&m_VarLocation); - - printf(" ["); - m_StartEmitLocation.Print(codeGen->GetCompiler()->compMethodID); - 
printf(", "); - if (m_EndEmitLocation.Valid()) - { - m_EndEmitLocation.Print(codeGen->GetCompiler()->compMethodID); - } - else - { - printf("..."); - } - printf("]"); -} - -// Dump "VariableLiveRange" when code has been generated and we have the assembly native offset of each "emitLocation" -void CodeGenInterface::VariableLiveKeeper::VariableLiveRange::dumpVariableLiveRange( - emitter* emit, const CodeGenInterface* codeGen) const -{ - assert(emit != nullptr); - - // "VariableLiveRanges" are created setting its location ("m_VarLocation") and the initial native offset - // ("m_StartEmitLocation") - codeGen->dumpSiVarLoc(&m_VarLocation); - - // If this is an open "VariableLiveRange", "m_EndEmitLocation" is non-valid and print -1 - UNATIVE_OFFSET endAssemblyOffset = m_EndEmitLocation.Valid() ? m_EndEmitLocation.CodeOffset(emit) : -1; - - printf(" [%X, %X)", m_StartEmitLocation.CodeOffset(emit), m_EndEmitLocation.CodeOffset(emit)); -} - -//------------------------------------------------------------------------ -// LiveRangeDumper -//------------------------------------------------------------------------ -//------------------------------------------------------------------------ -// resetDumper: If the "liveRange" has its last "VariableLiveRange" closed, it makes -// the "LiveRangeDumper" points to end of "liveRange" (nullptr). In other case, -// it makes the "LiveRangeDumper" points to the last "VariableLiveRange" of -// "liveRange", which is opened. -// -// Arguments: -// liveRanges - the "LiveRangeList" of the "VariableLiveDescriptor" we want to -// update its "LiveRangeDumper". -// -// Notes: -// This method is expected to be called once a the code for a BasicBlock has been -// generated and all the new "VariableLiveRange"s of the variable during this block -// has been dumped. 
-void CodeGenInterface::VariableLiveKeeper::LiveRangeDumper::resetDumper(const LiveRangeList* liveRanges) -{ - // There must have reported something in order to reset - assert(m_hasLiveRangestoDump); - - if (liveRanges->back().m_EndEmitLocation.Valid()) - { - // the last "VariableLiveRange" is closed and the variable - // is no longer alive - m_hasLiveRangestoDump = false; - } - else - { - // the last "VariableLiveRange" remains opened because it is - // live at "BasicBlock"s "bbLiveOut". - m_StartingLiveRange = liveRanges->backPosition(); - } -} - -//------------------------------------------------------------------------ -// setDumperStartAt: Make "LiveRangeDumper" instance points the last "VariableLiveRange" -// added so we can starts dumping from there after the actual "BasicBlock"s code is generated. -// -// Arguments: -// liveRangeIt - an iterator to a position in "VariableLiveDescriptor::m_VariableLiveRanges" -// -// Return Value: -// A const pointer to the "LiveRangeList" containing all the "VariableLiveRange"s -// of the variable with index "varNum". -// -// Notes: -// "varNum" should be always a valid inde ("varnum" < "m_LiveDscCount") -void CodeGenInterface::VariableLiveKeeper::LiveRangeDumper::setDumperStartAt(const LiveRangeListIterator liveRangeIt) -{ - m_hasLiveRangestoDump = true; - m_StartingLiveRange = liveRangeIt; -} - -//------------------------------------------------------------------------ -// getStartForDump: Return an iterator to the first "VariableLiveRange" edited/added -// during the current "BasicBlock" -// -// Return Value: -// A LiveRangeListIterator to the first "VariableLiveRange" in "LiveRangeList" which -// was used during last "BasicBlock". 
-// -CodeGenInterface::VariableLiveKeeper::LiveRangeListIterator CodeGenInterface::VariableLiveKeeper::LiveRangeDumper:: - getStartForDump() const -{ - return m_StartingLiveRange; -} - -//------------------------------------------------------------------------ -// hasLiveRangesToDump: Retutn whether at least a "VariableLiveRange" was alive during -// the current "BasicBlock"'s code generation -// -// Return Value: -// A boolean indicating indicating if there is at least a "VariableLiveRange" -// that has been used for the variable during last "BasicBlock". -// -bool CodeGenInterface::VariableLiveKeeper::LiveRangeDumper::hasLiveRangesToDump() const -{ - return m_hasLiveRangestoDump; -} -#endif // DEBUG - -//------------------------------------------------------------------------ -// VariableLiveDescriptor -//------------------------------------------------------------------------ - -CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::VariableLiveDescriptor(CompAllocator allocator) -{ - // Initialize an empty list - m_VariableLiveRanges = new (allocator) LiveRangeList(allocator); - - INDEBUG(m_VariableLifeBarrier = new (allocator) LiveRangeDumper(m_VariableLiveRanges)); -} - -//------------------------------------------------------------------------ -// hasVariableLiveRangeOpen: Return true if the variable is still alive, -// false in other case. -// -bool CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::hasVariableLiveRangeOpen() const -{ - return !m_VariableLiveRanges->empty() && !m_VariableLiveRanges->back().m_EndEmitLocation.Valid(); -} - -//------------------------------------------------------------------------ -// getLiveRanges: Return the list of variable locations for this variable. -// -// Return Value: -// A const LiveRangeList* pointing to the first variable location if it has -// any or the end of the list in other case. 
-// -CodeGenInterface::VariableLiveKeeper::LiveRangeList* CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor:: - getLiveRanges() const -{ - return m_VariableLiveRanges; -} - -//------------------------------------------------------------------------ -// startLiveRangeFromEmitter: Report this variable as being born in "varLocation" -// since the instruction where "emit" is located. -// -// Arguments: -// varLocation - the home of the variable. -// emit - an emitter* instance located at the first instruction from -// where "varLocation" becomes valid. -// -// Assumptions: -// This variable is being born so it should be dead. -// -// Notes: -// The position of "emit" matters to ensure intervals inclusive of the -// beginning and exclusive of the end. -// -void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::startLiveRangeFromEmitter( - CodeGenInterface::siVarLoc varLocation, emitter* emit) const -{ - noway_assert(emit != nullptr); - - // Is the first "VariableLiveRange" or the previous one has been closed so its "m_EndEmitLocation" is valid - noway_assert(m_VariableLiveRanges->empty() || m_VariableLiveRanges->back().m_EndEmitLocation.Valid()); - - if (!m_VariableLiveRanges->empty() && - siVarLoc::Equals(&varLocation, &(m_VariableLiveRanges->back().m_VarLocation)) && - m_VariableLiveRanges->back().m_EndEmitLocation.IsPreviousInsNum(emit)) - { - JITDUMP("Extending debug range...\n"); - - // The variable is being born just after the instruction at which it died. - // In this case, i.e. an update of the variable's value, we coalesce the live ranges. - m_VariableLiveRanges->back().m_EndEmitLocation.Init(); - } - else - { - JITDUMP("New debug range: %s\n", - m_VariableLiveRanges->empty() - ? "first" - : siVarLoc::Equals(&varLocation, &(m_VariableLiveRanges->back().m_VarLocation)) - ? 
"new var or location" - : "not adjacent"); - // Creates new live range with invalid end - m_VariableLiveRanges->emplace_back(varLocation, emitLocation(), emitLocation()); - m_VariableLiveRanges->back().m_StartEmitLocation.CaptureLocation(emit); - } - -#ifdef DEBUG - if (!m_VariableLifeBarrier->hasLiveRangesToDump()) - { - m_VariableLifeBarrier->setDumperStartAt(m_VariableLiveRanges->backPosition()); - } -#endif // DEBUG - - // startEmitLocationendEmitLocation has to be Valid and endEmitLocationendEmitLocation not - noway_assert(m_VariableLiveRanges->back().m_StartEmitLocation.Valid()); - noway_assert(!m_VariableLiveRanges->back().m_EndEmitLocation.Valid()); -} - -//------------------------------------------------------------------------ -// endLiveRangeAtEmitter: Report this variable as becoming dead since the -// instruction where "emit" is located. -// -// Arguments: -// emit - an emitter* instance located at the first instruction from -// this variable becomes dead. -// -// Assumptions: -// This variable is becoming dead so it should be alive. -// -// Notes: -// The position of "emit" matters to ensure intervals inclusive of the -// beginning and exclusive of the end. -// -void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::endLiveRangeAtEmitter(emitter* emit) const -{ - noway_assert(emit != nullptr); - noway_assert(hasVariableLiveRangeOpen()); - - // Using [close, open) ranges so as to not compute the size of the last instruction - m_VariableLiveRanges->back().m_EndEmitLocation.CaptureLocation(emit); - - JITDUMP("Closing debug range.\n"); - // No m_EndEmitLocation has to be Valid - noway_assert(m_VariableLiveRanges->back().m_EndEmitLocation.Valid()); -} - -//------------------------------------------------------------------------ -// UpdateLiveRangeAtEmitter: Report this variable as changing its variable -// home to "varLocation" since the instruction where "emit" is located. -// -// Arguments: -// varLocation - the new variable location. 
-// emit - an emitter* instance located at the first instruction from -// where "varLocation" becomes valid. -// -// Assumptions: -// This variable is being born so it should be dead. -// -// Notes: -// The position of "emit" matters to ensure intervals inclusive of the -// beginning and exclusive of the end. -// -void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::updateLiveRangeAtEmitter( - CodeGenInterface::siVarLoc varLocation, emitter* emit) const -{ - // This variable is changing home so it has been started before during this block - noway_assert(m_VariableLiveRanges != nullptr && !m_VariableLiveRanges->empty()); - - // And its last m_EndEmitLocation has to be invalid - noway_assert(!m_VariableLiveRanges->back().m_EndEmitLocation.Valid()); - - // If we are reporting again the same home, that means we are doing something twice? - // noway_assert(! CodeGenInterface::siVarLoc::Equals(&m_VariableLiveRanges->back().m_VarLocation, varLocation)); - - // Close previous live range - endLiveRangeAtEmitter(emit); - - startLiveRangeFromEmitter(varLocation, emit); -} - -#ifdef DEBUG -void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::dumpAllRegisterLiveRangesForBlock( - emitter* emit, const CodeGenInterface* codeGen) const -{ - bool first = true; - for (LiveRangeListIterator it = m_VariableLiveRanges->begin(); it != m_VariableLiveRanges->end(); it++) - { - if (!first) - { - printf("; "); - } - it->dumpVariableLiveRange(emit, codeGen); - first = false; - } -} - -void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::dumpRegisterLiveRangesForBlockBeforeCodeGenerated( - const CodeGenInterface* codeGen) const -{ - bool first = true; - for (LiveRangeListIterator it = m_VariableLifeBarrier->getStartForDump(); it != m_VariableLiveRanges->end(); it++) - { - if (!first) - { - printf("; "); - } - it->dumpVariableLiveRange(codeGen); - first = false; - } -} - -// Returns true if a live range for this variable has been recorded -bool 
CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::hasVarLiveRangesToDump() const -{ - return !m_VariableLiveRanges->empty(); -} - -// Returns true if a live range for this variable has been recorded from last call to EndBlock -bool CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::hasVarLiveRangesFromLastBlockToDump() const -{ - return m_VariableLifeBarrier->hasLiveRangesToDump(); -} - -// Reset the barrier so as to dump only next block changes on next block -void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::endBlockLiveRanges() -{ - // make "m_VariableLifeBarrier->m_StartingLiveRange" now points to nullptr for printing purposes - m_VariableLifeBarrier->resetDumper(m_VariableLiveRanges); -} -#endif // DEBUG - -//------------------------------------------------------------------------ -// VariableLiveKeeper -//------------------------------------------------------------------------ -// Initialize structures for VariableLiveRanges -void CodeGenInterface::initializeVariableLiveKeeper() -{ - CompAllocator allocator = compiler->getAllocator(CMK_VariableLiveRanges); - - int amountTrackedVariables = compiler->opts.compDbgInfo ? compiler->info.compLocalsCount : 0; - int amountTrackedArgs = compiler->opts.compDbgInfo ? compiler->info.compArgsCount : 0; - - varLiveKeeper = new (allocator) VariableLiveKeeper(amountTrackedVariables, amountTrackedArgs, compiler, allocator); -} - -CodeGenInterface::VariableLiveKeeper* CodeGenInterface::getVariableLiveKeeper() const -{ - return varLiveKeeper; -}; - -//------------------------------------------------------------------------ -// VariableLiveKeeper: Create an instance of the object in charge of managing -// VariableLiveRanges and initialize the array "m_vlrLiveDsc". -// -// Arguments: -// totalLocalCount - the count of args, special args and IL Local -// variables in the method. -// argsCount - the count of args and special args in the method. 
-// compiler - a compiler instance -// -CodeGenInterface::VariableLiveKeeper::VariableLiveKeeper(unsigned int totalLocalCount, - unsigned int argsCount, - Compiler* comp, - CompAllocator allocator) - : m_LiveDscCount(totalLocalCount) - , m_LiveArgsCount(argsCount) - , m_Compiler(comp) - , m_LastBasicBlockHasBeenEmitted(false) -{ - if (m_LiveDscCount > 0) - { - // Allocate memory for "m_vlrLiveDsc" and initialize each "VariableLiveDescriptor" - m_vlrLiveDsc = allocator.allocate(m_LiveDscCount); - m_vlrLiveDscForProlog = allocator.allocate(m_LiveDscCount); - - for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++) - { - new (m_vlrLiveDsc + varNum, jitstd::placement_t()) VariableLiveDescriptor(allocator); - new (m_vlrLiveDscForProlog + varNum, jitstd::placement_t()) VariableLiveDescriptor(allocator); - } - } -} - -//------------------------------------------------------------------------ -// siStartOrCloseVariableLiveRange: Reports the given variable as beign born -// or becoming dead. -// -// Arguments: -// varDsc - the variable for which a location changed will be reported -// varNum - the index of the variable in the "compiler->lvaTable" -// isBorn - whether the variable is being born from where the emitter is located. -// isDying - whether the variable is dying from where the emitter is located. -// -// Assumptions: -// The emitter should be located on the first instruction from where is true that -// the variable becoming valid (when isBorn is true) or invalid (when isDying is true). -// -// Notes: -// This method is being called from treeLifeUpdater when the variable is being born, -// becoming dead, or both. -// -void CodeGenInterface::VariableLiveKeeper::siStartOrCloseVariableLiveRange(const LclVarDsc* varDsc, - unsigned int varNum, - bool isBorn, - bool isDying) -{ - noway_assert(varDsc != nullptr); - - // Only the variables that exists in the IL, "this", and special arguments - // are reported. 
- if (m_Compiler->opts.compDbgInfo && varNum < m_LiveDscCount) - { - if (isBorn && !isDying) - { - // "varDsc" is valid from this point - siStartVariableLiveRange(varDsc, varNum); - } - if (isDying && !isBorn) - { - // this variable live range is no longer valid from this point - siEndVariableLiveRange(varNum); - } - } -} - -//------------------------------------------------------------------------ -// siStartOrCloseVariableLiveRanges: Iterates the given set of variables -// calling "siStartOrCloseVariableLiveRange" with each one. -// -// Arguments: -// varsIndexSet - the set of variables to report start/end "VariableLiveRange" -// isBorn - whether the set is being born from where the emitter is located. -// isDying - whether the set is dying from where the emitter is located. -// -// Assumptions: -// The emitter should be located on the first instruction from where is true that -// the variable becoming valid (when isBorn is true) or invalid (when isDying is true). -// -// Notes: -// This method is being called from treeLifeUpdater when a set of variables -// is being born, becoming dead, or both. -// -void CodeGenInterface::VariableLiveKeeper::siStartOrCloseVariableLiveRanges(VARSET_VALARG_TP varsIndexSet, - bool isBorn, - bool isDying) -{ - if (m_Compiler->opts.compDbgInfo) - { - VarSetOps::Iter iter(m_Compiler, varsIndexSet); - unsigned varIndex = 0; - while (iter.NextElem(&varIndex)) - { - unsigned int varNum = m_Compiler->lvaTrackedIndexToLclNum(varIndex); - const LclVarDsc* varDsc = m_Compiler->lvaGetDesc(varNum); - siStartOrCloseVariableLiveRange(varDsc, varNum, isBorn, isDying); - } - } -} - -//------------------------------------------------------------------------ -// siStartVariableLiveRange: Reports the given variable as being born. 
-// -// Arguments: -// varDsc - the variable for which a location changed will be reported -// varNum - the index of the variable to report home in lvLiveDsc -// -// Assumptions: -// The emitter should be pointing to the first instruction from where the VariableLiveRange is -// becoming valid. -// The given "varDsc" should have its VariableRangeLists initialized. -// -// Notes: -// This method should be called on every place a Variable is becoming alive. -void CodeGenInterface::VariableLiveKeeper::siStartVariableLiveRange(const LclVarDsc* varDsc, unsigned int varNum) -{ - noway_assert(varDsc != nullptr); - - // Only the variables that exists in the IL, "this", and special arguments are reported, as long as they were - // allocated. - if (m_Compiler->opts.compDbgInfo && varNum < m_LiveDscCount && (varDsc->lvIsInReg() || varDsc->lvOnFrame)) - { - // Build siVarLoc for this born "varDsc" - CodeGenInterface::siVarLoc varLocation = - m_Compiler->codeGen->getSiVarLoc(varDsc, m_Compiler->codeGen->getCurrentStackLevel()); - - VariableLiveDescriptor* varLiveDsc = &m_vlrLiveDsc[varNum]; - // this variable live range is valid from this point - varLiveDsc->startLiveRangeFromEmitter(varLocation, m_Compiler->GetEmitter()); - } -} - -//------------------------------------------------------------------------ -// siEndVariableLiveRange: Reports the variable as becoming dead. -// -// Arguments: -// varNum - the index of the variable at m_vlrLiveDsc or lvaTable in that -// is becoming dead. -// -// Assumptions: -// The given variable should be alive. -// The emitter should be pointing to the first instruction from where the VariableLiveRange is -// becoming invalid. -// -// Notes: -// This method should be called on every place a Variable is becoming dead. -void CodeGenInterface::VariableLiveKeeper::siEndVariableLiveRange(unsigned int varNum) -{ - // Only the variables that exists in the IL, "this", and special arguments - // will be reported. 
- - // This method is being called from genUpdateLife, and that one is called after - // code for BasicBlock have been generated, but the emitter has no longer - // a valid IG so we don't report the close of a "VariableLiveRange" after code is - // emitted. - - if (m_Compiler->opts.compDbgInfo && varNum < m_LiveDscCount && !m_LastBasicBlockHasBeenEmitted && - m_vlrLiveDsc[varNum].hasVariableLiveRangeOpen()) - { - // this variable live range is no longer valid from this point - m_vlrLiveDsc[varNum].endLiveRangeAtEmitter(m_Compiler->GetEmitter()); - } -} - -//------------------------------------------------------------------------ -// siUpdateVariableLiveRange: Reports the change of variable location for the -// given variable. -// -// Arguments: -// varDsc - the variable for which tis home has changed. -// varNum - the index of the variable to report home in lvLiveDsc -// -// Assumptions: -// The given variable should be alive. -// The emitter should be pointing to the first instruction from where -// the new variable location is becoming valid. -// -void CodeGenInterface::VariableLiveKeeper::siUpdateVariableLiveRange(const LclVarDsc* varDsc, unsigned int varNum) -{ - noway_assert(varDsc != nullptr); - - // Only the variables that exists in the IL, "this", and special arguments - // will be reported. This are locals and arguments, and are counted in - // "info.compLocalsCount". - - // This method is being called when the prolog is being generated, and - // the emitter has no longer a valid IG so we don't report the close of - // a "VariableLiveRange" after code is emitted. 
- if (m_Compiler->opts.compDbgInfo && varNum < m_LiveDscCount && !m_LastBasicBlockHasBeenEmitted) - { - // Build the location of the variable - CodeGenInterface::siVarLoc siVarLoc = - m_Compiler->codeGen->getSiVarLoc(varDsc, m_Compiler->codeGen->getCurrentStackLevel()); - - // Report the home change for this variable - VariableLiveDescriptor* varLiveDsc = &m_vlrLiveDsc[varNum]; - varLiveDsc->updateLiveRangeAtEmitter(siVarLoc, m_Compiler->GetEmitter()); - } -} - -//------------------------------------------------------------------------ -// siEndAllVariableLiveRange: Reports the set of variables as becoming dead. -// -// Arguments: -// newLife - the set of variables that are becoming dead. -// -// Assumptions: -// All the variables in the set are alive. -// -// Notes: -// This method is called when the last block being generated to killed all -// the live variables and set a flag to avoid reporting variable locations for -// on next calls to method that update variable liveness. -void CodeGenInterface::VariableLiveKeeper::siEndAllVariableLiveRange(VARSET_VALARG_TP varsToClose) -{ - if (m_Compiler->opts.compDbgInfo) - { - if (m_Compiler->lvaTrackedCount > 0 || !m_Compiler->opts.OptimizationDisabled()) - { - VarSetOps::Iter iter(m_Compiler, varsToClose); - unsigned varIndex = 0; - while (iter.NextElem(&varIndex)) - { - unsigned int varNum = m_Compiler->lvaTrackedIndexToLclNum(varIndex); - siEndVariableLiveRange(varNum); - } - } - else - { - // It seems we are jitting debug code, so we don't have variable - // liveness info - siEndAllVariableLiveRange(); - } - } - - m_LastBasicBlockHasBeenEmitted = true; -} - -//------------------------------------------------------------------------ -// siEndAllVariableLiveRange: Reports all live variables as dead. -// -// Notes: -// This overload exists for the case we are jitting code compiled in -// debug mode. 
When that happen we don't have variable liveness info -// as "BaiscBlock::bbLiveIn" or "BaiscBlock::bbLiveOut" and there is no -// tracked variable. -// -void CodeGenInterface::VariableLiveKeeper::siEndAllVariableLiveRange() -{ - // TODO: we can improve this keeping a set for the variables with - // open VariableLiveRanges - - for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++) - { - const VariableLiveDescriptor* varLiveDsc = m_vlrLiveDsc + varNum; - if (varLiveDsc->hasVariableLiveRangeOpen()) - { - siEndVariableLiveRange(varNum); - } - } -} - -//------------------------------------------------------------------------ -// getLiveRangesForVarForBody: Return the "VariableLiveRange" that correspond to -// the given "varNum". -// -// Arguments: -// varNum - the index of the variable in m_vlrLiveDsc, which is the same as -// in lvaTable. -// -// Return Value: -// A const pointer to the list of variable locations reported for the variable. -// -// Assumptions: -// This variable should be an argument, a special argument or an IL local -// variable. -CodeGenInterface::VariableLiveKeeper::LiveRangeList* CodeGenInterface::VariableLiveKeeper::getLiveRangesForVarForBody( - unsigned int varNum) const -{ - // There should be at least one variable for which its liveness is tracked - noway_assert(varNum < m_LiveDscCount); - - return m_vlrLiveDsc[varNum].getLiveRanges(); -} - -//------------------------------------------------------------------------ -// getLiveRangesForVarForProlog: Return the "VariableLiveRange" that correspond to -// the given "varNum". -// -// Arguments: -// varNum - the index of the variable in m_vlrLiveDsc, which is the same as -// in lvaTable. -// -// Return Value: -// A const pointer to the list of variable locations reported for the variable. -// -// Assumptions: -// This variable should be an argument, a special argument or an IL local -// variable. 
-CodeGenInterface::VariableLiveKeeper::LiveRangeList* CodeGenInterface::VariableLiveKeeper::getLiveRangesForVarForProlog( - unsigned int varNum) const -{ - // There should be at least one variable for which its liveness is tracked - noway_assert(varNum < m_LiveDscCount); - - return m_vlrLiveDscForProlog[varNum].getLiveRanges(); -} - -//------------------------------------------------------------------------ -// getLiveRangesCount: Returns the count of variable locations reported for the tracked -// variables, which are arguments, special arguments, and local IL variables. -// -// Return Value: -// size_t - the count of variable locations -// -// Notes: -// This method is being called from "genSetScopeInfo" to know the count of -// "varResultInfo" that should be created on eeSetLVcount. -// -size_t CodeGenInterface::VariableLiveKeeper::getLiveRangesCount() const -{ - size_t liveRangesCount = 0; - - if (m_Compiler->opts.compDbgInfo) - { - for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++) - { - for (int i = 0; i < 2; i++) - { - VariableLiveDescriptor* varLiveDsc = (i == 0 ? m_vlrLiveDscForProlog : m_vlrLiveDsc) + varNum; - - if (m_Compiler->compMap2ILvarNum(varNum) != (unsigned int)ICorDebugInfo::UNKNOWN_ILNUM) - { - liveRangesCount += varLiveDsc->getLiveRanges()->size(); - } - } - } - } - return liveRangesCount; -} - -//------------------------------------------------------------------------ -// psiStartVariableLiveRange: Reports the given variable as being born. -// -// Arguments: -// varLcation - the variable location -// varNum - the index of the variable in "compiler->lvaTable" or -// "VariableLivekeeper->m_vlrLiveDsc" -// -// Notes: -// This function is expected to be called from "psiBegProlog" during -// prolog code generation. -// -void CodeGenInterface::VariableLiveKeeper::psiStartVariableLiveRange(CodeGenInterface::siVarLoc varLocation, - unsigned int varNum) -{ - // This descriptor has to correspond to a parameter. 
The first slots in lvaTable - // are arguments and special arguments. - noway_assert(varNum < m_LiveArgsCount); - - VariableLiveDescriptor* varLiveDsc = &m_vlrLiveDscForProlog[varNum]; - varLiveDsc->startLiveRangeFromEmitter(varLocation, m_Compiler->GetEmitter()); -} - -//------------------------------------------------------------------------ -// psiClosePrologVariableRanges: Report all the parameters as becoming dead. -// -// Notes: -// This function is expected to be called from preffix "psiEndProlog" after -// code for prolog has been generated. -// -void CodeGenInterface::VariableLiveKeeper::psiClosePrologVariableRanges() -{ - noway_assert(m_LiveArgsCount <= m_LiveDscCount); - - for (unsigned int varNum = 0; varNum < m_LiveArgsCount; varNum++) - { - VariableLiveDescriptor* varLiveDsc = m_vlrLiveDscForProlog + varNum; - - if (varLiveDsc->hasVariableLiveRangeOpen()) - { - varLiveDsc->endLiveRangeAtEmitter(m_Compiler->GetEmitter()); - } - } -} - -#ifdef DEBUG -void CodeGenInterface::VariableLiveKeeper::dumpBlockVariableLiveRanges(const BasicBlock* block) -{ - assert(block != nullptr); - - bool hasDumpedHistory = false; - - printf("\nVariable Live Range History Dump for " FMT_BB "\n", block->bbNum); - - if (m_Compiler->opts.compDbgInfo) - { - for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++) - { - VariableLiveDescriptor* varLiveDsc = m_vlrLiveDsc + varNum; - - if (varLiveDsc->hasVarLiveRangesFromLastBlockToDump()) - { - hasDumpedHistory = true; - m_Compiler->gtDispLclVar(varNum, false); - printf(": "); - varLiveDsc->dumpRegisterLiveRangesForBlockBeforeCodeGenerated(m_Compiler->codeGen); - varLiveDsc->endBlockLiveRanges(); - printf("\n"); - } - } - } - - if (!hasDumpedHistory) - { - printf("..None..\n"); - } -} - -void CodeGenInterface::VariableLiveKeeper::dumpLvaVariableLiveRanges() const -{ - bool hasDumpedHistory = false; - - printf("VARIABLE LIVE RANGES:\n"); - - if (m_Compiler->opts.compDbgInfo) - { - for (unsigned int varNum = 0; varNum < 
m_LiveDscCount; varNum++) - { - VariableLiveDescriptor* varLiveDsc = m_vlrLiveDsc + varNum; - - if (varLiveDsc->hasVarLiveRangesToDump()) - { - hasDumpedHistory = true; - m_Compiler->gtDispLclVar(varNum, false); - printf(": "); - varLiveDsc->dumpAllRegisterLiveRangesForBlock(m_Compiler->GetEmitter(), m_Compiler->codeGen); - printf("\n"); - } - } - } - - if (!hasDumpedHistory) - { - printf("..None..\n"); - } -} -#endif // DEBUG - //----------------------------------------------------------------------------- // genPoisonFrame: Generate code that places a recognizable value into address exposed variables. // diff --git a/src/coreclr/jit/codegeninterface.h b/src/coreclr/jit/codegeninterface.h index bd931e598384a..d0dbccabda6cb 100644 --- a/src/coreclr/jit/codegeninterface.h +++ b/src/coreclr/jit/codegeninterface.h @@ -667,21 +667,21 @@ class CodeGenInterface class LiveRangeDumper { // Iterator to the first edited/added position during actual block code generation. If last - // block had a closed "VariableLiveRange" (with a valid "m_EndEmitLocation") and not changes + // block had a closed "VariableLiveRange" (with a valid "m_EndEmitLocation") and no changes // were applied to variable liveness, it points to the end of variable's LiveRangeList. 
- LiveRangeListIterator m_StartingLiveRange; - bool m_hasLiveRangestoDump; // True if a live range for this variable has been + LiveRangeListIterator m_startingLiveRange; + bool m_hasLiveRangesToDump; // True if a live range for this variable has been // reported from last call to EndBlock public: LiveRangeDumper(const LiveRangeList* liveRanges) - : m_StartingLiveRange(liveRanges->end()), m_hasLiveRangestoDump(false){}; + : m_startingLiveRange(liveRanges->end()), m_hasLiveRangesToDump(false){}; // Make the dumper point to the last "VariableLiveRange" opened or nullptr if all are closed void resetDumper(const LiveRangeList* list); - // Make "LiveRangeDumper" instance points the last "VariableLiveRange" added so we can - // start dumping from there after the actual "BasicBlock"s code is generated. + // Make "LiveRangeDumper" instance point at the last "VariableLiveRange" added so we can + // start dumping from there after the "BasicBlock"s code is generated. void setDumperStartAt(const LiveRangeListIterator liveRangeIt); // Return an iterator to the first "VariableLiveRange" edited/added during the current @@ -703,10 +703,11 @@ class CodeGenInterface class VariableLiveDescriptor { LiveRangeList* m_VariableLiveRanges; // the variable locations of this variable - INDEBUG(LiveRangeDumper* m_VariableLifeBarrier); + INDEBUG(LiveRangeDumper* m_VariableLifeBarrier;) + INDEBUG(unsigned m_varNum;) public: - VariableLiveDescriptor(CompAllocator allocator); + VariableLiveDescriptor(CompAllocator allocator DEBUG_ARG(unsigned varNum)); bool hasVariableLiveRangeOpen() const; LiveRangeList* getLiveRanges() const; diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index 1e50106464556..d36eeb32210f9 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -882,8 +882,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // None. 
// // Assumptions: -// The lclVar must be a register candidate (lvRegCandidate) - +// The lclVar must be a register candidate (lvIsRegCandidate()) +// void CodeGen::genSpillVar(GenTree* tree) { unsigned varNum = tree->AsLclVarCommon()->GetLclNum(); @@ -892,7 +892,7 @@ void CodeGen::genSpillVar(GenTree* tree) assert(varDsc->lvIsRegCandidate()); // We don't actually need to spill if it is already living in memory - bool needsSpill = ((tree->gtFlags & GTF_VAR_DEF) == 0 && varDsc->lvIsInReg()); + const bool needsSpill = ((tree->gtFlags & GTF_VAR_DEF) == 0) && varDsc->lvIsInReg(); if (needsSpill) { // In order for a lclVar to have been allocated to a register, it must not have been aliasable, and can @@ -954,7 +954,7 @@ void CodeGen::genSpillVar(GenTree* tree) if (needsSpill) { - // We need this after "lvRegNum" has change because now we are sure that varDsc->lvIsInReg() is false. + // We need this after "lvRegNum" has changed because now we are sure that varDsc->lvIsInReg() is false. // "SiVarLoc" constructor uses the "LclVarDsc" of the variable. varLiveKeeper->siUpdateVariableLiveRange(varDsc, varNum); } @@ -1211,13 +1211,8 @@ void CodeGen::genUnspillRegIfNeeded(GenTree* tree) // Reset spilled flag, since we are going to load a local variable from its home location. unspillTree->gtFlags &= ~GTF_SPILLED; - GenTreeLclVar* lcl = unspillTree->AsLclVar(); - LclVarDsc* varDsc = compiler->lvaGetDesc(lcl); - var_types unspillType = varDsc->GetRegisterType(lcl); - assert(unspillType != TYP_UNDEF); - -// TODO-Cleanup: The following code could probably be further merged and cleaned up. -#if defined(TARGET_XARCH) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64) + GenTreeLclVar* lcl = unspillTree->AsLclVar(); + LclVarDsc* varDsc = compiler->lvaGetDesc(lcl); // Pick type to reload register from stack with. 
Note that in // general, the type of 'lcl' does not have any relation to the @@ -1241,19 +1236,11 @@ void CodeGen::genUnspillRegIfNeeded(GenTree* tree) // relies on the normalization to have happened here as part of // unspilling. // - if (varDsc->lvNormalizeOnLoad()) - { - unspillType = varDsc->TypeGet(); - } - else + var_types unspillType = varDsc->lvNormalizeOnLoad() ? varDsc->TypeGet() : varDsc->GetStackSlotHomeType(); + + if (varTypeIsGC(lcl)) { - // Potentially narrower -- see if we should widen. - var_types lclLoadType = varDsc->GetStackSlotHomeType(); - assert(lclLoadType != TYP_UNDEF); - if (genTypeSize(unspillType) < genTypeSize(lclLoadType)) - { - unspillType = lclLoadType; - } + unspillType = lcl->TypeGet(); } #if defined(TARGET_LOONGARCH64) @@ -1262,11 +1249,6 @@ void CodeGen::genUnspillRegIfNeeded(GenTree* tree) unspillType = unspillType == TYP_FLOAT ? TYP_INT : TYP_LONG; } #endif -#elif defined(TARGET_ARM) -// No normalizing for ARM -#else - NYI("Unspilling not implemented for this target architecture."); -#endif bool reSpill = ((unspillTree->gtFlags & GTF_SPILL) != 0); bool isLastUse = lcl->IsLastUse(0); @@ -2106,10 +2088,11 @@ void CodeGen::genSpillLocal(unsigned varNum, var_types type, GenTreeLclVar* lclN // node in codegen after code has been emitted for it. // // Arguments: -// tree - Gentree node +// tree - GenTree node // // Return Value: // None. +// void CodeGen::genProduceReg(GenTree* tree) { #ifdef DEBUG @@ -2346,8 +2329,8 @@ void CodeGen::genEmitCallIndir(int callType, int argSize = 0; #endif // !defined(TARGET_X86) - regNumber iReg = (indir->Base() != nullptr) ? indir->Base()->GetRegNum() : REG_NA; - regNumber xReg = (indir->Index() != nullptr) ? indir->Index()->GetRegNum() : REG_NA; + regNumber iReg = indir->HasBase() ? indir->Base()->GetRegNum() : REG_NA; + regNumber xReg = indir->HasIndex() ? 
indir->Index()->GetRegNum() : REG_NA; // These should have been put in volatile registers to ensure they do not // get overridden by epilog sequence during tailcall. diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index c63865121b5c0..f5b9e20b7c3b1 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -1520,6 +1520,8 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) } GetEmitter()->emitIns_J(INS_bl, block->bbJumpDest); + BasicBlock* const nextBlock = block->bbNext; + if (block->bbFlags & BBF_RETLESS_CALL) { // We have a retless call, and the last instruction generated was a call. @@ -1527,7 +1529,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) // block), then we need to generate a breakpoint here (since it will never // get executed) to get proper unwind behavior. - if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext)) + if ((nextBlock == nullptr) || !BasicBlock::sameEHRegion(block, nextBlock)) { instGen(INS_break); // This should never get executed } @@ -1539,8 +1541,10 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) // handler. So turn off GC reporting for this single instruction. GetEmitter()->emitDisableGC(); + BasicBlock* const jumpDest = nextBlock->bbJumpDest; + // Now go to where the finally funclet needs to return to. - if (block->bbNext->bbJumpDest == block->bbNext->bbNext) + if ((jumpDest == nextBlock->bbNext) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) { // Fall-through. 
// TODO-LOONGARCH64-CQ: Can we get rid of this instruction, and just have the call return directly @@ -1550,7 +1554,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) } else { - inst_JMP(EJ_jmp, block->bbNext->bbJumpDest); + inst_JMP(EJ_jmp, jumpDest); } GetEmitter()->emitEnableGC(); @@ -1563,7 +1567,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) if (!(block->bbFlags & BBF_RETLESS_CALL)) { assert(block->isBBCallAlwaysPair()); - block = block->bbNext; + block = nextBlock; } return block; } @@ -7340,7 +7344,7 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode* lea) // addressing mode instruction. Currently we're 'cheating' by producing one or more // instructions to generate the addressing mode so we need to modify lowering to // produce LEAs that are a 1:1 relationship to the LOONGARCH64 architecture. - if (lea->Base() && lea->Index()) + if (lea->HasBase() && lea->HasIndex()) { GenTree* memBase = lea->Base(); GenTree* index = lea->Index(); @@ -7385,7 +7389,7 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode* lea) } } } - else if (lea->Base()) + else if (lea->HasBase()) { GenTree* memBase = lea->Base(); @@ -7416,7 +7420,7 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode* lea) emit->emitIns_R_R_R(INS_add_d, size, lea->GetRegNum(), memBase->GetRegNum(), tmpReg); } } - else if (lea->Index()) + else if (lea->HasIndex()) { // If we encounter a GT_LEA node without a base it means it came out // when attempting to optimize an arbitrary arithmetic expression during lower. 
diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp index 6352fa958eff0..479cd7709c6ff 100644 --- a/src/coreclr/jit/codegenriscv64.cpp +++ b/src/coreclr/jit/codegenriscv64.cpp @@ -570,11 +570,11 @@ void CodeGen::genFuncletProlog(BasicBlock* block) // fiFrameType constraints: assert(frameSize < -2048); - offset = -frameSize - genFuncletInfo.fiSP_to_FPRA_save_delta; - int SP_delta = roundUp((UINT)offset, STACK_ALIGN); - offset = SP_delta - offset; + offset = -frameSize - genFuncletInfo.fiSP_to_FPRA_save_delta; + int spDelta = roundUp((UINT)offset, STACK_ALIGN); + offset = spDelta - offset; - genStackPointerAdjustment(-SP_delta, rsGetRsvdReg(), nullptr, /* reportUnwindData */ true); + genStackPointerAdjustment(-spDelta, rsGetRsvdReg(), nullptr, /* reportUnwindData */ true); GetEmitter()->emitIns_R_R_I(INS_sd, EA_PTRSIZE, REG_FP, REG_SPBASE, offset); compiler->unwindSaveReg(REG_FP, offset); @@ -584,10 +584,10 @@ void CodeGen::genFuncletProlog(BasicBlock* block) maskSaveRegsInt &= ~(RBM_RA | RBM_FP); // We've saved these now - offset = frameSize + SP_delta + genFuncletInfo.fiSP_to_PSP_slot_delta + 8; + offset = frameSize + spDelta + genFuncletInfo.fiSP_to_PSP_slot_delta + 8; genSaveCalleeSavedRegistersHelp(maskSaveRegsInt | maskSaveRegsFloat, offset, 0); - genStackPointerAdjustment(frameSize + SP_delta, rsGetRsvdReg(), nullptr, + genStackPointerAdjustment(frameSize + spDelta, rsGetRsvdReg(), nullptr, /* reportUnwindData */ true); } else @@ -700,15 +700,15 @@ void CodeGen::genFuncletEpilog() // fiFrameType constraints: assert(frameSize < -2048); - int offset = -frameSize - genFuncletInfo.fiSP_to_FPRA_save_delta; - int SP_delta = roundUp((UINT)offset, STACK_ALIGN); - offset = SP_delta - offset; + int offset = -frameSize - genFuncletInfo.fiSP_to_FPRA_save_delta; + int spDelta = roundUp((UINT)offset, STACK_ALIGN); + offset = spDelta - offset; // first, generate daddiu SP,SP,imm - genStackPointerAdjustment(-frameSize - SP_delta, 
rsGetRsvdReg(), nullptr, + genStackPointerAdjustment(-frameSize - spDelta, rsGetRsvdReg(), nullptr, /* reportUnwindData */ true); - int offset2 = frameSize + SP_delta + genFuncletInfo.fiSP_to_PSP_slot_delta + 8; + int offset2 = frameSize + spDelta + genFuncletInfo.fiSP_to_PSP_slot_delta + 8; assert(offset2 < 2040); // can amend. regsToRestoreMask &= ~(RBM_RA | RBM_FP); // We restore FP/RA at the end @@ -721,7 +721,7 @@ void CodeGen::genFuncletEpilog() compiler->unwindSaveReg(REG_FP, offset); // second, generate daddiu SP,SP,imm for remaine space. - genStackPointerAdjustment(SP_delta, rsGetRsvdReg(), nullptr, /* reportUnwindData */ true); + genStackPointerAdjustment(spDelta, rsGetRsvdReg(), nullptr, /* reportUnwindData */ true); } else { @@ -751,13 +751,13 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo() assert((rsMaskSaveRegs & RBM_RA) != 0); assert((rsMaskSaveRegs & RBM_FP) != 0); - unsigned PSPSize = (compiler->lvaPSPSym != BAD_VAR_NUM) ? 8 : 0; + unsigned pspSize = (compiler->lvaPSPSym != BAD_VAR_NUM) ? 
8 : 0; unsigned saveRegsCount = genCountBits(rsMaskSaveRegs); assert((saveRegsCount == compiler->compCalleeRegsPushed) || (saveRegsCount == compiler->compCalleeRegsPushed - 1)); unsigned saveRegsPlusPSPSize = - roundUp((UINT)genTotalFrameSize(), STACK_ALIGN) - compiler->compLclFrameSize + PSPSize; + roundUp((UINT)genTotalFrameSize(), STACK_ALIGN) - compiler->compLclFrameSize + pspSize; unsigned saveRegsPlusPSPSizeAligned = roundUp(saveRegsPlusPSPSize, STACK_ALIGN); @@ -767,7 +767,7 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo() unsigned maxFuncletFrameSizeAligned = saveRegsPlusPSPSizeAligned + outgoingArgSpaceAligned; assert((maxFuncletFrameSizeAligned % STACK_ALIGN) == 0); - int SP_to_FPRA_save_delta = compiler->lvaOutgoingArgSpaceSize; + int spToFpraSaveDelta = compiler->lvaOutgoingArgSpaceSize; unsigned funcletFrameSize = saveRegsPlusPSPSize + compiler->lvaOutgoingArgSpaceSize; unsigned funcletFrameSizeAligned = roundUp(funcletFrameSize, STACK_ALIGN); @@ -790,16 +790,16 @@ void CodeGen::genCaptureFuncletPrologEpilogInfo() saveRegsPlusPSPSize -= 2 * 8; // FP/RA } - int CallerSP_to_PSP_slot_delta = -(int)saveRegsPlusPSPSize; - genFuncletInfo.fiSpDelta1 = -(int)funcletFrameSizeAligned; - int SP_to_PSP_slot_delta = funcletFrameSizeAligned - saveRegsPlusPSPSize; + int callerSpToPspSlotDelta = -(int)saveRegsPlusPSPSize; + genFuncletInfo.fiSpDelta1 = -(int)funcletFrameSizeAligned; + int spToPspSlotDelta = funcletFrameSizeAligned - saveRegsPlusPSPSize; /* Now save it for future use */ genFuncletInfo.fiSaveRegs = rsMaskSaveRegs; - genFuncletInfo.fiSP_to_FPRA_save_delta = SP_to_FPRA_save_delta; + genFuncletInfo.fiSP_to_FPRA_save_delta = spToFpraSaveDelta; - genFuncletInfo.fiSP_to_PSP_slot_delta = SP_to_PSP_slot_delta; - genFuncletInfo.fiCallerSP_to_PSP_slot_delta = CallerSP_to_PSP_slot_delta; + genFuncletInfo.fiSP_to_PSP_slot_delta = spToPspSlotDelta; + genFuncletInfo.fiCallerSP_to_PSP_slot_delta = callerSpToPspSlotDelta; #ifdef DEBUG if (verbose) @@ -816,8 +816,8 
@@ void CodeGen::genCaptureFuncletPrologEpilogInfo() if (compiler->lvaPSPSym != BAD_VAR_NUM) { - if (CallerSP_to_PSP_slot_delta != compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)) // for - // debugging + if (callerSpToPspSlotDelta != compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)) // for + // debugging { printf("lvaGetCallerSPRelativeOffset(lvaPSPSym): %d\n", compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)); @@ -1008,14 +1008,14 @@ void CodeGen::genSetPSPSym(regNumber initReg, bool* pInitRegZeroed) noway_assert(isFramePointerUsed()); // We need an explicit frame pointer - int SPtoCallerSPdelta = -genCallerSPtoInitialSPdelta(); + int spToCallerSpDelta = -genCallerSPtoInitialSPdelta(); // We will just use the initReg since it is an available register // and we are probably done using it anyway... regNumber regTmp = initReg; *pInitRegZeroed = false; - genInstrWithConstant(INS_addi, EA_PTRSIZE, regTmp, REG_SPBASE, SPtoCallerSPdelta, rsGetRsvdReg(), false); + genInstrWithConstant(INS_addi, EA_PTRSIZE, regTmp, REG_SPBASE, spToCallerSpDelta, rsGetRsvdReg(), false); GetEmitter()->emitIns_S_R(INS_sd, EA_PTRSIZE, regTmp, compiler->lvaPSPSym, 0); } @@ -1158,6 +1158,8 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) } GetEmitter()->emitIns_J(INS_jal, block->bbJumpDest); + BasicBlock* const nextBlock = block->bbNext; + if (block->bbFlags & BBF_RETLESS_CALL) { // We have a retless call, and the last instruction generated was a call. @@ -1165,7 +1167,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) // block), then we need to generate a breakpoint here (since it will never // get executed) to get proper unwind behavior. - if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext)) + if ((nextBlock == nullptr) || !BasicBlock::sameEHRegion(block, nextBlock)) { instGen(INS_ebreak); // This should never get executed } @@ -1177,8 +1179,10 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) // handler. 
So turn off GC reporting for this single instruction. GetEmitter()->emitDisableGC(); + BasicBlock* const jumpDest = nextBlock->bbJumpDest; + // Now go to where the finally funclet needs to return to. - if (block->bbNext->bbJumpDest == block->bbNext->bbNext) + if ((jumpDest == nextBlock->bbNext) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) { // Fall-through. // TODO-RISCV64-CQ: Can we get rid of this instruction, and just have the call return directly @@ -1188,7 +1192,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) } else { - inst_JMP(EJ_jmp, block->bbNext->bbJumpDest); + inst_JMP(EJ_jmp, jumpDest); } GetEmitter()->emitEnableGC(); @@ -1201,7 +1205,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) if (!(block->bbFlags & BBF_RETLESS_CALL)) { assert(block->isBBCallAlwaysPair()); - block = block->bbNext; + block = nextBlock; } return block; } @@ -2089,14 +2093,14 @@ void CodeGen::genCodeForDivMod(GenTreeOp* tree) else // the divisor is not the constant zero { GenTree* src1 = tree->gtOp1; - unsigned TypeSize = genTypeSize(genActualType(tree->TypeGet())); - emitAttr size = EA_ATTR(TypeSize); + unsigned typeSize = genTypeSize(genActualType(tree->TypeGet())); + emitAttr size = EA_ATTR(typeSize); - assert(TypeSize >= genTypeSize(genActualType(src1->TypeGet())) && - TypeSize >= genTypeSize(genActualType(divisorOp->TypeGet()))); + assert(typeSize >= genTypeSize(genActualType(src1->TypeGet())) && + typeSize >= genTypeSize(genActualType(divisorOp->TypeGet()))); // ssize_t intConstValue = divisorOp->AsIntCon()->gtIconVal; - regNumber Reg1 = src1->GetRegNum(); + regNumber reg1 = src1->GetRegNum(); regNumber divisorReg = divisorOp->GetRegNum(); instruction ins; @@ -2118,8 +2122,8 @@ void CodeGen::genCodeForDivMod(GenTreeOp* tree) { assert(!divisorOp->isContainedIntOrIImmed()); ssize_t intConst = (int)(src1->AsIntCon()->gtIconVal); - Reg1 = rsGetRsvdReg(); - emit->emitLoadImmediate(EA_PTRSIZE, Reg1, intConst); + reg1 = rsGetRsvdReg(); + 
emit->emitLoadImmediate(EA_PTRSIZE, reg1, intConst); } } else @@ -2202,7 +2206,7 @@ void CodeGen::genCodeForDivMod(GenTreeOp* tree) } } - emit->emitIns_R_R_R(ins, size, tree->GetRegNum(), Reg1, divisorReg); + emit->emitIns_R_R_R(ins, size, tree->GetRegNum(), reg1, divisorReg); } else // if (tree->gtOper == GT_UDIV) GT_UMOD { @@ -2232,7 +2236,7 @@ void CodeGen::genCodeForDivMod(GenTreeOp* tree) } // TODO-RISCV64: here is just for signed-extension ? - emit->emitIns_R_R_I(INS_slliw, EA_4BYTE, Reg1, Reg1, 0); + emit->emitIns_R_R_I(INS_slliw, EA_4BYTE, reg1, reg1, 0); emit->emitIns_R_R_I(INS_slliw, EA_4BYTE, divisorReg, divisorReg, 0); } else @@ -2247,7 +2251,7 @@ void CodeGen::genCodeForDivMod(GenTreeOp* tree) } } - emit->emitIns_R_R_R(ins, size, tree->GetRegNum(), Reg1, divisorReg); + emit->emitIns_R_R_R(ins, size, tree->GetRegNum(), reg1, divisorReg); } } } @@ -3238,10 +3242,10 @@ void CodeGen::genIntToFloatCast(GenTree* treeNode) emitAttr srcSize = EA_ATTR(genTypeSize(srcType)); noway_assert((srcSize == EA_4BYTE) || (srcSize == EA_8BYTE)); - bool IsUnsigned = treeNode->gtFlags & GTF_UNSIGNED; + bool isUnsigned = treeNode->gtFlags & GTF_UNSIGNED; instruction ins = INS_invalid; - if (IsUnsigned) + if (isUnsigned) { if (dstType == TYP_DOUBLE) { @@ -3340,10 +3344,10 @@ void CodeGen::genFloatToIntCast(GenTree* treeNode) emitAttr dstSize = EA_ATTR(genTypeSize(dstType)); noway_assert((dstSize == EA_4BYTE) || (dstSize == EA_8BYTE)); - bool IsUnsigned = treeNode->gtFlags & GTF_UNSIGNED; + bool isUnsigned = varTypeIsUnsigned(dstType); instruction ins = INS_invalid; - if (IsUnsigned) + if (isUnsigned) { if (srcType == TYP_DOUBLE) { @@ -3375,7 +3379,7 @@ void CodeGen::genFloatToIntCast(GenTree* treeNode) { emitAttr attr = emitActualTypeSize(dstType); - if (IsUnsigned) + if (isUnsigned) { GetEmitter()->emitIns_R_R_I(INS_slli, attr, treeNode->GetRegNum(), treeNode->GetRegNum(), 32); GetEmitter()->emitIns_R_R_I(INS_srli, attr, treeNode->GetRegNum(), treeNode->GetRegNum(), 32); @@ 
-3569,7 +3573,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) assert(!op1->isContainedIntOrIImmed()); assert(tree->OperIs(GT_LT, GT_LE, GT_EQ, GT_NE, GT_GT, GT_GE)); - bool IsUnsigned = (tree->gtFlags & GTF_UNSIGNED) != 0; + bool isUnsigned = (tree->gtFlags & GTF_UNSIGNED) != 0; regNumber regOp1 = op1->GetRegNum(); if (op2->isContainedIntOrIImmed()) @@ -3579,7 +3583,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) switch (cmpSize) { case EA_4BYTE: - if (IsUnsigned) + if (isUnsigned) { imm = static_cast(imm); @@ -3603,44 +3607,44 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) if (tree->OperIs(GT_LT)) { - if (!IsUnsigned && emitter::isValidSimm12(imm)) + if (!isUnsigned && emitter::isValidSimm12(imm)) { emit->emitIns_R_R_I(INS_slti, EA_PTRSIZE, targetReg, regOp1, imm); } - else if (IsUnsigned && emitter::isValidUimm11(imm)) + else if (isUnsigned && emitter::isValidUimm11(imm)) { emit->emitIns_R_R_I(INS_sltiu, EA_PTRSIZE, targetReg, regOp1, imm); } else { emit->emitLoadImmediate(EA_PTRSIZE, REG_RA, imm); - emit->emitIns_R_R_R(IsUnsigned ? INS_sltu : INS_slt, EA_PTRSIZE, targetReg, regOp1, REG_RA); + emit->emitIns_R_R_R(isUnsigned ? INS_sltu : INS_slt, EA_PTRSIZE, targetReg, regOp1, REG_RA); } } else if (tree->OperIs(GT_LE)) { - if (!IsUnsigned && emitter::isValidSimm12(imm + 1)) + if (!isUnsigned && emitter::isValidSimm12(imm + 1)) { emit->emitIns_R_R_I(INS_slti, EA_PTRSIZE, targetReg, regOp1, imm + 1); } - else if (IsUnsigned && emitter::isValidUimm11(imm + 1)) + else if (isUnsigned && emitter::isValidUimm11(imm + 1)) { emit->emitIns_R_R_I(INS_sltiu, EA_PTRSIZE, targetReg, regOp1, imm + 1); } else { emit->emitLoadImmediate(EA_PTRSIZE, REG_RA, imm + 1); - emit->emitIns_R_R_R(IsUnsigned ? INS_sltu : INS_slt, EA_PTRSIZE, targetReg, regOp1, REG_RA); + emit->emitIns_R_R_R(isUnsigned ? 
INS_sltu : INS_slt, EA_PTRSIZE, targetReg, regOp1, REG_RA); } } else if (tree->OperIs(GT_GT)) { - if (!IsUnsigned && emitter::isValidSimm12(imm + 1)) + if (!isUnsigned && emitter::isValidSimm12(imm + 1)) { emit->emitIns_R_R_I(INS_slti, EA_PTRSIZE, targetReg, regOp1, imm + 1); emit->emitIns_R_R_I(INS_xori, EA_PTRSIZE, targetReg, targetReg, 1); } - else if (IsUnsigned && emitter::isValidUimm11(imm + 1)) + else if (isUnsigned && emitter::isValidUimm11(imm + 1)) { emit->emitIns_R_R_I(INS_sltiu, EA_PTRSIZE, targetReg, regOp1, imm + 1); emit->emitIns_R_R_I(INS_xori, EA_PTRSIZE, targetReg, targetReg, 1); @@ -3648,23 +3652,23 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) else { emit->emitLoadImmediate(EA_PTRSIZE, REG_RA, imm); - emit->emitIns_R_R_R(IsUnsigned ? INS_sltu : INS_slt, EA_PTRSIZE, targetReg, REG_RA, regOp1); + emit->emitIns_R_R_R(isUnsigned ? INS_sltu : INS_slt, EA_PTRSIZE, targetReg, REG_RA, regOp1); } } else if (tree->OperIs(GT_GE)) { - if (!IsUnsigned && emitter::isValidSimm12(imm)) + if (!isUnsigned && emitter::isValidSimm12(imm)) { emit->emitIns_R_R_I(INS_slti, EA_PTRSIZE, targetReg, regOp1, imm); } - else if (IsUnsigned && emitter::isValidUimm11(imm)) + else if (isUnsigned && emitter::isValidUimm11(imm)) { emit->emitIns_R_R_I(INS_sltiu, EA_PTRSIZE, targetReg, regOp1, imm); } else { emit->emitLoadImmediate(EA_PTRSIZE, REG_RA, imm); - emit->emitIns_R_R_R(IsUnsigned ? INS_sltu : INS_slt, EA_PTRSIZE, targetReg, regOp1, REG_RA); + emit->emitIns_R_R_R(isUnsigned ? 
INS_sltu : INS_slt, EA_PTRSIZE, targetReg, regOp1, REG_RA); } emit->emitIns_R_R_I(INS_xori, EA_PTRSIZE, targetReg, targetReg, 1); } @@ -3716,7 +3720,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) assert(regOp1 != tmpRegOp2); assert(regOp2 != tmpRegOp2); - if (IsUnsigned) + if (isUnsigned) { emit->emitIns_R_R_I(INS_slli, EA_8BYTE, tmpRegOp1, regOp1, 32); emit->emitIns_R_R_I(INS_srli, EA_8BYTE, tmpRegOp1, tmpRegOp1, 32); @@ -3736,20 +3740,20 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) if (tree->OperIs(GT_LT)) { - emit->emitIns_R_R_R(IsUnsigned ? INS_sltu : INS_slt, EA_8BYTE, targetReg, regOp1, regOp2); + emit->emitIns_R_R_R(isUnsigned ? INS_sltu : INS_slt, EA_8BYTE, targetReg, regOp1, regOp2); } else if (tree->OperIs(GT_LE)) { - emit->emitIns_R_R_R(IsUnsigned ? INS_sltu : INS_slt, EA_8BYTE, targetReg, regOp2, regOp1); + emit->emitIns_R_R_R(isUnsigned ? INS_sltu : INS_slt, EA_8BYTE, targetReg, regOp2, regOp1); emit->emitIns_R_R_I(INS_xori, EA_PTRSIZE, targetReg, targetReg, 1); } else if (tree->OperIs(GT_GT)) { - emit->emitIns_R_R_R(IsUnsigned ? INS_sltu : INS_slt, EA_8BYTE, targetReg, regOp2, regOp1); + emit->emitIns_R_R_R(isUnsigned ? INS_sltu : INS_slt, EA_8BYTE, targetReg, regOp2, regOp1); } else if (tree->OperIs(GT_GE)) { - emit->emitIns_R_R_R(IsUnsigned ? INS_sltu : INS_slt, EA_8BYTE, targetReg, regOp1, regOp2); + emit->emitIns_R_R_R(isUnsigned ? INS_sltu : INS_slt, EA_8BYTE, targetReg, regOp1, regOp2); emit->emitIns_R_R_I(INS_xori, EA_PTRSIZE, targetReg, targetReg, 1); } else if (tree->OperIs(GT_NE)) @@ -7005,7 +7009,7 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode* lea) // addressing mode instruction. Currently we're 'cheating' by producing one or more // instructions to generate the addressing mode so we need to modify lowering to // produce LEAs that are a 1:1 relationship to the RISCV64 architecture. 
- if (lea->Base() && lea->Index()) + if (lea->HasBase() && lea->HasIndex()) { GenTree* memBase = lea->Base(); GenTree* index = lea->Index(); @@ -7050,7 +7054,7 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode* lea) } } } - else if (lea->Base()) + else if (lea->HasBase()) { GenTree* memBase = lea->Base(); @@ -7081,7 +7085,7 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode* lea) emit->emitIns_R_R_R(INS_add, size, lea->GetRegNum(), memBase->GetRegNum(), tmpReg); } } - else if (lea->Index()) + else if (lea->HasIndex()) { // If we encounter a GT_LEA node without a base it means it came out // when attempting to optimize an arbitrary arithmetic expression during lower. @@ -7534,10 +7538,10 @@ void CodeGen::genPopCalleeSavedRegisters(bool jmpEpilog) { if (compiler->compLocallocUsed) { - int SPtoFPdelta = genSPtoFPdelta(); + int spToFpDelta = genSPtoFPdelta(); // Restore sp from fp - GetEmitter()->emitIns_R_R_I(INS_addi, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, -SPtoFPdelta); - compiler->unwindSetFrameReg(REG_FPBASE, SPtoFPdelta); + GetEmitter()->emitIns_R_R_I(INS_addi, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, -spToFpDelta); + compiler->unwindSetFrameReg(REG_FPBASE, spToFpDelta); } JITDUMP("Frame type 1(save FP/RA at bottom). #outsz=%d; #framesz=%d; localloc? %s\n", @@ -7676,11 +7680,22 @@ void CodeGen::genFnPrologCalleeRegArgs() noway_assert(regArgMaskLive != 0); unsigned varNum; - unsigned regArgMaskIsInt = 0; - unsigned regArgNum = 0; - // Process any circular dependencies - unsigned regArg[MAX_REG_ARG * 2] = {0}; - unsigned regArgInit[MAX_REG_ARG * 2] = {0}; + unsigned regArgNum = 0; + // Process any rearrangements including circular dependencies. 
+ regNumber regArg[MAX_REG_ARG + MAX_FLOAT_REG_ARG]; + regNumber regArgInit[MAX_REG_ARG + MAX_FLOAT_REG_ARG]; + emitAttr regArgAttr[MAX_REG_ARG + MAX_FLOAT_REG_ARG]; + + for (int i = 0; i < MAX_REG_ARG + MAX_FLOAT_REG_ARG; i++) + { + regArg[i] = REG_NA; + +#ifdef DEBUG + regArgInit[i] = REG_NA; + regArgAttr[i] = EA_UNKNOWN; +#endif + } + for (varNum = 0; varNum < compiler->lvaCount; ++varNum) { LclVarDsc* varDsc = compiler->lvaTable + varNum; @@ -7736,20 +7751,19 @@ void CodeGen::genFnPrologCalleeRegArgs() { if (genIsValidIntReg(varDsc->GetArgReg())) { - assert(varDsc->GetArgReg() >= REG_ARG_FIRST && varDsc->GetArgReg() <= REG_ARG_LAST); + assert(isValidIntArgReg(varDsc->GetArgReg())); regArg[varDsc->GetArgReg() - REG_ARG_FIRST] = varDsc->GetArgReg(); regArgInit[varDsc->GetArgReg() - REG_ARG_FIRST] = varDsc->GetArgInitReg(); - if (varDsc->TypeGet() == TYP_INT) - { - regArgMaskIsInt = 1 << (unsigned)varDsc->GetArgReg(); - } + regArgAttr[varDsc->GetArgReg() - REG_ARG_FIRST] = + varDsc->TypeGet() == TYP_INT ? EA_4BYTE : EA_PTRSIZE; } else { - assert(genIsValidFloatReg(varDsc->GetArgReg())); - assert(varDsc->GetArgReg() >= REG_ARG_FP_FIRST && varDsc->GetArgReg() <= REG_ARG_FP_LAST); - regArg[(varDsc->GetArgReg() - REG_ARG_FP_FIRST) | 0x8] = varDsc->GetArgReg(); - regArgInit[(varDsc->GetArgReg() - REG_ARG_FP_FIRST) | 0x8] = varDsc->GetArgInitReg(); + assert(isValidFloatArgReg(varDsc->GetArgReg())); + regArg[varDsc->GetArgReg() - REG_ARG_FP_FIRST + MAX_REG_ARG] = varDsc->GetArgReg(); + regArgInit[varDsc->GetArgReg() - REG_ARG_FP_FIRST + MAX_REG_ARG] = varDsc->GetArgInitReg(); + regArgAttr[varDsc->GetArgReg() - REG_ARG_FP_FIRST + MAX_REG_ARG] = + varDsc->TypeGet() == TYP_FLOAT ? 
EA_4BYTE : EA_PTRSIZE; } regArgNum++; } @@ -7771,9 +7785,11 @@ void CodeGen::genFnPrologCalleeRegArgs() } else { - assert(genIsValidFloatReg(varDsc->GetArgReg())); - regArg[(varDsc->GetArgReg() & 7) | 0x8] = varDsc->GetArgReg(); - regArgInit[(varDsc->GetArgReg() & 7) | 0x8] = varDsc->GetArgInitReg(); + assert(isValidFloatArgReg(varDsc->GetArgReg())); + regArg[varDsc->GetArgReg() - REG_ARG_FP_FIRST + MAX_REG_ARG] = varDsc->GetArgReg(); + regArgInit[varDsc->GetArgReg() - REG_ARG_FP_FIRST + MAX_REG_ARG] = varDsc->GetArgInitReg(); + regArgAttr[varDsc->GetArgReg() - REG_ARG_FP_FIRST + MAX_REG_ARG] = + varDsc->TypeGet() == TYP_FLOAT ? EA_4BYTE : EA_PTRSIZE; regArgNum++; } } @@ -7983,22 +7999,17 @@ void CodeGen::genFnPrologCalleeRegArgs() if (regArgNum > 0) { - for (int i = MAX_REG_ARG - 1; i >= 0; i--) + for (int i = MAX_REG_ARG + MAX_FLOAT_REG_ARG - 1; i >= 0; i--) { - if (regArg[i] > 0 && (regArgInit[i] <= REG_S1 || regArgInit[i] > REG_A7)) + if (regArg[i] != REG_NA && !isValidIntArgReg(regArgInit[i]) && !isValidFloatArgReg(regArgInit[i])) { - instruction ins; - if ((regArgMaskIsInt & (1 << regArg[i])) != 0) - { - ins = INS_slliw; - } - else - { - ins = INS_ori; - } - GetEmitter()->emitIns_R_R_I(ins, EA_PTRSIZE, (regNumber)regArgInit[i], (regNumber)regArg[i], 0); - regArgMaskLive &= ~genRegMask((regNumber)regArg[i]); - regArg[i] = 0; + assert(regArg[i] != regArgInit[i]); + assert(isValidIntArgReg(regArg[i]) || isValidFloatArgReg(regArg[i])); + + GetEmitter()->emitIns_Mov(regArgAttr[i], regArgInit[i], regArg[i], false); + + regArgMaskLive &= ~genRegMask(regArg[i]); + regArg[i] = REG_NA; regArgNum -= 1; } } @@ -8006,170 +8017,86 @@ void CodeGen::genFnPrologCalleeRegArgs() if (regArgNum > 0) { - instruction ins; - for (int i = MAX_REG_ARG - 1; i >= 0; i--) + for (int i = MAX_REG_ARG + MAX_FLOAT_REG_ARG - 1; i >= 0; i--) { - if (regArg[i] > 0) + if (regArg[i] != REG_NA) { - assert(genIsValidIntReg((regNumber)regArg[i])); - 
assert(genIsValidIntReg((regNumber)regArgInit[i])); + assert(regArg[i] != regArgInit[i]); - regArgNum--; - regArgMaskLive &= ~genRegMask((regNumber)regArg[i]); - if ((regArgMaskIsInt & (1 << regArg[i])) != 0) - { - ins = INS_slliw; - } - else - { - ins = INS_ori; - } + // regArg indexes list + unsigned indexList[MAX_REG_ARG + MAX_FLOAT_REG_ARG]; + int count = 0; // Number of nodes in list + bool loop = false; // List has a loop - if (regArgNum == 0) + for (unsigned cur = i; regArg[cur] != REG_NA; count++) { - GetEmitter()->emitIns_R_R_I(ins, EA_PTRSIZE, (regNumber)regArgInit[i], (regNumber)regArg[i], 0); - break; - } - else if (regArgInit[i] > regArg[i]) - { - GetEmitter()->emitIns_R_R_I(ins, EA_PTRSIZE, (regNumber)regArgInit[i], (regNumber)regArg[i], 0); - } - else - { - assert(i > 0); - assert(regArgNum > 0); + if (cur == i && count > 0) + { + loop = true; + break; + } + + indexList[count] = cur; - int j = regArgInit[i] - REG_ARG_FIRST; - assert((j >= 0) && (j < MAX_REG_ARG)); - if (regArg[j] == 0) + for (int count2 = 0; count2 < count; count2++) + { + // The list could not have backlinks except last to first case which handled above. + assert(cur != indexList[count2] && "Attempt to move several values on same register."); + } + assert(cur < MAX_REG_ARG + MAX_FLOAT_REG_ARG); + assert(isValidIntArgReg(regArg[cur]) || isValidFloatArgReg(regArg[cur])); + + if (isValidIntArgReg(regArgInit[cur])) + { + cur = regArgInit[cur] - REG_ARG_FIRST; + } + else if (isValidFloatArgReg(regArgInit[cur])) { - GetEmitter()->emitIns_R_R_I(ins, EA_PTRSIZE, (regNumber)regArgInit[i], (regNumber)regArg[i], 0); + cur = regArgInit[cur] - REG_ARG_FP_FIRST + MAX_REG_ARG; } else { - int k = regArgInit[j] - REG_ARG_FIRST; - // assert((k >= 0) && (k < MAX_REG_ARG)); - instruction ins2 = (regArgMaskIsInt & (1 << regArg[j])) != 0 ? 
INS_slliw : INS_ori; - if ((regArg[k] == 0) || (k > i) || k < 0) - { - GetEmitter()->emitIns_R_R_I(ins2, EA_PTRSIZE, (regNumber)regArgInit[j], - (regNumber)regArg[j], 0); - GetEmitter()->emitIns_R_R_I(ins, EA_PTRSIZE, (regNumber)regArgInit[i], (regNumber)regArg[i], - 0); - regArgNum--; - regArgMaskLive &= ~genRegMask((regNumber)regArg[j]); - regArg[j] = 0; - } - else if (k == i) - { - GetEmitter()->emitIns_R_R_I(ins, EA_PTRSIZE, rsGetRsvdReg(), (regNumber)regArg[i], 0); - GetEmitter()->emitIns_R_R_I(ins2, EA_PTRSIZE, (regNumber)regArgInit[j], - (regNumber)regArg[j], 0); - GetEmitter()->emitIns_R_R_I(INS_ori, EA_PTRSIZE, (regNumber)regArgInit[i], rsGetRsvdReg(), - 0); - regArgNum--; - regArgMaskLive &= ~genRegMask((regNumber)regArg[j]); - regArg[j] = 0; - } - else - { - NYI_RISCV64("-----------CodeGen::genFnPrologCalleeRegArgs() error!--"); - } - - if (regArgNum == 0) - { - break; - } + assert(!"Argument register is neither valid float nor valid int argument register"); } } - } - } - if (regArgNum > 0) - { - for (int i = MAX_REG_ARG + MAX_FLOAT_REG_ARG - 1; i >= MAX_REG_ARG; i--) - { - if (regArg[i] > 0) + if (loop) + { + unsigned tmpArg = indexList[count - 1]; + + GetEmitter()->emitIns_Mov(regArgAttr[tmpArg], rsGetRsvdReg(), regArg[tmpArg], false); + count--; // Decrease count to not access last node which regArgInit points to start node i + assert(count > 0); + } + + for (int cur = count - 1; cur >= 0; cur--) { - assert(genIsValidFloatReg((regNumber)regArg[i])); + unsigned tmpArg = indexList[cur]; - instruction ins = genIsValidIntReg((regNumber)regArgInit[i]) ? 
INS_fmv_x_d : INS_fsgnj_d; + GetEmitter()->emitIns_Mov(regArgAttr[tmpArg], regArgInit[tmpArg], regArg[tmpArg], false); + regArgMaskLive &= ~genRegMask(regArg[tmpArg]); + regArg[tmpArg] = REG_NA; regArgNum--; - regArgMaskLive &= ~genRegMask((regNumber)regArg[i]); - if (regArgNum == 0) - { - GetEmitter()->emitIns_Mov(ins, EA_PTRSIZE, (regNumber)regArgInit[i], (regNumber)regArg[i], - true); - break; - } - else if (regArgInit[i] > regArg[i] || (regArgInit[i] <= REG_F9)) - { - GetEmitter()->emitIns_R_R_R(INS_fsgnj_d, EA_PTRSIZE, (regNumber)regArgInit[i], - (regNumber)regArg[i], (regNumber)regArg[i]); - } - else - { - assert(i > MAX_REG_ARG); - assert(regArgNum > 0); + assert(regArgNum >= 0); + }; - int j = genIsValidIntReg((regNumber)regArgInit[i]) - ? (regArgInit[i] - REG_ARG_FIRST) - : ((((int)regArgInit[i]) - REG_ARG_FP_FIRST) + 0x8); - if (j < MAX_REG_ARG || regArg[j] == 0) - { - GetEmitter()->emitIns_Mov(ins, EA_PTRSIZE, (regNumber)regArgInit[i], (regNumber)regArg[i], - true); - } - else - { - // NOTE: Not support the int-register case. 
- assert(genIsValidFloatReg((regNumber)regArg[j])); - assert(genIsValidFloatReg((regNumber)regArgInit[j])); + if (loop) + { + unsigned tmpArg = indexList[count]; // count was decreased for loop case - int k = (((int)regArgInit[j]) - REG_ARG_FP_FIRST) + 0x8; - if ((regArg[k] == 0) || (k > i) || (k < MAX_REG_ARG)) - { - GetEmitter()->emitIns_R_R_R(INS_fsgnj_d, EA_PTRSIZE, (regNumber)regArgInit[j], - (regNumber)regArg[j], (regNumber)regArg[j]); - GetEmitter()->emitIns_R_R_R(INS_fsgnj_d, EA_PTRSIZE, (regNumber)regArgInit[i], - (regNumber)regArg[i], (regNumber)regArg[i]); - regArgNum--; - regArgMaskLive &= ~genRegMask((regNumber)regArg[j]); - regArg[j] = 0; - if (regArgNum == 0) - { - break; - } - } - else if (k == i) - { - GetEmitter()->emitIns_R_R_R(INS_fsgnj_d, EA_PTRSIZE, REG_SCRATCH_FLT, - (regNumber)regArg[i], (regNumber)regArg[i]); - GetEmitter()->emitIns_R_R_R(INS_fsgnj_d, EA_PTRSIZE, (regNumber)regArgInit[j], - (regNumber)regArg[j], (regNumber)regArg[j]); - GetEmitter()->emitIns_R_R_R(INS_fsgnj_d, EA_PTRSIZE, (regNumber)regArgInit[i], - REG_SCRATCH_FLT, REG_SCRATCH_FLT); - regArgNum--; - regArgMaskLive &= ~genRegMask((regNumber)regArg[j]); - regArg[j] = 0; - if (regArgNum == 0) - { - break; - } - } - else - { - NYI_RISCV64("-----------CodeGen::genFnPrologCalleeRegArgs() error!--"); - } - } - } + GetEmitter()->emitIns_Mov(regArgAttr[i], regArg[tmpArg], rsGetRsvdReg(), false); + + regArgMaskLive &= ~genRegMask(regArg[tmpArg]); + regArg[tmpArg] = REG_NA; + regArgNum--; } + assert(regArgNum >= 0); } } - assert(regArgNum == 0); } + assert(regArgNum == 0); assert(!regArgMaskLive); } diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index 10e03e6952148..6ff22fcf66d81 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -205,6 +205,8 @@ void CodeGen::genEmitGSCookieCheck(bool pushReg) BasicBlock* CodeGen::genCallFinally(BasicBlock* block) { + BasicBlock* const nextBlock = block->bbNext; + #if 
defined(FEATURE_EH_FUNCLETS) // Generate a call to the finally, like this: // mov rcx,qword ptr [rbp + 20H] // Load rcx with PSPSym @@ -235,7 +237,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) // block), then we need to generate a breakpoint here (since it will never // get executed) to get proper unwind behavior. - if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext)) + if ((nextBlock == nullptr) || !BasicBlock::sameEHRegion(block, nextBlock)) { instGen(INS_BREAKPOINT); // This should never get executed } @@ -251,8 +253,10 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) GetEmitter()->emitDisableGC(); #endif // JIT32_GCENCODER + BasicBlock* const jumpDest = nextBlock->bbJumpDest; + // Now go to where the finally funclet needs to return to. - if (block->bbNext->bbJumpDest == block->bbNext->bbNext) + if ((jumpDest == nextBlock->bbNext) && !compiler->fgInDifferentRegions(nextBlock, jumpDest)) { // Fall-through. // TODO-XArch-CQ: Can we get rid of this instruction, and just have the call return directly @@ -262,7 +266,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) } else { - inst_JMP(EJ_jmp, block->bbNext->bbJumpDest); + inst_JMP(EJ_jmp, jumpDest); } #ifndef JIT32_GCENCODER @@ -312,7 +316,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) if (!(block->bbFlags & BBF_RETLESS_CALL)) { assert(block->isBBCallAlwaysPair()); - GetEmitter()->emitIns_J(INS_push_hide, block->bbNext->bbJumpDest); + GetEmitter()->emitIns_J(INS_push_hide, nextBlock->bbJumpDest); } else { @@ -332,7 +336,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) if (!(block->bbFlags & BBF_RETLESS_CALL)) { assert(block->isBBCallAlwaysPair()); - block = block->bbNext; + block = nextBlock; } return block; } @@ -6664,17 +6668,17 @@ void CodeGen::genLeaInstruction(GenTreeAddrMode* lea) emitAttr size = emitTypeSize(lea); genConsumeOperands(lea); - if (lea->Base() && lea->Index()) + if (lea->HasBase() && lea->HasIndex()) { regNumber 
baseReg = lea->Base()->GetRegNum(); regNumber indexReg = lea->Index()->GetRegNum(); GetEmitter()->emitIns_R_ARX(INS_lea, size, lea->GetRegNum(), baseReg, indexReg, lea->gtScale, lea->Offset()); } - else if (lea->Base()) + else if (lea->HasBase()) { GetEmitter()->emitIns_R_AR(INS_lea, size, lea->GetRegNum(), lea->Base()->GetRegNum(), lea->Offset()); } - else if (lea->Index()) + else if (lea->HasIndex()) { GetEmitter()->emitIns_R_ARX(INS_lea, size, lea->GetRegNum(), REG_NA, lea->Index()->GetRegNum(), lea->gtScale, lea->Offset()); diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h index b5c08a34c3011..c98b1331bb831 100644 --- a/src/coreclr/jit/compiler.h +++ b/src/coreclr/jit/compiler.h @@ -9897,6 +9897,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX STRESS_MODE(NO_OLD_PROMOTION) /* Do not use old promotion */ \ STRESS_MODE(PHYSICAL_PROMOTION) /* Use physical promotion */ \ STRESS_MODE(PHYSICAL_PROMOTION_COST) \ + STRESS_MODE(UNWIND) /* stress unwind info; e.g., create function fragments */ \ \ /* After COUNT_VARN, stress level 2 does all of these all the time */ \ \ diff --git a/src/coreclr/jit/earlyprop.cpp b/src/coreclr/jit/earlyprop.cpp index b422c79a37ba3..8233f8f521a41 100644 --- a/src/coreclr/jit/earlyprop.cpp +++ b/src/coreclr/jit/earlyprop.cpp @@ -454,7 +454,7 @@ bool Compiler::optFoldNullCheck(GenTree* tree, LocalNumberToNullCheckTreeMap* nu nullCheckTree->gtFlags &= ~(GTF_EXCEPT | GTF_DONT_CSE); // Set this flag to prevent reordering - nullCheckTree->gtFlags |= GTF_ORDER_SIDEEFF; + nullCheckTree->SetHasOrderingSideEffect(); nullCheckTree->gtFlags |= GTF_IND_NONFAULTING; if (nullCheckParent != nullptr) diff --git a/src/coreclr/jit/emitriscv64.cpp b/src/coreclr/jit/emitriscv64.cpp index edfe30a3026f6..51772e889a12e 100644 --- a/src/coreclr/jit/emitriscv64.cpp +++ b/src/coreclr/jit/emitriscv64.cpp @@ -561,6 +561,34 @@ void emitter::emitIns_Mov( } } +void emitter::emitIns_Mov(emitAttr attr, regNumber 
dstReg, regNumber srcReg, bool canSkip) +{ + if (!canSkip || dstReg != srcReg) + { + assert(attr == EA_4BYTE || attr == EA_PTRSIZE); + if (isGeneralRegisterOrR0(dstReg) && isGeneralRegisterOrR0(srcReg)) + { + emitIns_R_R_I(attr == EA_4BYTE ? INS_addiw : INS_addi, attr, dstReg, srcReg, 0); + } + else if (isGeneralRegisterOrR0(dstReg) && genIsValidFloatReg(srcReg)) + { + emitIns_R_R(attr == EA_4BYTE ? INS_fmv_x_w : INS_fmv_x_d, attr, dstReg, srcReg); + } + else if (genIsValidFloatReg(dstReg) && isGeneralRegisterOrR0(srcReg)) + { + emitIns_R_R(attr == EA_4BYTE ? INS_fmv_w_x : INS_fmv_d_x, attr, dstReg, srcReg); + } + else if (genIsValidFloatReg(dstReg) && genIsValidFloatReg(srcReg)) + { + emitIns_R_R_R(attr == EA_4BYTE ? INS_fsgnj_s : INS_fsgnj_d, attr, dstReg, srcReg, srcReg); + } + else + { + assert(!"Invalid registers in emitIns_Mov()\n"); + } + } +} + /***************************************************************************** * * Add an instruction referencing two registers diff --git a/src/coreclr/jit/emitriscv64.h b/src/coreclr/jit/emitriscv64.h index a572ee9d2cf8c..ce207755efc4d 100644 --- a/src/coreclr/jit/emitriscv64.h +++ b/src/coreclr/jit/emitriscv64.h @@ -171,6 +171,8 @@ void emitIns_R_I(instruction ins, emitAttr attr, regNumber reg, ssize_t imm, ins void emitIns_Mov( instruction ins, emitAttr attr, regNumber dstReg, regNumber srcReg, bool canSkip, insOpts opt = INS_OPTS_NONE); +void emitIns_Mov(emitAttr attr, regNumber dstReg, regNumber srcReg, bool canSkip = false); + void emitIns_R_R(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, insOpts opt = INS_OPTS_NONE); void emitIns_R_R(instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, insFlags flags) diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp index d42175f98300e..f0ffca4a5ef08 100644 --- a/src/coreclr/jit/gentree.cpp +++ b/src/coreclr/jit/gentree.cpp @@ -1427,7 +1427,7 @@ bool CallArg::IsUserArg() const void CallArg::CheckIsStruct() { GenTree* node = 
GetNode(); - if (AbiInfo.IsStruct) + if (varTypeIsStruct(GetSignatureType())) { if (!varTypeIsStruct(node) && !node->OperIs(GT_FIELD_LIST)) { @@ -6731,7 +6731,7 @@ GenTree* GenTree::gtGetParent(GenTree*** pUse) // of the children's flags. // -bool GenTree::OperRequiresAsgFlag() +bool GenTree::OperRequiresAsgFlag() const { switch (OperGet()) { @@ -6769,8 +6769,7 @@ bool GenTree::OperRequiresAsgFlag() // OperRequiresCallFlag : Check whether the operation requires GTF_CALL flag regardless // of the children's flags. // - -bool GenTree::OperRequiresCallFlag(Compiler* comp) +bool GenTree::OperRequiresCallFlag(Compiler* comp) const { switch (gtOper) { @@ -7026,6 +7025,160 @@ bool GenTree::OperMayThrow(Compiler* comp) return OperExceptions(comp) != ExceptionSetFlags::None; } +//------------------------------------------------------------------------------ +// OperRequiresGlobRefFlag : Check whether the operation requires GTF_GLOB_REF +// flag regardless of the children's flags. +// +// Arguments: +// comp - Compiler instance +// +// Return Value: +// True if the given operator requires GTF_GLOB_REF +// +// Remarks: +// Globally visible stores and loads, as well as some equivalently modeled +// operations, require the GLOB_REF flag to be set on the node. +// +// This function is only valid after local morph when we know which locals +// are address exposed, and the flag in gtFlags is only kept valid after +// morph has run. Before local morph the property can be conservatively +// approximated for locals with lvHasLdAddrOp. 
+// +bool GenTree::OperRequiresGlobRefFlag(Compiler* comp) const +{ + switch (OperGet()) + { + case GT_LCL_VAR: + case GT_LCL_FLD: + case GT_STORE_LCL_VAR: + case GT_STORE_LCL_FLD: + return comp->lvaGetDesc(AsLclVarCommon())->IsAddressExposed(); + + case GT_IND: + case GT_BLK: + if (AsIndir()->IsInvariantLoad()) + { + return false; + } + FALLTHROUGH; + + case GT_STOREIND: + case GT_STORE_BLK: + case GT_STORE_DYN_BLK: + case GT_XADD: + case GT_XORR: + case GT_XAND: + case GT_XCHG: + case GT_LOCKADD: + case GT_CMPXCHG: + case GT_MEMORYBARRIER: + case GT_KEEPALIVE: + return true; + + case GT_CALL: + return AsCall()->HasSideEffects(comp, /* ignoreExceptions */ true); + + case GT_ALLOCOBJ: + return AsAllocObj()->gtHelperHasSideEffects; + +#if defined(FEATURE_HW_INTRINSICS) + case GT_HWINTRINSIC: + return AsHWIntrinsic()->OperRequiresGlobRefFlag(); +#endif // FEATURE_HW_INTRINSICS + + default: + assert(!OperRequiresCallFlag(comp) || OperIs(GT_INTRINSIC)); + assert((!OperIsIndir() || OperIs(GT_NULLCHECK)) && !OperRequiresAsgFlag()); + return false; + } +} + +//------------------------------------------------------------------------------ +// OperSupportsOrderingSideEffect : Check whether the operation supports the +// GTF_ORDER_SIDEEFF flag. +// +// Return Value: +// True if the given operator supports GTF_ORDER_SIDEEFF. +// +// Remarks: +// A node will still have this flag set if an operand has it set, even if the +// parent does not support it. This situation indicates that reordering the +// parent may be ok as long as it does not break ordering dependencies of the +// operand. +// +bool GenTree::OperSupportsOrderingSideEffect() const +{ + if (TypeIs(TYP_BYREF)) + { + // Forming byrefs may only be legal due to previous checks. 
+ return true; + } + + switch (OperGet()) + { + case GT_BOUNDS_CHECK: + case GT_IND: + case GT_BLK: + case GT_STOREIND: + case GT_NULLCHECK: + case GT_STORE_BLK: + case GT_STORE_DYN_BLK: + case GT_XADD: + case GT_XORR: + case GT_XAND: + case GT_XCHG: + case GT_LOCKADD: + case GT_CMPXCHG: + case GT_MEMORYBARRIER: + case GT_CATCH_ARG: + return true; + default: + return false; + } +} + +//------------------------------------------------------------------------------ +// OperEffects: Compute effect flags that are relevant to this node only, +// excluding its children. +// +// Arguments: +// comp - Compiler instance +// +// Return Value: +// The effect flags. +// +GenTreeFlags GenTree::OperEffects(Compiler* comp) +{ + GenTreeFlags flags = gtFlags & GTF_ALL_EFFECT; + + if (((flags & GTF_ASG) != 0) && !OperRequiresAsgFlag()) + { + flags &= ~GTF_ASG; + } + + if (((flags & GTF_CALL) != 0) && !OperRequiresCallFlag(comp)) + { + flags &= ~GTF_CALL; + } + + if (((flags & GTF_EXCEPT) != 0) && !OperMayThrow(comp)) + { + flags &= ~GTF_EXCEPT; + } + + if (((flags & GTF_GLOB_REF) != 0) && !OperRequiresGlobRefFlag(comp)) + { + flags &= ~GTF_GLOB_REF; + } + + if (((flags & GTF_ORDER_SIDEEFF) != 0) && !OperSupportsOrderingSideEffect()) + { + flags &= ~GTF_ORDER_SIDEEFF; + } + + return flags; +} + //----------------------------------------------------------------------------------- // GetFieldCount: Return the register count for a multi-reg lclVar. // @@ -8177,7 +8330,7 @@ void Compiler::gtInitializeIndirNode(GenTreeIndir* indir, GenTreeFlags indirFlag } if ((indirFlags & GTF_IND_VOLATILE) != 0) { - indir->gtFlags |= GTF_ORDER_SIDEEFF; + indir->SetHasOrderingSideEffect(); } } @@ -10480,7 +10633,13 @@ void GenTree::SetIndirExceptionFlags(Compiler* comp) printf("%c", (flags & GTF_SET_FLAGS) ? 'S' : '-'); ++charsDisplayed; #endif - printf("%c", (flags & GTF_SPILLED) ? 'z' : (flags & GTF_SPILL) ? 
'Z' : '-'); + + // Both GTF_SPILL and GTF_SPILLED: '#' + // Only GTF_SPILLED: 'z' + // Only GTF_SPILL: 'Z' + printf("%c", ((flags & (GTF_SPILL | GTF_SPILLED)) == (GTF_SPILL | GTF_SPILLED)) + ? '#' + : ((flags & GTF_SPILLED) ? 'z' : ((flags & GTF_SPILL) ? 'Z' : '-'))); return charsDisplayed; } @@ -10615,11 +10774,11 @@ void Compiler::gtDispNodeName(GenTree* tree) { GenTreeAddrMode* lea = tree->AsAddrMode(); bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), " %s(", name); - if (lea->Base() != nullptr) + if (lea->HasBase()) { bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "b+"); } - if (lea->Index() != nullptr) + if (lea->HasIndex()) { bufp += SimpleSprintf_s(bufp, buf, sizeof(buf), "(i*%d)+", lea->gtScale); } @@ -11035,8 +11194,8 @@ void Compiler::gtDispNode(GenTree* tree, IndentStack* indentStack, _In_ _In_opt_ printf("%c", (flags & GTF_UNSIGNED ) ? 'U' : (flags & GTF_BOOLEAN ) ? 'B' : '-'); printf("%c", (flags & GTF_SET_FLAGS ) ? 'S' : '-'); - printf("%c", (flags & GTF_SPILLED ) ? 'z' : '-'); - printf("%c", (flags & GTF_SPILL ) ? 'Z' : '-'); + printf("%c", ((flags & (GTF_SPILL | GTF_SPILLED)) == (GTF_SPILL | GTF_SPILLED)) ? '#' : ((flags & + GTF_SPILLED) ? 'z' : ((flags & GTF_SPILL) ? 'Z' : '-'))); */ } @@ -11058,7 +11217,8 @@ void Compiler::gtDispNode(GenTree* tree, IndentStack* indentStack, _In_ _In_opt_ { const size_t bufLength = msgLength - 1; msg = reinterpret_cast(_alloca(bufLength * sizeof(char))); - sprintf_s(const_cast(msg), bufLength, "t%d = %s", tree->gtTreeID, hasOperands ? "" : " "); + sprintf_s(const_cast(msg), bufLength, "%c%d = %s", tree->IsUnusedValue() ? 'u' : 't', tree->gtTreeID, + hasOperands ? 
"" : " "); } } @@ -11263,7 +11423,7 @@ unsigned Compiler::gtDispMultiRegCount(GenTree* tree) // gtDispRegVal: Print the register(s) defined by the given node // // Arguments: -// tree - Gentree node whose registers we want to print +// tree - GenTree node whose registers we want to print // void Compiler::gtDispRegVal(GenTree* tree) { @@ -15869,8 +16029,8 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree) // was successful - instead use one of the operands, e.g. op1. // Don't fold overflow operations if not global morph phase. - // The reason for this is that this optimization is replacing a gentree node - // with another new gentree node. Say a GT_CALL(arglist) has one 'arg' + // The reason for this is that this optimization is replacing a GenTree node + // with another new GenTree node. Say a GT_CALL(arglist) has one 'arg' // involving overflow arithmetic. During assertion prop, it is possible // that the 'arg' could be constant folded and the result could lead to an // overflow. In such a case 'arg' will get replaced with GT_COMMA node @@ -25338,6 +25498,15 @@ bool GenTreeHWIntrinsic::OperRequiresCallFlag() const return false; } +//------------------------------------------------------------------------------ +// OperRequiresGlobRefFlag : Check whether the operation requires GTF_GLOB_REF +// flag regardless of the children's flags. +// +bool GenTreeHWIntrinsic::OperRequiresGlobRefFlag() const +{ + return OperIsMemoryLoad() || OperRequiresAsgFlag() || OperRequiresCallFlag(); +} + //------------------------------------------------------------------------ // GetLayout: Get the layout for this TYP_STRUCT HWI node. 
// diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h index dc115bd491e68..525d55d94ecf4 100644 --- a/src/coreclr/jit/gentree.h +++ b/src/coreclr/jit/gentree.h @@ -1852,12 +1852,18 @@ struct GenTree // Returns true if it is a GT_COPY or GT_RELOAD of a multi-reg call node inline bool IsCopyOrReloadOfMultiRegCall() const; - bool OperRequiresAsgFlag(); + bool OperRequiresAsgFlag() const; - bool OperRequiresCallFlag(Compiler* comp); + bool OperRequiresCallFlag(Compiler* comp) const; - bool OperMayThrow(Compiler* comp); ExceptionSetFlags OperExceptions(Compiler* comp); + bool OperMayThrow(Compiler* comp); + + bool OperRequiresGlobRefFlag(Compiler* comp) const; + + bool OperSupportsOrderingSideEffect() const; + + GenTreeFlags OperEffects(Compiler* comp); unsigned GetScaleIndexMul(); unsigned GetScaleIndexShf(); @@ -2152,6 +2158,12 @@ struct GenTree gtFlags |= sourceFlags; } + void SetHasOrderingSideEffect() + { + assert(OperSupportsOrderingSideEffect()); + gtFlags |= GTF_ORDER_SIDEEFF; + } + inline bool IsCnsIntOrI() const; inline bool IsIntegralConst() const; @@ -3504,6 +3516,11 @@ struct GenTreeLclVarCommon : public GenTreeUnOp return m_ssaNum.IsComposite(); } + bool HasSsaIdentity() const + { + return !m_ssaNum.IsInvalid(); + } + #if DEBUGGABLE_GENTREE GenTreeLclVarCommon() : GenTreeUnOp() { @@ -4347,7 +4364,6 @@ struct CallArgABIInformation #endif , ArgType(TYP_UNDEF) , IsBackFilled(false) - , IsStruct(false) , PassedByRef(false) #if FEATURE_ARG_SPLIT , m_isSplit(false) @@ -4396,8 +4412,6 @@ struct CallArgABIInformation // True when the argument fills a register slot skipped due to alignment // requirements of previous arguments. bool IsBackFilled : 1; - // True if this is a struct arg - bool IsStruct : 1; // True iff the argument is passed by reference. 
bool PassedByRef : 1; @@ -6252,6 +6266,7 @@ struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic bool OperRequiresAsgFlag() const; bool OperRequiresCallFlag() const; + bool OperRequiresGlobRefFlag() const; unsigned GetResultOpNumForRmwIntrinsic(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3); @@ -7155,7 +7170,6 @@ struct GenTreeIndir : public GenTreeOp return gtOp2; } - // these methods provide an interface to the indirection node which bool HasBase(); bool HasIndex(); GenTree* Base(); @@ -7181,6 +7195,14 @@ struct GenTreeIndir : public GenTreeOp return (gtFlags & GTF_IND_UNALIGNED) != 0; } + // True if this indirection is invariant. + bool IsInvariantLoad() const + { + bool isInvariant = (gtFlags & GTF_IND_INVARIANT) != 0; + assert(!isInvariant || OperIs(GT_IND, GT_BLK)); + return isInvariant; + } + #if DEBUGGABLE_GENTREE // Used only for GenTree::GetVtableForOper() GenTreeIndir() : GenTreeOp() diff --git a/src/coreclr/jit/hwintrinsicarm64.cpp b/src/coreclr/jit/hwintrinsicarm64.cpp index f2a6fa727d219..0234f4ed23cfa 100644 --- a/src/coreclr/jit/hwintrinsicarm64.cpp +++ b/src/coreclr/jit/hwintrinsicarm64.cpp @@ -547,7 +547,12 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, case NI_Vector128_Ceiling: { assert(sig->numArgs == 1); - assert(varTypeIsFloating(simdBaseType)); + + if (!varTypeIsFloating(simdBaseType)) + { + retNode = impSIMDPopStack(); + break; + } op1 = impSIMDPopStack(); retNode = gtNewSimdCeilNode(retType, op1, simdBaseJitType, simdSize); @@ -1098,7 +1103,12 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, case NI_Vector128_Floor: { assert(sig->numArgs == 1); - assert(varTypeIsFloating(simdBaseType)); + + if (!varTypeIsFloating(simdBaseType)) + { + retNode = impSIMDPopStack(); + break; + } op1 = impSIMDPopStack(); retNode = gtNewSimdFloorNode(retType, op1, simdBaseJitType, simdSize); @@ -1728,6 +1738,48 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, break; } + case 
NI_AdvSimd_StoreVector64x2: + case NI_AdvSimd_Arm64_StoreVector128x2: + { + assert(sig->numArgs == 2); + assert(retType == TYP_VOID); + + CORINFO_ARG_LIST_HANDLE arg1 = sig->args; + CORINFO_ARG_LIST_HANDLE arg2 = info.compCompHnd->getArgNext(arg1); + var_types argType = TYP_UNKNOWN; + CORINFO_CLASS_HANDLE argClass = NO_CLASS_HANDLE; + + argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg2, &argClass))); + op2 = impPopStack().val; + unsigned fieldCount = info.compCompHnd->getClassNumInstanceFields(argClass); + argType = JITtype2varType(strip(info.compCompHnd->getArgType(sig, arg1, &argClass))); + op1 = getArgForHWIntrinsic(argType, argClass); + + assert(op2->TypeGet() == TYP_STRUCT); + if (op1->OperIs(GT_CAST)) + { + // Although the API specifies a pointer, if what we have is a BYREF, that's what + // we really want, so throw away the cast. + if (op1->gtGetOp1()->TypeGet() == TYP_BYREF) + { + op1 = op1->gtGetOp1(); + } + } + + if (!op2->OperIs(GT_LCL_VAR)) + { + unsigned tmp = lvaGrabTemp(true DEBUGARG("StoreVectorNx2 temp tree")); + + impStoreTemp(tmp, op2, CHECK_SPILL_NONE); + op2 = gtNewLclvNode(tmp, argType); + } + op2 = gtConvertTableOpToFieldList(op2, fieldCount); + + info.compNeedsConsecutiveRegisters = true; + retNode = gtNewSimdHWIntrinsicNode(retType, op1, op2, intrinsic, simdBaseJitType, simdSize); + break; + } + case NI_Vector64_Sum: case NI_Vector128_Sum: { diff --git a/src/coreclr/jit/hwintrinsiccodegenarm64.cpp b/src/coreclr/jit/hwintrinsiccodegenarm64.cpp index 33ce9b2f21644..2e2254faa19bf 100644 --- a/src/coreclr/jit/hwintrinsiccodegenarm64.cpp +++ b/src/coreclr/jit/hwintrinsiccodegenarm64.cpp @@ -763,6 +763,34 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node) GetEmitter()->emitIns_R_R_R(ins, emitTypeSize(intrin.baseType), op2Reg, op3Reg, op1Reg); break; + case NI_AdvSimd_StoreVector64x2: + case NI_AdvSimd_Arm64_StoreVector128x2: + { + unsigned regCount = 0; + + assert(intrin.op2->OperIsFieldList()); + + GenTreeFieldList* 
fieldList = intrin.op2->AsFieldList(); + GenTree* firstField = fieldList->Uses().GetHead()->GetNode(); + op2Reg = firstField->GetRegNum(); + +#ifdef DEBUG + regNumber argReg = op2Reg; + for (GenTreeFieldList::Use& use : fieldList->Uses()) + { + regCount++; + + GenTree* argNode = use.GetNode(); + assert(argReg == argNode->GetRegNum()); + argReg = REG_NEXT(argReg); + } + assert(regCount == 2); +#endif + + GetEmitter()->emitIns_R_R(ins, emitSize, op2Reg, op1Reg, opt); + break; + } + case NI_Vector64_CreateScalarUnsafe: case NI_Vector128_CreateScalarUnsafe: if (intrin.op1->isContainedFltOrDblImmed()) diff --git a/src/coreclr/jit/hwintrinsiclistarm64.h b/src/coreclr/jit/hwintrinsiclistarm64.h index 3aebd051ecbb6..d0fdc1be6da7a 100644 --- a/src/coreclr/jit/hwintrinsiclistarm64.h +++ b/src/coreclr/jit/hwintrinsiclistarm64.h @@ -467,6 +467,7 @@ HARDWARE_INTRINSIC(AdvSimd, SignExtendWideningUpper, HARDWARE_INTRINSIC(AdvSimd, SqrtScalar, 8, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_fsqrt, INS_fsqrt}, HW_Category_SIMD, HW_Flag_SIMDScalar) HARDWARE_INTRINSIC(AdvSimd, Store, -1, 2, true, {INS_st1, INS_st1, INS_st1, INS_st1, INS_st1, INS_st1, INS_st1, INS_st1, INS_st1, INS_st1}, HW_Category_Helper, HW_Flag_SpecialImport|HW_Flag_BaseTypeFromSecondArg|HW_Flag_NoCodeGen) HARDWARE_INTRINSIC(AdvSimd, StoreSelectedScalar, -1, 3, true, {INS_st1, INS_st1, INS_st1, INS_st1, INS_st1, INS_st1, INS_st1, INS_st1, INS_st1, INS_st1}, HW_Category_MemoryStore, HW_Flag_BaseTypeFromSecondArg|HW_Flag_HasImmediateOperand|HW_Flag_SIMDScalar|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(AdvSimd, StoreVector64x2, 8, 2, true, {INS_st2, INS_st2, INS_st2, INS_st2, INS_st2, INS_st2, INS_invalid, INS_invalid, INS_st2, INS_invalid}, HW_Category_MemoryStore, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialImport|HW_Flag_SpecialCodeGen|HW_Flag_NeedsConsecutiveRegisters) HARDWARE_INTRINSIC(AdvSimd, Subtract, -1, 2, true, {INS_sub, INS_sub, 
INS_sub, INS_sub, INS_sub, INS_sub, INS_sub, INS_sub, INS_fsub, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) HARDWARE_INTRINSIC(AdvSimd, SubtractHighNarrowingLower, 8, 2, true, {INS_subhn, INS_subhn, INS_subhn, INS_subhn, INS_subhn, INS_subhn, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) HARDWARE_INTRINSIC(AdvSimd, SubtractHighNarrowingUpper, 16, 3, true, {INS_subhn2, INS_subhn2, INS_subhn2, INS_subhn2, INS_subhn2, INS_subhn2, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_HasRMWSemantics) @@ -645,6 +646,7 @@ HARDWARE_INTRINSIC(AdvSimd_Arm64, StorePair, HARDWARE_INTRINSIC(AdvSimd_Arm64, StorePairScalar, 8, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_stp, INS_stp, INS_invalid, INS_invalid, INS_stp, INS_invalid}, HW_Category_MemoryStore, HW_Flag_BaseTypeFromSecondArg|HW_Flag_SpecialCodeGen) HARDWARE_INTRINSIC(AdvSimd_Arm64, StorePairScalarNonTemporal, 8, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_stnp, INS_stnp, INS_invalid, INS_invalid, INS_stnp, INS_invalid}, HW_Category_MemoryStore, HW_Flag_BaseTypeFromSecondArg|HW_Flag_SpecialCodeGen) HARDWARE_INTRINSIC(AdvSimd_Arm64, StorePairNonTemporal, -1, 3, true, {INS_stnp, INS_stnp, INS_stnp, INS_stnp, INS_stnp, INS_stnp, INS_stnp, INS_stnp, INS_stnp, INS_stp}, HW_Category_MemoryStore, HW_Flag_BaseTypeFromSecondArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(AdvSimd_Arm64, StoreVector128x2, 16, 2, true, {INS_st2, INS_st2, INS_st2, INS_st2, INS_st2, INS_st2, INS_st2, INS_st2, INS_st2, INS_st2}, HW_Category_MemoryStore, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialImport|HW_Flag_SpecialCodeGen|HW_Flag_NeedsConsecutiveRegisters) HARDWARE_INTRINSIC(AdvSimd_Arm64, Subtract, 16, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_fsub}, HW_Category_SIMD, HW_Flag_NoFlag) HARDWARE_INTRINSIC(AdvSimd_Arm64, SubtractSaturateScalar, 8, 
2, true, {INS_sqsub, INS_uqsub, INS_sqsub, INS_uqsub, INS_sqsub, INS_uqsub, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SIMDScalar) HARDWARE_INTRINSIC(AdvSimd_Arm64, TransposeEven, -1, 2, true, {INS_trn1, INS_trn1, INS_trn1, INS_trn1, INS_trn1, INS_trn1, INS_trn1, INS_trn1, INS_trn1, INS_trn1}, HW_Category_SIMD, HW_Flag_NoFlag) diff --git a/src/coreclr/jit/hwintrinsicxarch.cpp b/src/coreclr/jit/hwintrinsicxarch.cpp index 065999982a87a..89dc60031d531 100644 --- a/src/coreclr/jit/hwintrinsicxarch.cpp +++ b/src/coreclr/jit/hwintrinsicxarch.cpp @@ -1371,7 +1371,12 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, case NI_Vector512_Ceiling: { assert(sig->numArgs == 1); - assert(varTypeIsFloating(simdBaseType)); + + if (!varTypeIsFloating(simdBaseType)) + { + retNode = impSIMDPopStack(); + break; + } if ((simdSize < 32) && !compOpportunisticallyDependsOn(InstructionSet_SSE41)) { @@ -1986,7 +1991,12 @@ GenTree* Compiler::impSpecialIntrinsic(NamedIntrinsic intrinsic, case NI_Vector512_Floor: { assert(sig->numArgs == 1); - assert(varTypeIsFloating(simdBaseType)); + + if (!varTypeIsFloating(simdBaseType)) + { + retNode = impSIMDPopStack(); + break; + } if ((simdSize < 32) && !compOpportunisticallyDependsOn(InstructionSet_SSE41)) { diff --git a/src/coreclr/jit/ifconversion.cpp b/src/coreclr/jit/ifconversion.cpp index f51417453225a..b4b80f311873f 100644 --- a/src/coreclr/jit/ifconversion.cpp +++ b/src/coreclr/jit/ifconversion.cpp @@ -793,5 +793,10 @@ PhaseStatus Compiler::optIfConversion() } #endif + if (madeChanges) + { + fgRenumberBlocks(); + } + return madeChanges ? 
PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING; } diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 8d8135d239ecb..baf481d773fa6 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -1966,7 +1966,7 @@ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_H /* Mark the node as having a side-effect - i.e. cannot be * moved around since it is tied to a fixed location (EAX) */ - arg->gtFlags |= GTF_ORDER_SIDEEFF; + arg->SetHasOrderingSideEffect(); #if defined(JIT32_GCENCODER) const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5); @@ -9260,7 +9260,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full - trust apps). The reason this works is that JIT stores an i4 constant in Gentree union during + trust apps). The reason this works is that JIT stores an i4 constant in GenTree union during importation and reads from the union as if it were a long during code generation. Though this can potentially read garbage, one can get lucky to have this working correctly. @@ -9823,8 +9823,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) // byref is literally 0, and since the byref // leaks out here, we need to ensure it is // nullchecked. 
- nullcheck->gtFlags |= GTF_ORDER_SIDEEFF; - boxPayloadAddress->gtFlags |= GTF_ORDER_SIDEEFF; + nullcheck->SetHasOrderingSideEffect(); + boxPayloadAddress->SetHasOrderingSideEffect(); GenTree* result = gtNewOperNode(GT_COMMA, TYP_BYREF, nullcheck, boxPayloadAddress); impPushOnStack(result, tiRetVal); break; diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index d340354d34ef1..e8efd2a316980 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -2937,8 +2937,8 @@ GenTree* Compiler::impIntrinsic(GenTree* newobjThis, // Add an ordering dependency between the bounds check and // forming the byref to prevent these from being reordered. The // JIT is not allowed to create arbitrary illegal byrefs. - boundsCheck->gtFlags |= GTF_ORDER_SIDEEFF; - result->gtFlags |= GTF_ORDER_SIDEEFF; + boundsCheck->SetHasOrderingSideEffect(); + result->SetHasOrderingSideEffect(); retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result); break; diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index a257ebc173502..e9128fc26d13f 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -2379,7 +2379,7 @@ bool Compiler::fgCreateFiltersForGenericExceptions() // Now we need to spill CATCH_ARG (it should be the first thing evaluated) GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF); - arg->gtFlags |= GTF_ORDER_SIDEEFF; + arg->SetHasOrderingSideEffect(); unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg")); lvaTable[tempNum].lvType = TYP_REF; GenTree* argStore = gtNewTempStore(tempNum, arg); diff --git a/src/coreclr/jit/jitstd/vector.h b/src/coreclr/jit/jitstd/vector.h index 69f67817ecb6e..78dadd3651e87 100644 --- a/src/coreclr/jit/jitstd/vector.h +++ b/src/coreclr/jit/jitstd/vector.h @@ -145,6 +145,7 @@ class vector // cctors vector(const vector& vec); + vector(vector&& vec); template explicit vector(const vector& vec); @@ -195,6 +196,8 @@ class vector 
template vector& operator=(const vector& vec); + vector& operator=(vector&& vec); + reference operator[](size_type n); const_reference operator[](size_type n) const; @@ -328,6 +331,18 @@ vector::vector(const vector& vec) } } +template +vector::vector(vector&& vec) + : m_allocator(vec.m_allocator) + , m_pArray(vec.m_pArray) + , m_nSize(vec.m_nSize) + , m_nCapacity(vec.m_nCapacity) +{ + vec.m_pArray = nullptr; + vec.m_nSize = 0; + vec.m_nCapacity = 0; +} + template vector::~vector() { @@ -578,6 +593,20 @@ vector& vector::operator=(const vector return *this; } +template +vector& vector::operator=(vector&& vec) +{ + m_allocator = vec.m_allocator; + m_pArray = vec.m_pArray; + m_nSize = vec.m_nSize; + m_nCapacity = vec.m_nCapacity; + + vec.m_pArray = nullptr; + vec.m_nSize = 0; + vec.m_nCapacity = 0; + + return *this; +} template typename vector::reference vector::operator[](size_type n) diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index d50081da99434..8805610d56a1a 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -47,7 +47,7 @@ void ArrIndex::PrintBoundsCheckNodes(unsigned dim /* = -1 */) #endif // DEBUG //-------------------------------------------------------------------------------------------------- -// ToGenTree - Convert an arrLen operation into a gentree node. +// ToGenTree - Convert an arrLen operation into a GenTree node. // // Arguments: // comp Compiler instance to allocate trees @@ -111,7 +111,7 @@ GenTree* LC_Array::ToGenTree(Compiler* comp, BasicBlock* bb) } //-------------------------------------------------------------------------------------------------- -// ToGenTree - Convert an "identifier" into a gentree node. +// ToGenTree - Convert an "identifier" into a GenTree node. 
// // Arguments: // comp Compiler instance to allocate trees @@ -170,7 +170,7 @@ GenTree* LC_Ident::ToGenTree(Compiler* comp, BasicBlock* bb) } //-------------------------------------------------------------------------------------------------- -// ToGenTree - Convert an "expression" into a gentree node. +// ToGenTree - Convert an "expression" into a GenTree node. // // Arguments: // comp Compiler instance to allocate trees @@ -195,7 +195,7 @@ GenTree* LC_Expr::ToGenTree(Compiler* comp, BasicBlock* bb) } //-------------------------------------------------------------------------------------------------- -// ToGenTree - Convert a "condition" into a gentree node. +// ToGenTree - Convert a "condition" into a GenTree node. // // Arguments: // comp Compiler instance to allocate trees @@ -1705,7 +1705,8 @@ void Compiler::optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* JITDUMP("Updating flags on GDV guard inside hot loop. Before:\n"); DISPSTMT(stmt); - indir->gtFlags |= GTF_ORDER_SIDEEFF | GTF_IND_NONFAULTING; + indir->gtFlags |= GTF_IND_NONFAULTING; + indir->SetHasOrderingSideEffect(); indir->gtFlags &= ~GTF_EXCEPT; assert(fgNodeThreading == NodeThreading::None); gtUpdateStmtSideEffects(stmt); diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 25cfb5409d957..7bc73d6bcecf0 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -1489,7 +1489,7 @@ GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, CallArg* callArg, call, putInIncomingArgArea); #if defined(DEBUG) && defined(FEATURE_PUT_STRUCT_ARG_STK) - if (callArg->AbiInfo.IsStruct) + if (varTypeIsStruct(callArg->GetSignatureType())) { // We use GT_BLK only for non-SIMD struct arguments. 
if (arg->OperIs(GT_BLK)) @@ -1798,11 +1798,12 @@ GenTree* Lowering::AddrGen(void* addr) // // Arguments: // tree - GenTreeCall node to replace with STORE_BLK +// next - [out] Next node to lower if this function returns true // // Return Value: -// nullptr if no changes were made +// false if no changes were made // -GenTree* Lowering::LowerCallMemmove(GenTreeCall* call) +bool Lowering::LowerCallMemmove(GenTreeCall* call, GenTree** next) { JITDUMP("Considering Memmove [%06d] for unrolling.. ", comp->dspTreeID(call)) assert(comp->lookupNamedIntrinsic(call->gtCallMethHnd) == NI_System_Buffer_Memmove); @@ -1812,7 +1813,7 @@ GenTree* Lowering::LowerCallMemmove(GenTreeCall* call) if (comp->info.compHasNextCallRetAddr) { JITDUMP("compHasNextCallRetAddr=true so we won't be able to remove the call - bail out.\n") - return nullptr; + return false; } GenTree* lengthArg = call->gtArgs.GetUserArgByIndex(2)->GetNode(); @@ -1857,7 +1858,9 @@ GenTree* Lowering::LowerCallMemmove(GenTreeCall* call) JITDUMP("\nNew tree:\n") DISPTREE(storeBlk); - return storeBlk; + // TODO: This skips lowering srcBlk and storeBlk. + *next = storeBlk->gtNext; + return true; } else { @@ -1868,7 +1871,7 @@ GenTree* Lowering::LowerCallMemmove(GenTreeCall* call) { JITDUMP("size is not a constant.\n") } - return nullptr; + return false; } //------------------------------------------------------------------------ @@ -1877,11 +1880,12 @@ GenTree* Lowering::LowerCallMemmove(GenTreeCall* call) // // Arguments: // tree - GenTreeCall node to unroll as memcmp +// next - [out] Next node to lower if this function returns true // // Return Value: -// nullptr if no changes were made +// false if no changes were made // -GenTree* Lowering::LowerCallMemcmp(GenTreeCall* call) +bool Lowering::LowerCallMemcmp(GenTreeCall* call, GenTree** next) { JITDUMP("Considering Memcmp [%06d] for unrolling.. 
", comp->dspTreeID(call)) assert(comp->lookupNamedIntrinsic(call->gtCallMethHnd) == NI_System_SpanHelpers_SequenceEqual); @@ -1891,13 +1895,13 @@ GenTree* Lowering::LowerCallMemcmp(GenTreeCall* call) if (!comp->opts.OptimizationEnabled()) { JITDUMP("Optimizations aren't allowed - bail out.\n") - return nullptr; + return false; } if (comp->info.compHasNextCallRetAddr) { JITDUMP("compHasNextCallRetAddr=true so we won't be able to remove the call - bail out.\n") - return nullptr; + return false; } GenTree* lengthArg = call->gtArgs.GetUserArgByIndex(2)->GetNode(); @@ -2004,9 +2008,8 @@ GenTree* Lowering::LowerCallMemcmp(GenTreeCall* call) GenTree* rIndir = comp->gtNewIndir(loadType, rArg); result = newBinaryOp(comp, GT_EQ, TYP_INT, lIndir, rIndir); - BlockRange().InsertAfter(lArg, lIndir); - BlockRange().InsertAfter(rArg, rIndir); - BlockRange().InsertBefore(call, result); + BlockRange().InsertBefore(call, lIndir, rIndir, result); + *next = lIndir; } else { @@ -2020,51 +2023,77 @@ GenTree* Lowering::LowerCallMemcmp(GenTreeCall* call) GenTree* rArgClone = comp->gtNewLclvNode(rArgUse.ReplaceWithLclVar(comp), genActualType(rArg)); BlockRange().InsertBefore(call, lArgClone, rArgClone); - // We're going to emit something like the following: - // - // bool result = ((*(int*)leftArg ^ *(int*)rightArg) | - // (*(int*)(leftArg + 1) ^ *((int*)(rightArg + 1)))) == 0; - // - // ^ in the given example we unroll for length=5 - // - // In IR: - // - // * EQ int - // +--* OR int - // | +--* XOR int - // | | +--* IND int - // | | | \--* LCL_VAR byref V1 - // | | \--* IND int - // | | \--* LCL_VAR byref V2 - // | \--* XOR int - // | +--* IND int - // | | \--* ADD byref - // | | +--* LCL_VAR byref V1 - // | | \--* CNS_INT int 1 - // | \--* IND int - // | \--* ADD byref - // | +--* LCL_VAR byref V2 - // | \--* CNS_INT int 1 - // \--* CNS_INT int 0 - // + *next = lArgClone; + GenTree* l1Indir = comp->gtNewIndir(loadType, lArgUse.Def()); GenTree* r1Indir = comp->gtNewIndir(loadType, 
rArgUse.Def()); - GenTree* lXor = newBinaryOp(comp, GT_XOR, actualLoadType, l1Indir, r1Indir); GenTree* l2Offs = comp->gtNewIconNode(cnsSize - loadWidth, TYP_I_IMPL); GenTree* l2AddOffs = newBinaryOp(comp, GT_ADD, lArg->TypeGet(), lArgClone, l2Offs); GenTree* l2Indir = comp->gtNewIndir(loadType, l2AddOffs); - GenTree* r2Offs = comp->gtCloneExpr(l2Offs); // offset is the same + GenTree* r2Offs = comp->gtNewIconNode(cnsSize - loadWidth, TYP_I_IMPL); GenTree* r2AddOffs = newBinaryOp(comp, GT_ADD, rArg->TypeGet(), rArgClone, r2Offs); GenTree* r2Indir = comp->gtNewIndir(loadType, r2AddOffs); - GenTree* rXor = newBinaryOp(comp, GT_XOR, actualLoadType, l2Indir, r2Indir); - GenTree* resultOr = newBinaryOp(comp, GT_OR, actualLoadType, lXor, rXor); - GenTree* zeroCns = comp->gtNewZeroConNode(actualLoadType); - result = newBinaryOp(comp, GT_EQ, TYP_INT, resultOr, zeroCns); - - BlockRange().InsertAfter(rArgClone, l1Indir, r1Indir, l2Offs, l2AddOffs); - BlockRange().InsertAfter(l2AddOffs, l2Indir, r2Offs, r2AddOffs, r2Indir); - BlockRange().InsertAfter(r2Indir, lXor, rXor, resultOr, zeroCns); - BlockRange().InsertAfter(zeroCns, result); + + BlockRange().InsertAfter(rArgClone, l1Indir, l2Offs, l2AddOffs, l2Indir); + BlockRange().InsertAfter(l2Indir, r1Indir, r2Offs, r2AddOffs, r2Indir); + +#ifdef TARGET_ARM64 + if (!varTypeIsSIMD(loadType)) + { + // ARM64 will get efficient ccmp codegen if we emit the normal thing: + // + // bool result = (*(int*)leftArg == *(int)rightArg) & (*(int*)(leftArg + 1) == *(int*)(rightArg + // + + // 1)) + + GenTree* eq1 = newBinaryOp(comp, GT_EQ, TYP_INT, l1Indir, r1Indir); + GenTree* eq2 = newBinaryOp(comp, GT_EQ, TYP_INT, l2Indir, r2Indir); + result = newBinaryOp(comp, GT_AND, TYP_INT, eq1, eq2); + + BlockRange().InsertAfter(r2Indir, eq1, eq2, result); + } +#endif + + if (result == nullptr) + { + // We're going to emit something like the following: + // + // bool result = ((*(int*)leftArg ^ *(int*)rightArg) | + // (*(int*)(leftArg + 1) ^ 
*((int*)(rightArg + 1)))) == 0; + // + // ^ in the given example we unroll for length=5 + // + // In IR: + // + // * EQ int + // +--* OR int + // | +--* XOR int + // | | +--* IND int + // | | | \--* LCL_VAR byref V1 + // | | \--* IND int + // | | \--* LCL_VAR byref V2 + // | \--* XOR int + // | +--* IND int + // | | \--* ADD byref + // | | +--* LCL_VAR byref V1 + // | | \--* CNS_INT int 1 + // | \--* IND int + // | \--* ADD byref + // | +--* LCL_VAR byref V2 + // | \--* CNS_INT int 1 + // \--* CNS_INT int 0 + // + // TODO-CQ: Do this as a general optimization similar to TryLowerAndOrToCCMP. + + GenTree* lXor = newBinaryOp(comp, GT_XOR, actualLoadType, l1Indir, r1Indir); + GenTree* rXor = newBinaryOp(comp, GT_XOR, actualLoadType, l2Indir, r2Indir); + GenTree* resultOr = newBinaryOp(comp, GT_OR, actualLoadType, lXor, rXor); + GenTree* zeroCns = comp->gtNewZeroConNode(actualLoadType); + result = newBinaryOp(comp, GT_EQ, TYP_INT, resultOr, zeroCns); + + BlockRange().InsertAfter(r2Indir, lXor, rXor, resultOr, zeroCns); + BlockRange().InsertAfter(zeroCns, result); + } } JITDUMP("\nUnrolled to:\n"); @@ -2090,7 +2119,7 @@ GenTree* Lowering::LowerCallMemcmp(GenTreeCall* call) arg.GetNode()->SetUnusedValue(); } } - return lArg; + return true; } } else @@ -2102,7 +2131,7 @@ GenTree* Lowering::LowerCallMemcmp(GenTreeCall* call) { JITDUMP("size is not a constant.\n") } - return nullptr; + return false; } // do lowering steps for a call @@ -2133,20 +2162,12 @@ GenTree* Lowering::LowerCall(GenTree* node) #if defined(TARGET_AMD64) || defined(TARGET_ARM64) if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) { - GenTree* newNode = nullptr; - NamedIntrinsic ni = comp->lookupNamedIntrinsic(call->gtCallMethHnd); - if (ni == NI_System_Buffer_Memmove) - { - newNode = LowerCallMemmove(call); - } - else if (ni == NI_System_SpanHelpers_SequenceEqual) + GenTree* nextNode = nullptr; + NamedIntrinsic ni = comp->lookupNamedIntrinsic(call->gtCallMethHnd); + if (((ni == 
NI_System_Buffer_Memmove) && LowerCallMemmove(call, &nextNode)) || + ((ni == NI_System_SpanHelpers_SequenceEqual) && LowerCallMemcmp(call, &nextNode))) { - newNode = LowerCallMemcmp(call); - } - - if (newNode != nullptr) - { - return newNode->gtNext; + return nextNode; } } #endif @@ -7781,6 +7802,285 @@ void Lowering::ContainCheckBitCast(GenTree* node) } } +struct StoreCoalescingData +{ + var_types targetType; + GenTree* baseAddr; + GenTree* index; + GenTree* value; + uint32_t scale; + int offset; +}; + +//------------------------------------------------------------------------ +// GetStoreCoalescingData: given a STOREIND node, get the data needed to perform +// store coalescing including pointer to the previous node. +// +// Arguments: +// comp - the compiler instance +// ind - the STOREIND node +// data - [OUT] the data needed for store coalescing +// +// Return Value: +// true if the data was successfully retrieved, false otherwise. +// Basically, false means that we definitely can't do store coalescing. +// +static bool GetStoreCoalescingData(Compiler* comp, GenTreeStoreInd* ind, StoreCoalescingData* data) +{ + // Don't merge volatile stores. + if (ind->IsVolatile()) + { + return false; + } + + // Data has to be INT_CNS, can be also VEC_CNS in future. + if (!ind->Data()->IsCnsIntOrI()) + { + return false; + } + + data->targetType = ind->TypeGet(); + data->value = ind->Data(); + if (ind->Addr()->OperIs(GT_LEA)) + { + GenTree* base = ind->Addr()->AsAddrMode()->Base(); + GenTree* index = ind->Addr()->AsAddrMode()->Index(); + if ((base == nullptr) || !base->OperIs(GT_LCL_VAR) || comp->lvaVarAddrExposed(base->AsLclVar()->GetLclNum())) + { + // Base must be a local. It's possible for it to be nullptr when index is not null, + // but let's ignore such cases. + return false; + } + + if ((index != nullptr) && + (!index->OperIs(GT_LCL_VAR) || comp->lvaVarAddrExposed(index->AsLclVar()->GetLclNum()))) + { + // Index should be either nullptr or a local. 
+ return false; + } + + data->baseAddr = base == nullptr ? nullptr : base; + data->index = index == nullptr ? nullptr : index; + data->scale = ind->Addr()->AsAddrMode()->GetScale(); + data->offset = ind->Addr()->AsAddrMode()->Offset(); + } + else if (ind->Addr()->OperIs(GT_LCL_VAR) && !comp->lvaVarAddrExposed(ind->Addr()->AsLclVar()->GetLclNum())) + { + // Address is just a local, no offset, scale is 1 + data->baseAddr = ind->Addr(); + data->index = nullptr; + data->scale = 1; + data->offset = 0; + } + else + { + // Address is not LEA or local. + return false; + } + return true; +} + +//------------------------------------------------------------------------ +// LowerStoreIndirCoalescing: If the given STOREIND node is followed by a similar +// STOREIND node, try to merge them into a single store of a twice wider type. Example: +// +// * STOREIND int +// +--* LCL_VAR byref V00 +// \--* CNS_INT int 0x1 +// +// * STOREIND int +// +--* LEA(b+4) byref +// | \--* LCL_VAR byref V00 +// \--* CNS_INT int 0x2 +// +// We can merge these two into into a single store of 8 bytes with (0x1 | (0x2 << 32)) as the value +// +// * STOREIND long +// +--* LEA(b+0) byref +// | \--* LCL_VAR byref V00 +// \--* CNS_INT long 0x200000001 +// +// Arguments: +// ind - the current STOREIND node +// +void Lowering::LowerStoreIndirCoalescing(GenTreeStoreInd* ind) +{ +// LA, RISC-V and ARM32 more likely to recieve a terrible performance hit from +// unaligned accesses making this optimization questionable. +#if defined(TARGET_XARCH) || defined(TARGET_ARM64) + if (!comp->opts.OptimizationEnabled()) + { + return; + } + + // For now, we require the current STOREIND to have LEA (previous store may not have it) + // So we can easily adjust the offset, consider making it more flexible in future. + if (!ind->Addr()->OperIs(GT_LEA)) + { + return; + } + + // We're going to do it in a loop while we see suitable STOREINDs to coalesce. + // E.g.: we have the following LIR sequence: + // + // ...addr nodes... 
+ // STOREIND(int) + // ...addr nodes... + // STOREIND(short) + // ...addr nodes... + // STOREIND(short) <-- we're here + // + // First we merge two 'short' stores, then we merge the result with the 'int' store + // to get a single store of 8 bytes. + do + { + // This check is not really needed, just for better throughput. + if (!ind->TypeIs(TYP_BYTE, TYP_UBYTE, TYP_SHORT, TYP_USHORT, TYP_INT)) + { + return; + } + + StoreCoalescingData currData; + StoreCoalescingData prevData; + + // Get coalescing data for the current STOREIND + if (!GetStoreCoalescingData(comp, ind, &currData)) + { + return; + } + + bool isClosedRange = false; + // Now we need to find the very first LIR node representing the current STOREIND + // and make sure that there are no other unexpected nodes in-between. + LIR::ReadOnlyRange currIndRange = BlockRange().GetTreeRange(ind, &isClosedRange); + if (!isClosedRange) + { + return; + } + GenTree* prevTree = currIndRange.FirstNode()->gtPrev; + // Now we need to find the previous STOREIND, + // we can ignore any NOPs or IL_OFFSETs in-between + while ((prevTree != nullptr) && prevTree->OperIs(GT_NOP, GT_IL_OFFSET)) + { + prevTree = prevTree->gtPrev; + } + + // It's not a STOREIND - bail out. + if ((prevTree == nullptr) || !prevTree->OperIs(GT_STOREIND)) + { + return; + } + + // Get coalescing data for the previous STOREIND + GenTreeStoreInd* prevInd = prevTree->AsStoreInd(); + if (!GetStoreCoalescingData(comp, prevInd->AsStoreInd(), &prevData)) + { + return; + } + + // Same for the previous STOREIND, make sure there are no unexpected nodes around. + LIR::ReadOnlyRange prevIndRange = BlockRange().GetTreeRange(prevInd, &isClosedRange); + if (!isClosedRange) + { + return; + } + + // STOREIND aren't value nodes. + LIR::Use use; + assert(!BlockRange().TryGetUse(prevInd, &use) && !BlockRange().TryGetUse(ind, &use)); + + // BaseAddr, Index, Scale and Type all have to match. 
+ if ((prevData.scale != currData.scale) || (prevData.targetType != currData.targetType) || + !GenTree::Compare(prevData.baseAddr, currData.baseAddr) || + !GenTree::Compare(prevData.index, currData.index)) + { + return; + } + + // Offset has to match the size of the type. We don't support the same or overlapping offsets. + if (abs(prevData.offset - currData.offset) != (int)genTypeSize(prevData.targetType)) + { + return; + } + + // Since we're merging two stores of the same type, the new type is twice wider. + var_types oldType = ind->TypeGet(); + var_types newType; + switch (oldType) + { + case TYP_BYTE: + case TYP_UBYTE: + newType = TYP_USHORT; + break; + + case TYP_SHORT: + case TYP_USHORT: + newType = TYP_INT; // TYP_UINT is not legal in IR + break; + +#ifdef TARGET_64BIT + case TYP_INT: + newType = TYP_LONG; + break; +#endif // TARGET_64BIT + + // TYP_FLOAT and TYP_DOUBLE aren't needed here - they're expected to + // be converted to TYP_INT/TYP_LONG for constant value. + // + // TODO-CQ: + // 2 x LONG/REF -> SIMD16 + // 2 x SIMD16 -> SIMD32 + // 2 x SIMD32 -> SIMD64 + // + // where it's legal (e.g. 
SIMD is not atomic on x64) + // + default: + return; + } + + // Delete previous STOREIND entirely + BlockRange().Remove(std::move(prevIndRange)); + + // We know it's always LEA for now + GenTreeAddrMode* addr = ind->Addr()->AsAddrMode(); + + // Update offset to be the minimum of the two + addr->SetOffset(min(prevData.offset, currData.offset)); + + // Update type for both STOREIND and val + ind->gtType = newType; + ind->Data()->gtType = newType; + + // We currently only support these constants for val + assert(prevData.value->IsCnsIntOrI() && currData.value->IsCnsIntOrI()); + + size_t lowerCns = (size_t)prevData.value->AsIntCon()->IconValue(); + size_t upperCns = (size_t)currData.value->AsIntCon()->IconValue(); + + // if the previous store was at a higher address, swap the constants + if (prevData.offset > currData.offset) + { + std::swap(lowerCns, upperCns); + } + + // Trim the constants to the size of the type, e.g. for TYP_SHORT and TYP_USHORT + // the mask will be 0xFFFF, for TYP_INT - 0xFFFFFFFF. + size_t mask = ~(size_t(0)) >> (sizeof(size_t) - genTypeSize(oldType)) * BITS_IN_BYTE; + lowerCns &= mask; + upperCns &= mask; + + size_t val = (lowerCns | (upperCns << (genTypeSize(oldType) * BITS_IN_BYTE))); + JITDUMP("Coalesced two stores into a single store with value %lld\n", (int64_t)val); + + // It's not expected to be contained yet, but just in case... + ind->Data()->ClearContained(); + ind->Data()->AsIntCon()->gtIconVal = (ssize_t)val; + ind->gtFlags |= GTF_IND_UNALIGNED; + + } while (true); +#endif // TARGET_XARCH || TARGET_ARM64 +} + //------------------------------------------------------------------------ // LowerStoreIndirCommon: a common logic to lower StoreIndir. 
// @@ -7821,6 +8121,7 @@ void Lowering::LowerStoreIndirCommon(GenTreeStoreInd* ind) } #endif + LowerStoreIndirCoalescing(ind); LowerStoreIndir(ind); } } diff --git a/src/coreclr/jit/lower.h b/src/coreclr/jit/lower.h index 3331d0a44975c..8ffe86c9a51b1 100644 --- a/src/coreclr/jit/lower.h +++ b/src/coreclr/jit/lower.h @@ -133,8 +133,8 @@ class Lowering final : public Phase // Call Lowering // ------------------------------ GenTree* LowerCall(GenTree* call); - GenTree* LowerCallMemmove(GenTreeCall* call); - GenTree* LowerCallMemcmp(GenTreeCall* call); + bool LowerCallMemmove(GenTreeCall* call, GenTree** next); + bool LowerCallMemcmp(GenTreeCall* call, GenTree** next); void LowerCFGCall(GenTreeCall* call); void MoveCFGCallArg(GenTreeCall* call, GenTree* node); #ifndef TARGET_64BIT @@ -269,7 +269,7 @@ class Lowering final : public Phase // operands. // // Arguments: - // tree - Gentree of a binary operation. + // tree - GenTree of a binary operation. // isSafeToMarkOp1 True if it's safe to mark op1 as register optional // isSafeToMarkOp2 True if it's safe to mark op2 as register optional // @@ -312,6 +312,7 @@ class Lowering final : public Phase void LowerStoreIndirCommon(GenTreeStoreInd* ind); void LowerIndir(GenTreeIndir* ind); void LowerStoreIndir(GenTreeStoreInd* node); + void LowerStoreIndirCoalescing(GenTreeStoreInd* node); GenTree* LowerAdd(GenTreeOp* node); GenTree* LowerMul(GenTreeOp* mul); bool TryLowerAndNegativeOne(GenTreeOp* node, GenTree** nextNode); diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index 8c088ac3bc1bf..ec19a65c13464 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -115,7 +115,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // lsraAssignRegToTree: Assign the given reg to tree node. // // Arguments: -// tree - Gentree node +// tree - GenTree node // reg - register to be assigned // regIdx - register idx, if tree is a multi-reg call node. 
// regIdx will be zero for single-reg result producing tree nodes. @@ -416,7 +416,7 @@ void LinearScan::updateRegsFreeBusyState(RefPosition& refPosition, // It is unlikely, if an internal register is the only use of floating point, // that it will select a callee-save register. But to be safe, we restrict // the set of candidates if compFloatingPointUsed is not already set. - +// regMaskTP LinearScan::internalFloatRegCandidates() { needNonIntegerRegisters = true; @@ -451,7 +451,7 @@ RegRecord* LinearScan::getRegisterRecord(regNumber regNum) // minRegCount registers, otherwise returns regMaskActual. // // Arguments: -// refposition - RefPosition for which we want to constain. +// refPosition - RefPosition which we want to constrain. // regMaskActual - regMask that needs to be constrained // regMaskConstraint - regMask constraint that needs to be // applied to regMaskActual @@ -459,8 +459,9 @@ RegRecord* LinearScan::getRegisterRecord(regNumber regNum) // be present in new regMask. // // Return Value: -// New regMask that has minRegCount registers after instersection. +// New regMask that has minRegCount registers after intersection. // Otherwise returns regMaskActual. +// regMaskTP LinearScan::getConstrainedRegMask(RefPosition* refPosition, regMaskTP regMaskActual, regMaskTP regMaskConstraint, @@ -500,7 +501,7 @@ regMaskTP LinearScan::getConstrainedRegMask(RefPosition* refPosition, // Notes: // This is the method used to implement the stress options that limit // the set of registers considered for allocation. - +// regMaskTP LinearScan::stressLimitRegs(RefPosition* refPosition, regMaskTP mask) { #ifdef TARGET_ARM64 @@ -588,7 +589,7 @@ regMaskTP LinearScan::stressLimitRegs(RefPosition* refPosition, regMaskTP mask) // // Assumptions: // 'refPosition is non-null. - +// bool LinearScan::conflictingFixedRegReference(regNumber regNum, RefPosition* refPosition) { // Is this a fixed reference of this register? If so, there is no conflict. 
@@ -697,7 +698,7 @@ LinearScanInterface* getLinearScanAllocator(Compiler* comp) // The constructor takes care of initializing the data structures that are used // during Lowering, including (in DEBUG) getting the stress environment variables, // as they may affect the block ordering. - +// LinearScan::LinearScan(Compiler* theCompiler) : compiler(theCompiler) , intervals(theCompiler->getAllocator(CMK_LSRA_Interval)) @@ -850,7 +851,7 @@ LinearScan::LinearScan(Compiler* theCompiler) // However, a block may be in the list and already selected, if it was subsequently // encountered as both a flow and layout successor of the most recently selected // block. - +// BasicBlock* LinearScan::getNextCandidateFromWorkList() { BasicBlockList* nextWorkList = nullptr; @@ -881,7 +882,7 @@ BasicBlock* LinearScan::getNextCandidateFromWorkList() // will be allocated. // This method clears the bbVisitedSet on LinearScan, and when it returns the set // contains all the bbNums for the block. - +// void LinearScan::setBlockSequence() { assert(!blockSequencingDone); // The method should be called only once. @@ -1124,6 +1125,7 @@ void LinearScan::setBlockSequence() // // Notes: // See addToBlockSequenceWorkList. +// int LinearScan::compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block2, bool useBlockWeights) { if (useBlockWeights) @@ -1186,6 +1188,7 @@ int LinearScan::compareBlocksForSequencing(BasicBlock* block1, BasicBlock* block // Note also that, when random traversal order is implemented, this method // should insert the blocks into the list in random order, so that we can always // simply select the first block in the list. +// void LinearScan::addToBlockSequenceWorkList(BlockSet sequencedBlockSet, BasicBlock* block, BlockSet& predSet) { // The block that is being added is not already sequenced @@ -1302,7 +1305,7 @@ BasicBlock* LinearScan::startBlockSequence() // Notes: // This method is used when the next block is actually going to be handled. // It changes curBBNum. 
- +// BasicBlock* LinearScan::moveToNextBlock() { BasicBlock* nextBlock = getNextBlock(); @@ -1326,7 +1329,7 @@ BasicBlock* LinearScan::moveToNextBlock() // Notes: // This method does not actually change the current block - it is used simply // to determine which block will be next. - +// BasicBlock* LinearScan::getNextBlock() { assert(blockSequencingDone); @@ -1450,7 +1453,7 @@ PhaseStatus LinearScan::doLinearScan() // CodeGen will take care of updating the reg masks and the current var liveness, // after calling this method. // This is because we need to kill off the dead registers before setting the newly live ones. - +// void LinearScan::recordVarLocationsAtStartOfBB(BasicBlock* bb) { if (!enregisterLocalVars) @@ -2463,7 +2466,7 @@ void LinearScan::checkLastUses(BasicBlock* block) // For this case, the block returned is the same as for LSRA_BLOCK_BOUNDARY_PRED, but // the register locations will be "rotated" to stress the resolution and allocation // code. - +// BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, BasicBlock* prevBlock DEBUGARG(bool* pPredBlockIsAllocated)) { @@ -2789,7 +2792,7 @@ bool copyOrMoveRegInUse(RefPosition* ref, LsraLocation loc) // of a RefTypeUse RefPosition (i.e. not when we are only looking at the type of an interval, nor when // we are interested in the "defining" type of the interval). This is because the situation of interest // only happens at the use (where it must be copied to an integer register). - +// RegisterType LinearScan::getRegisterType(Interval* currentInterval, RefPosition* refPosition) { assert(refPosition->getInterval() == currentInterval); @@ -2905,6 +2908,7 @@ bool LinearScan::isMatchingConstant(RegRecord* physRegRecord, RefPosition* refPo // Arguments: // currentInterval: The interval for the current allocation // refPosition: The RefPosition of the current Interval for which a register is being allocated +// // Return Value: // The regNumber, if any, allocated to the RefPosition. 
// Returns REG_NA only if 'refPosition->RegOptional()' is true, and there are @@ -3148,9 +3152,9 @@ void LinearScan::unassignDoublePhysReg(RegRecord* doubleRegRecord) // refLocation - the LsraLocation at which we want to know if it is active // // Return Value: -// True - if this RefPosition occurs at the given location, OR +// true - if this RefPosition occurs at the given location, OR // if it occurs at the previous location and is marked delayRegFree. -// False - otherwise +// false - otherwise // bool LinearScan::isRefPositionActive(RefPosition* refPosition, LsraLocation refLocation) { @@ -3416,8 +3420,12 @@ void LinearScan::setIntervalAsSpilled(Interval* interval) regNumber reg = interval->physReg; spillCost[reg] = getSpillWeight(getRegisterRecord(reg)); } + + // REVIEW: we fall into the code below which duplicates some of the above work: it will also add to + // `splitOrSpilledVars` as well as unconditionally set `interval->isSpilled = true`. } #endif + if (interval->isLocalVar) { unsigned varIndex = interval->getVarIndex(compiler); @@ -3434,7 +3442,7 @@ void LinearScan::setIntervalAsSpilled(Interval* interval) } //------------------------------------------------------------------------ -// spill: Spill the "interval" starting from "fromRefPosition" (upto "toRefPosition") +// spillInterval: Spill the "interval" starting from "fromRefPosition" (up to "toRefPosition") // // Arguments: // interval - The interval that contains the RefPosition to be spilled @@ -3481,8 +3489,6 @@ void LinearScan::spillInterval(Interval* interval, RefPosition* fromRefPosition interval->firstRefPosition->singleDefSpill = true; } - assert(toRefPosition != nullptr); - #ifdef DEBUG if (VERBOSE) { @@ -3521,7 +3527,7 @@ void LinearScan::spillInterval(Interval* interval, RefPosition* fromRefPosition // Notes: // This method is used to unassign a register when an interval needs to be moved to a // different register, but not (yet) spilled. 
- +// void LinearScan::unassignPhysRegNoSpill(RegRecord* regRec) { Interval* assignedInterval = regRec->assignedInterval; @@ -3693,22 +3699,22 @@ void LinearScan::unassignPhysReg(RegRecord* regRec, RefPosition* spillRefPositio } makeRegAvailable(regToUnassign, assignedInterval->registerType); - RefPosition* nextRefPosition = nullptr; - if (spillRefPosition != nullptr) - { - nextRefPosition = spillRefPosition->nextRefPosition; - } - if (!intervalIsAssigned && assignedInterval->physReg != REG_NA) { // This must have been a temporary copy reg, but we can't assert that because there // may have been intervening RefPositions that were not copyRegs. - // reg->assignedInterval has already been set to nullptr by checkAndClearInterval() + // regRec->assignedInterval has already been set to nullptr by checkAndClearInterval() assert(regRec->assignedInterval == nullptr); return; } + RefPosition* nextRefPosition = nullptr; + if (spillRefPosition != nullptr) + { + nextRefPosition = spillRefPosition->nextRefPosition; + } + // regNumber victimAssignedReg = assignedInterval->physReg; assignedInterval->physReg = REG_NA; @@ -3739,6 +3745,7 @@ void LinearScan::unassignPhysReg(RegRecord* regRec, RefPosition* spillRefPositio } } #endif // 0 + #ifdef DEBUG // With JitStressRegs == 0x80 (LSRA_EXTEND_LIFETIMES), we may have a RefPosition // that is not marked lastUse even though the treeNode is a lastUse. In that case @@ -3777,6 +3784,7 @@ void LinearScan::unassignPhysReg(RegRecord* regRec, RefPosition* spillRefPositio spillInterval(assignedInterval, spillRefPosition DEBUGARG(nextRefPosition)); } } + // Maintain the association with the interval, if it has more references. // Or, if we "remembered" an interval assigned to this register, restore it. 
if (nextRefPosition != nullptr) @@ -3896,7 +3904,7 @@ void LinearScan::spillGCRefs(RefPosition* killRefPosition) // Notes: // Calls processBlockEndLocations() to set the outVarToRegMap, then gets the next block, // and sets the inVarToRegMap appropriately. - +// void LinearScan::processBlockEndAllocation(BasicBlock* currentBlock) { assert(currentBlock != nullptr); @@ -3934,7 +3942,7 @@ void LinearScan::processBlockEndAllocation(BasicBlock* currentBlock) // // Return Value: // The new register to use. - +// #ifdef DEBUG regNumber LinearScan::rotateBlockStartLocation(Interval* interval, regNumber targetReg, regMaskTP availableRegs) { @@ -4031,6 +4039,7 @@ RegRecord* LinearScan::getSecondHalfRegRec(RegRecord* regRec) return secondHalfRegRec; } + //------------------------------------------------------------------------------------------ // findAnotherHalfRegRec: Find another half RegRecord which forms same ARM32 double register // @@ -4048,6 +4057,7 @@ RegRecord* LinearScan::findAnotherHalfRegRec(RegRecord* regRec) regNumber anotherHalfRegNum = findAnotherHalfRegNum(regRec->regNum); return getRegisterRecord(anotherHalfRegNum); } + //------------------------------------------------------------------------------------------ // findAnotherHalfRegNum: Find another half register's number which forms same ARM32 double register // @@ -4081,7 +4091,7 @@ regNumber LinearScan::findAnotherHalfRegNum(regNumber regNum) return anotherHalfRegNum; } -#endif +#endif // TARGET_ARM //-------------------------------------------------------------------------------------- // canRestorePreviousInterval: Test if we can restore previous interval @@ -4201,6 +4211,7 @@ void LinearScan::resetAllRegistersState() // During the resolution (write-back when allocationPassComplete = true) pass, we only // modify the inVarToRegMap in cases where a lclVar was spilled after the block had been // completed. 
+// void LinearScan::processBlockStartLocations(BasicBlock* currentBlock) { // If we have no register candidates we should only call this method during allocation. @@ -4557,7 +4568,7 @@ void LinearScan::processBlockStartLocations(BasicBlock* currentBlock) // This is because we need to have the outVarToRegMap locations in order to set the locations // at successor blocks during allocation time, but if lclVars are spilled after a block has been // completed, we need to record the REG_STK location for those variables at resolution time. - +// void LinearScan::processBlockEndLocations(BasicBlock* currentBlock) { assert(currentBlock != nullptr && currentBlock->bbNum == curBBNum); @@ -5885,7 +5896,7 @@ void LinearScan::allocateRegisters() } } } - if (currentRefPosition.lastUse || currentRefPosition.nextRefPosition == nullptr) + if (currentRefPosition.lastUse || (currentRefPosition.nextRefPosition == nullptr)) { assert(currentRefPosition.isIntervalRef()); // If this isn't a final use, we'll mark the register as available, but keep the association. @@ -6228,6 +6239,7 @@ void LinearScan::writeLocalReg(GenTreeLclVar* lclNode, unsigned varNum, regNumbe // Description: // Update the graph for a local reference. // Also, track the register (if any) that is currently occupied. 
+// // Arguments: // treeNode: The lclVar that's being resolved // currentRefPosition: the RefPosition associated with the treeNode @@ -6356,9 +6368,9 @@ void LinearScan::resolveLocalRef(BasicBlock* block, GenTreeLclVar* treeNode, Ref } } - bool reload = currentRefPosition->reload; - bool spillAfter = currentRefPosition->spillAfter; - bool writeThru = currentRefPosition->writeThru; + const bool reload = currentRefPosition->reload; + const bool spillAfter = currentRefPosition->spillAfter; + const bool writeThru = currentRefPosition->writeThru; // In the reload case we either: // - Set the register to REG_STK if it will be referenced only from the home location, or @@ -6707,11 +6719,11 @@ void LinearScan::insertCopyOrReload(BasicBlock* block, GenTree* tree, unsigned m // not preserved). // // Arguments: -// tree - This is the node before which we will insert the Save. -// It will be a call or some node that turns into a call. -// refPosition - The RefTypeUpperVectorSave RefPosition. -// upperInterval - The Interval for the upper half of the large vector lclVar. -// block - the BasicBlock containing the call. +// tree - This is the node before which we will insert the Save. +// It will be a call or some node that turns into a call. +// refPosition - The RefTypeUpperVectorSave RefPosition. +// upperVectorInterval - The Interval for the upper half of the large vector lclVar. +// block - the BasicBlock containing the call. // void LinearScan::insertUpperVectorSave(GenTree* tree, RefPosition* refPosition, @@ -6788,6 +6800,7 @@ void LinearScan::insertUpperVectorSave(GenTree* tree, // tree - This is the node for which we will insert the Restore. // If non-null, it will be a use of the large vector lclVar. // If null, the Restore will be added to the end of the block. +// refPosition - The RefTypeUpperVectorRestore RefPosition. // upperVectorInterval - The Interval for the upper vector for the lclVar. // block - the BasicBlock into which we will be inserting the code. 
// @@ -6903,7 +6916,7 @@ void LinearScan::insertUpperVectorRestore(GenTree* tree, // // Assumptions: // This is called before any calls to updateMaxSpill(). - +// void LinearScan::initMaxSpill() { needDoubleTmpForFPCall = false; @@ -6928,7 +6941,7 @@ void LinearScan::initMaxSpill() // Assumptions: // This is called after updateMaxSpill() has been called for all "real" // RefPositions. - +// void LinearScan::recordMaxSpill() { // Note: due to the temp normalization process (see tmpNormalizeType) @@ -6984,7 +6997,7 @@ void LinearScan::recordMaxSpill() // This is called for each "real" RefPosition during the writeback // phase of LSRA. It keeps track of how many concurrently-live // spills there are, and the largest number seen so far. - +// void LinearScan::updateMaxSpill(RefPosition* refPosition) { RefType refType = refPosition->refType; @@ -7008,6 +7021,7 @@ void LinearScan::updateMaxSpill(RefPosition* refPosition) return; } #endif // !FEATURE_PARTIAL_SIMD_CALLEE_SAVE + if (refPosition->spillAfter || refPosition->reload || (refPosition->RegOptional() && refPosition->assignedReg() == REG_NA)) { @@ -7069,7 +7083,7 @@ void LinearScan::updateMaxSpill(RefPosition* refPosition) } // This is the final phase of register allocation. It writes the register assignments to -// the tree, and performs resolution across joins and backedges. +// the tree, and performs resolution across joins and back edges. // template void LinearScan::resolveRegisters() @@ -7424,7 +7438,7 @@ void LinearScan::resolveRegisters() } // UpperVector intervals may have unique assignments at each reference. if (!currentInterval->isUpperVector) -#endif +#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE { if (nextRefPosition->assignedReg() != REG_NA) { @@ -7663,13 +7677,13 @@ void LinearScan::resolveRegisters() // compiler->BashUnusedStructLocals(); } -// //------------------------------------------------------------------------ // insertMove: Insert a move of a lclVar with the given lclNum into the given block. 
// // Arguments: // block - the BasicBlock into which the move will be inserted. -// insertionPoint - the instruction before which to insert the move +// insertionPoint - If non-nullptr, insert the move before this instruction, +// Otherwise, insert "near" the end (prior to the branch, if any). // lclNum - the lclNum of the var to be moved // fromReg - the register from which the var is moving // toReg - the register to which the var is moving @@ -7678,10 +7692,8 @@ void LinearScan::resolveRegisters() // None. // // Notes: -// If insertionPoint is non-NULL, insert before that instruction; -// otherwise, insert "near" the end (prior to the branch, if any). // If fromReg or toReg is REG_STK, then move from/to memory, respectively. - +// void LinearScan::insertMove( BasicBlock* block, GenTree* insertionPoint, unsigned lclNum, regNumber fromReg, regNumber toReg) { @@ -7854,7 +7866,7 @@ void LinearScan::insertSwap( // It is up to the caller to check the return value, and to determine whether a register is // available, and to handle that case appropriately. // It is also up to the caller to cache the return value, as this is not cheap to compute. - +// regNumber LinearScan::getTempRegForResolution(BasicBlock* fromBlock, BasicBlock* toBlock, var_types type, @@ -8124,7 +8136,7 @@ void LinearScan::addResolution(BasicBlock* block, // For all outgoing critical edges (i.e. any successor of this block which is // a join edge), if there are any conflicts, split the edge by adding a new block, // and generate the resolution code into that block. - +// void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block) { VARSET_TP outResolutionSet(VarSetOps::Intersection(compiler, block->bbLiveOut, resolutionCandidateVars)); @@ -8457,7 +8469,7 @@ void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block) // 'join' resolution at the end of this block. // Note that a block may have both 'split' or 'critical' incoming edge(s) and 'join' outgoing // edges. 
- +// void LinearScan::resolveEdges() { JITDUMP("RESOLVING EDGES\n"); @@ -8674,7 +8686,7 @@ void LinearScan::resolveEdges() // new home (register/stack), it first does the register-to-stack moves (to free those // registers), then the register to register moves, ensuring that the target register // is free before the move, and then finally the stack to register moves. - +// void LinearScan::resolveEdge(BasicBlock* fromBlock, BasicBlock* toBlock, ResolveType resolveType, @@ -10370,7 +10382,7 @@ void LinearScan::dumpLsraAllocationEvent( dumpRefPositionShort(activeRefPosition, currentBlock); if (allocationPassComplete || (registerScore == 0)) { - printf("Reuse %-4s ", getRegName(reg)); + printf("Reuse %-4s ", getRegName(reg)); } else { @@ -10473,6 +10485,7 @@ void LinearScan::dumpRegRecordHeader() regColumnWidth = 4; } sprintf_s(intervalNameFormat, MAX_FORMAT_CHARS, "%%c%%-%dd", regColumnWidth - 2); + sprintf_s(smallLocalsIntervalNameFormat, MAX_FORMAT_CHARS, "%%c0%%-%dd", regColumnWidth - 3); sprintf_s(regNameFormat, MAX_FORMAT_CHARS, "%%-%ds", regColumnWidth); // Next, determine the width of the short RefPosition (see dumpRefPositionShort()). @@ -10488,7 +10501,7 @@ void LinearScan::dumpRegRecordHeader() maxNodeLocation = (maxNodeLocation == 0) ? 
1 - : maxNodeLocation; // corner case of a method with an infinite loop without any gentree nodes + : maxNodeLocation; // corner case of a method with an infinite loop without any GenTree nodes assert(maxNodeLocation >= 1); assert(refPositions.size() >= 1); int treeIdWidth = 9; /* '[XXXXX] '*/ @@ -10593,6 +10606,7 @@ void LinearScan::dumpRegRecordTitleLines() } printf("%s\n", rightBox); } + void LinearScan::dumpRegRecordTitle() { dumpRegRecordTitleLines(); @@ -10661,11 +10675,25 @@ void LinearScan::dumpIntervalName(Interval* interval) { if (interval->isLocalVar) { - printf(intervalNameFormat, 'V', interval->varNum); + if (interval->varNum < 10) + { + printf(smallLocalsIntervalNameFormat, 'V', interval->varNum); + } + else + { + printf(intervalNameFormat, 'V', interval->varNum); + } } else if (interval->IsUpperVector()) { - printf(intervalNameFormat, 'U', interval->relatedInterval->varNum); + if (interval->relatedInterval->varNum < 10) + { + printf(smallLocalsIntervalNameFormat, 'U', interval->relatedInterval->varNum); + } + else + { + printf(intervalNameFormat, 'U', interval->relatedInterval->varNum); + } } else if (interval->isConstant) { @@ -10862,6 +10890,7 @@ bool LinearScan::IsResolutionNode(LIR::Range& containingRange, GenTree* node) // // Notes: // If verbose is set, this will also dump a table of the final allocations. +// void LinearScan::verifyFinalAllocation() { if (VERBOSE) @@ -11251,7 +11280,7 @@ void LinearScan::verifyFinalAllocation() interval->physReg = REG_NA; interval->assignedReg = nullptr; - // regRegcord could be null if the RefPosition does not require a register. + // regRecord could be null if the RefPosition does not require a register. 
if (regRecord != nullptr) { regRecord->assignedInterval = nullptr; @@ -11268,7 +11297,7 @@ void LinearScan::verifyFinalAllocation() assert((lclVarInterval->physReg == REG_NA) || lclVarInterval->isPartiallySpilled); } } -#endif +#endif // FEATURE_PARTIAL_SIMD_CALLEE_SAVE else { assert(currentRefPosition.RegOptional()); @@ -11377,6 +11406,7 @@ void LinearScan::verifyFinalAllocation() // // Notes: // If verbose is set, this will also dump the moves into the table of final allocations. +// void LinearScan::verifyResolutionMove(GenTree* resolutionMove, LsraLocation currentLocation) { GenTree* dst = resolutionMove; diff --git a/src/coreclr/jit/lsra.h b/src/coreclr/jit/lsra.h index 20941e45f9d1b..41fcc7e5d09b2 100644 --- a/src/coreclr/jit/lsra.h +++ b/src/coreclr/jit/lsra.h @@ -1456,6 +1456,7 @@ class LinearScan : public LinearScanInterface static const int MAX_FORMAT_CHARS = 12; char intervalNameFormat[MAX_FORMAT_CHARS]; + char smallLocalsIntervalNameFormat[MAX_FORMAT_CHARS]; // used for V01 to V09 (to match V%02u format) char regNameFormat[MAX_FORMAT_CHARS]; char shortRefPositionFormat[MAX_FORMAT_CHARS]; char emptyRefPositionFormat[MAX_FORMAT_CHARS]; @@ -2166,7 +2167,7 @@ class Interval : public Referenceable // is currently preferenced (e.g. because they are related by a copy) Interval* relatedInterval; - // The assignedReg is the RecRecord for the register to which this interval + // The assignedReg is the RegRecord for the register to which this interval // has been assigned at some point - if the interval is active, this is the // register it currently occupies. RegRecord* assignedReg; @@ -2582,7 +2583,7 @@ class RefPosition return genRegNumFromMask(registerAssignment); } - // Returns true if it is a reference on a gentree node. + // Returns true if it is a reference on a GenTree node. 
bool IsActualRef() { switch (refType) diff --git a/src/coreclr/jit/lsraarm64.cpp b/src/coreclr/jit/lsraarm64.cpp index 516d6c8c48bf2..ad8ce0b2eca84 100644 --- a/src/coreclr/jit/lsraarm64.cpp +++ b/src/coreclr/jit/lsraarm64.cpp @@ -1120,7 +1120,7 @@ int LinearScan::BuildNode(GenTree* tree) if (sizeVal != 0) { // Compute the amount of memory to properly STACK_ALIGN. - // Note: The Gentree node is not updated here as it is cheap to recompute stack aligned size. + // Note: The GenTree node is not updated here as it is cheap to recompute stack aligned size. // This should also help in debugging as we can examine the original size specified with // localloc. sizeVal = AlignUp(sizeVal, STACK_ALIGN); @@ -1548,25 +1548,41 @@ int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree, int* pDstCou else if (HWIntrinsicInfo::NeedsConsecutiveRegisters(intrin.id)) { - if ((intrin.id == NI_AdvSimd_VectorTableLookup) || (intrin.id == NI_AdvSimd_Arm64_VectorTableLookup)) - { - assert(intrin.op2 != nullptr); - srcCount += BuildOperandUses(intrin.op2); - } - else + switch (intrin.id) { - assert(intrin.op2 != nullptr); - assert(intrin.op3 != nullptr); - assert((intrin.id == NI_AdvSimd_VectorTableLookupExtension) || - (intrin.id == NI_AdvSimd_Arm64_VectorTableLookupExtension)); - assert(isRMW); - srcCount += BuildConsecutiveRegistersForUse(intrin.op2, intrin.op1); - srcCount += BuildDelayFreeUses(intrin.op3, intrin.op1); + case NI_AdvSimd_VectorTableLookup: + case NI_AdvSimd_Arm64_VectorTableLookup: + assert(intrin.op2 != nullptr); + srcCount += BuildOperandUses(intrin.op2); + assert(dstCount == 1); + buildInternalRegisterUses(); + BuildDef(intrinsicTree); + *pDstCount = 1; + break; + + case NI_AdvSimd_VectorTableLookupExtension: + case NI_AdvSimd_Arm64_VectorTableLookupExtension: + assert(intrin.op2 != nullptr); + assert(intrin.op3 != nullptr); + assert(isRMW); + srcCount += BuildConsecutiveRegistersForUse(intrin.op2, intrin.op1); + srcCount += BuildDelayFreeUses(intrin.op3, 
intrin.op1); + assert(dstCount == 1); + buildInternalRegisterUses(); + BuildDef(intrinsicTree); + *pDstCount = 1; + break; + case NI_AdvSimd_StoreVector64x2: + case NI_AdvSimd_Arm64_StoreVector128x2: + assert(intrin.op1 != nullptr); + srcCount += BuildConsecutiveRegistersForUse(intrin.op2); + assert(dstCount == 0); + buildInternalRegisterUses(); + *pDstCount = 0; + break; + default: + noway_assert(!"Not a supported as multiple consecutive register intrinsic"); } - assert(dstCount == 1); - buildInternalRegisterUses(); - BuildDef(intrinsicTree); - *pDstCount = 1; return srcCount; } else if (intrin.op2 != nullptr) diff --git a/src/coreclr/jit/lsraarmarch.cpp b/src/coreclr/jit/lsraarmarch.cpp index e72ff23df3375..39be77c9b3bd7 100644 --- a/src/coreclr/jit/lsraarmarch.cpp +++ b/src/coreclr/jit/lsraarmarch.cpp @@ -30,7 +30,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // of an indirection operation. // // Arguments: -// indirTree - GT_IND, GT_STOREIND or block gentree node +// indirTree - GT_IND, GT_STOREIND or block GenTree node // // Return Value: // The number of sources consumed by this node. diff --git a/src/coreclr/jit/lsrabuild.cpp b/src/coreclr/jit/lsrabuild.cpp index 8c9025f61b703..c1ba537716dde 100644 --- a/src/coreclr/jit/lsrabuild.cpp +++ b/src/coreclr/jit/lsrabuild.cpp @@ -30,7 +30,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX // The BuildNode methods use this helper to retrieve the RefPositions for child nodes // from the useList being constructed. Note that, if the user knows the order of the operands, // it is expected that they should just retrieve them directly. - +// RefInfoListNode* RefInfoList::removeListNode(GenTree* node) { RefInfoListNode* prevListNode = nullptr; @@ -54,7 +54,7 @@ RefInfoListNode* RefInfoList::removeListNode(GenTree* node) // The BuildNode methods use this helper to retrieve the RefPositions for child nodes // from the useList being constructed. 
Note that, if the user knows the order of the operands, // it is expected that they should just retrieve them directly. - +// RefInfoListNode* RefInfoList::removeListNode(GenTree* node, unsigned multiRegIdx) { RefInfoListNode* prevListNode = nullptr; @@ -1343,7 +1343,7 @@ bool LinearScan::checkContainedOrCandidateLclVar(GenTreeLclVar* lclNode) // defineNewInternalTemp: Defines a ref position for an internal temp. // // Arguments: -// tree - Gentree node requiring an internal register +// tree - GenTree node requiring an internal register // regType - Register type // currentLoc - Location of the temp Def position // regMask - register mask of candidates for temp @@ -1362,7 +1362,7 @@ RefPosition* LinearScan::defineNewInternalTemp(GenTree* tree, RegisterType regTy // buildInternalRegisterDefForNode - Create an Interval for an internal int register, and a def RefPosition // // Arguments: -// tree - Gentree node that needs internal registers +// tree - GenTree node that needs internal registers // internalCands - The mask of valid registers // // Returns: @@ -1381,7 +1381,7 @@ RefPosition* LinearScan::buildInternalIntRegisterDefForNode(GenTree* tree, regMa // buildInternalFloatRegisterDefForNode - Create an Interval for an internal fp register, and a def RefPosition // // Arguments: -// tree - Gentree node that needs internal registers +// tree - GenTree node that needs internal registers // internalCands - The mask of valid registers // // Returns: @@ -3271,8 +3271,7 @@ RefPosition* LinearScan::BuildUse(GenTree* operand, regMaskTP candidates, int mu // int LinearScan::BuildIndirUses(GenTreeIndir* indirTree, regMaskTP candidates) { - GenTree* const addr = indirTree->gtOp1; - return BuildAddrUses(addr, candidates); + return BuildAddrUses(indirTree->Addr(), candidates); } int LinearScan::BuildAddrUses(GenTree* addr, regMaskTP candidates) @@ -3290,12 +3289,12 @@ int LinearScan::BuildAddrUses(GenTree* addr, regMaskTP candidates) GenTreeAddrMode* const addrMode = 
addr->AsAddrMode(); unsigned srcCount = 0; - if ((addrMode->Base() != nullptr) && !addrMode->Base()->isContained()) + if (addrMode->HasBase() && !addrMode->Base()->isContained()) { BuildUse(addrMode->Base(), candidates); srcCount++; } - if (addrMode->Index() != nullptr) + if (addrMode->HasIndex()) { if (!addrMode->Index()->isContained()) { @@ -3326,7 +3325,8 @@ int LinearScan::BuildAddrUses(GenTree* addr, regMaskTP candidates) // BuildOperandUses: Build Use RefPositions for an operand that might be contained. // // Arguments: -// node - The node of interest +// node - The node of interest +// candidates - The set of candidates for the uses // // Return Value: // The number of source registers used by the *parent* of this node. @@ -3467,7 +3467,10 @@ void LinearScan::AddDelayFreeUses(RefPosition* useRefPosition, GenTree* rmwNode) // node - The node of interest // rmwNode - The node that has RMW semantics (if applicable) // candidates - The set of candidates for the uses -// useRefPositionRef - If a use refposition is created, returns it. If none created, sets it to nullptr. +// useRefPositionRef - If a use RefPosition is created, returns it. If none created, sets it to nullptr. +// +// REVIEW: useRefPositionRef is not consistently set. Also, sometimes this function creates multiple RefPositions +// but can only return one. Does it matter which one gets returned? // // Return Value: // The number of source registers used by the *parent* of this node. 
@@ -3531,14 +3534,14 @@ int LinearScan::BuildDelayFreeUses(GenTree* node, GenTreeAddrMode* const addrMode = addr->AsAddrMode(); unsigned srcCount = 0; - if ((addrMode->Base() != nullptr) && !addrMode->Base()->isContained()) + if (addrMode->HasBase() && !addrMode->Base()->isContained()) { use = BuildUse(addrMode->Base(), candidates); AddDelayFreeUses(use, rmwNode); srcCount++; } - if ((addrMode->Index() != nullptr) && !addrMode->Index()->isContained()) + if (addrMode->HasIndex() && !addrMode->Index()->isContained()) { use = BuildUse(addrMode->Index(), candidates); AddDelayFreeUses(use, rmwNode); diff --git a/src/coreclr/jit/lsraloongarch64.cpp b/src/coreclr/jit/lsraloongarch64.cpp index 7ab333630c046..b695e6652f497 100644 --- a/src/coreclr/jit/lsraloongarch64.cpp +++ b/src/coreclr/jit/lsraloongarch64.cpp @@ -441,7 +441,7 @@ int LinearScan::BuildNode(GenTree* tree) if (sizeVal != 0) { // Compute the amount of memory to properly STACK_ALIGN. - // Note: The Gentree node is not updated here as it is cheap to recompute stack aligned size. + // Note: The GenTree node is not updated here as it is cheap to recompute stack aligned size. // This should also help in debugging as we can examine the original size specified with // localloc. sizeVal = AlignUp(sizeVal, STACK_ALIGN); @@ -619,7 +619,7 @@ int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree) // of an indirection operation. // // Arguments: -// indirTree - GT_IND, GT_STOREIND or block gentree node +// indirTree - GT_IND, GT_STOREIND or block GenTree node // // Return Value: // The number of sources consumed by this node. diff --git a/src/coreclr/jit/lsrariscv64.cpp b/src/coreclr/jit/lsrariscv64.cpp index cc7ea03e3b383..117a46f71fad2 100644 --- a/src/coreclr/jit/lsrariscv64.cpp +++ b/src/coreclr/jit/lsrariscv64.cpp @@ -475,7 +475,7 @@ int LinearScan::BuildNode(GenTree* tree) if (sizeVal != 0) { // Compute the amount of memory to properly STACK_ALIGN. 
- // Note: The Gentree node is not updated here as it is cheap to recompute stack aligned size. + // Note: The GenTree node is not updated here as it is cheap to recompute stack aligned size. // This should also help in debugging as we can examine the original size specified with // localloc. sizeVal = AlignUp(sizeVal, STACK_ALIGN); @@ -669,7 +669,7 @@ int LinearScan::BuildHWIntrinsic(GenTreeHWIntrinsic* intrinsicTree) // of an indirection operation. // // Arguments: -// indirTree - GT_IND, GT_STOREIND or block gentree node +// indirTree - GT_IND, GT_STOREIND or block GenTree node // // Return Value: // The number of sources consumed by this node. diff --git a/src/coreclr/jit/lsraxarch.cpp b/src/coreclr/jit/lsraxarch.cpp index 5d54c08ebb790..522d2e5d0e6eb 100644 --- a/src/coreclr/jit/lsraxarch.cpp +++ b/src/coreclr/jit/lsraxarch.cpp @@ -1269,7 +1269,7 @@ int LinearScan::BuildCall(GenTreeCall* call) if (argNode->OperGet() == GT_FIELD_LIST) { assert(argNode->isContained()); - assert(varTypeIsStruct(argNode) || abiInfo.IsStruct); + assert(varTypeIsStruct(arg.GetSignatureType())); unsigned regIndex = 0; for (GenTreeFieldList::Use& use : argNode->AsFieldList()->Uses()) @@ -2762,7 +2762,7 @@ int LinearScan::BuildCast(GenTreeCast* cast) // BuildIndir: Specify register requirements for address expression of an indirection operation. // // Arguments: -// indirTree - GT_IND or GT_STOREIND gentree node +// indirTree - GT_IND or GT_STOREIND GenTree node // // Return Value: // The number of sources consumed by this node. 
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 39617fb20a27d..6fa8a90d3bdfb 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -827,10 +827,6 @@ void CallArg::Dump(Compiler* comp) { printf(", wellKnown[%s]", getWellKnownArgName(m_wellKnownArg)); } - if (AbiInfo.IsStruct) - { - printf(", isStruct"); - } printf("]\n"); } #endif @@ -880,8 +876,6 @@ void CallArgs::SetTemp(CallArg* arg, unsigned tmpNum) // void CallArgs::ArgsComplete(Compiler* comp, GenTreeCall* call) { - bool hasStructRegArg = false; - unsigned argCount = CountArgs(); // Previous argument with GTF_EXCEPT @@ -914,17 +908,9 @@ void CallArgs::ArgsComplete(Compiler* comp, GenTreeCall* call) #if FEATURE_ARG_SPLIT else if (arg.AbiInfo.IsSplit()) { - hasStructRegArg = true; assert(m_hasStackArgs); } -#endif // FEATURE_ARG_SPLIT - else // we have a register argument, next we look for a struct type. - { - if (varTypeIsStruct(argx) UNIX_AMD64_ABI_ONLY(|| arg.AbiInfo.IsStruct)) - { - hasStructRegArg = true; - } - } +#endif // FEATURE_ARG_SPLIT /* If the argument tree contains an assignment (GTF_ASG) then the argument and and every earlier argument (except constants) must be evaluated into temps @@ -2793,9 +2779,8 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call } #endif // TARGET_ARM - arg.AbiInfo = CallArgABIInformation(); - arg.AbiInfo.ArgType = argx->TypeGet(); - arg.AbiInfo.IsStruct = isStructArg; + arg.AbiInfo = CallArgABIInformation(); + arg.AbiInfo.ArgType = argx->TypeGet(); if (isRegArg) { @@ -3014,7 +2999,7 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call arg.AbiInfo.SetMultiRegNums(); - if (arg.AbiInfo.IsStruct) + if (varTypeIsStruct(arg.GetSignatureType())) { arg.AbiInfo.PassedByRef = passStructByRef; arg.AbiInfo.ArgType = (structBaseType == TYP_UNKNOWN) ? 
argx->TypeGet() : structBaseType; @@ -3202,10 +3187,7 @@ GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call) argx->gtType = TYP_I_IMPL; } - // Struct arguments may be morphed into a node that is not a struct type. - // In such case the CallArgABIInformation keeps track of whether the original node (before morphing) - // was a struct and the struct classification. - bool isStructArg = arg.AbiInfo.IsStruct; + bool isStructArg = varTypeIsStruct(arg.GetSignatureType()); GenTree* argObj = argx->gtEffectiveVal(true /*commaOnly*/); bool makeOutArgCopy = false; @@ -4576,8 +4558,8 @@ GenTree* Compiler::fgMorphIndexAddr(GenTreeIndexAddr* indexAddr) // dependency, so make sure this dependency remains visible. Also, the // JIT is not allowed to create arbitrary byrefs, so we must make sure // the address is not reordered with the bounds check. - boundsCheck->gtFlags |= GTF_ORDER_SIDEEFF; - addr->gtFlags |= GTF_ORDER_SIDEEFF; + boundsCheck->SetHasOrderingSideEffect(); + addr->SetHasOrderingSideEffect(); tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), boundsCheck, tree); fgSetRngChkTarget(boundsCheck); @@ -5134,7 +5116,7 @@ GenTree* Compiler::fgMorphExpandInstanceField(GenTree* tree, MorphAddrContext* m GenTree* lclVar = gtNewLclvNode(lclNum, objRefType); GenTree* nullchk = gtNewNullCheck(lclVar, compCurBB); - nullchk->gtFlags |= GTF_ORDER_SIDEEFF; + nullchk->SetHasOrderingSideEffect(); if (store != nullptr) { @@ -5147,10 +5129,6 @@ GenTree* Compiler::fgMorphExpandInstanceField(GenTree* tree, MorphAddrContext* m } addr = gtNewLclvNode(lclNum, objRefType); // Use "tmpLcl" to create "addr" node. - - // Ensure the creation of the byref does not get reordered with the - // null check, as that could otherwise create an illegal byref. 
- addr->gtFlags |= GTF_ORDER_SIDEEFF; } else { @@ -9911,18 +9889,18 @@ GenTree* Compiler::fgMorphFinalizeIndir(GenTreeIndir* indir) GenTree* addr = indir->Addr(); #ifdef TARGET_ARM - GenTree* effAddr = addr->gtEffectiveVal(true); - // Check for a misalignment floating point indirection. - if (effAddr->OperIs(GT_ADD) && varTypeIsFloating(indir)) + if (varTypeIsFloating(indir)) { - GenTree* addOp2 = effAddr->gtGetOp2(); - if (addOp2->IsCnsIntOrI()) + // Check for a misaligned floating point indirection. + GenTree* effAddr = addr->gtEffectiveVal(true); + target_ssize_t offset; + FieldSeq* fldSeq; + gtPeelOffsets(&effAddr, &offset, &fldSeq); + + if (((offset % genTypeSize(TYP_FLOAT)) != 0) || + (effAddr->IsCnsIntOrI() && ((effAddr->AsIntConCommon()->IconValue() % genTypeSize(TYP_FLOAT)) != 0))) { - ssize_t offset = addOp2->AsIntCon()->IconValue(); - if ((offset % genTypeSize(TYP_FLOAT)) != 0) - { - indir->gtFlags |= GTF_IND_UNALIGNED; - } + indir->gtFlags |= GTF_IND_UNALIGNED; } } #endif // TARGET_ARM diff --git a/src/coreclr/jit/morphblock.cpp b/src/coreclr/jit/morphblock.cpp index 347845e737094..7e864a5eab25c 100644 --- a/src/coreclr/jit/morphblock.cpp +++ b/src/coreclr/jit/morphblock.cpp @@ -750,9 +750,10 @@ void MorphCopyBlockHelper::MorphStructCases() } } - // Check to see if we are doing a copy to/from the same local block. - // If so, morph it to a nop. - if ((m_dstVarDsc != nullptr) && (m_srcVarDsc == m_dstVarDsc) && (m_dstLclOffset == m_srcLclOffset)) + // Check to see if we are doing a copy to/from the same local block. If so, morph it to a nop. + // Don't do this for SSA definitions as we have no way to update downstream uses. 
+ if ((m_dstVarDsc != nullptr) && (m_srcVarDsc == m_dstVarDsc) && (m_dstLclOffset == m_srcLclOffset) && + !m_store->AsLclVarCommon()->HasSsaIdentity()) { JITDUMP("Self-copy; replaced with a NOP.\n"); m_transformationDecision = BlockTransformation::Nop; diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index a583db4b3562c..11c537226e878 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -5350,11 +5350,8 @@ PhaseStatus Compiler::optOptimizeFlow() noway_assert(opts.OptimizationEnabled()); noway_assert(fgModified == false); - bool madeChanges = false; - - madeChanges |= fgUpdateFlowGraph(/* allowTailDuplication */ true); - madeChanges |= fgReorderBlocks(/* useProfileData */ false); - madeChanges |= fgUpdateFlowGraph(); + fgUpdateFlowGraph(/* doTailDuplication */ true); + fgReorderBlocks(/* useProfile */ false); // fgReorderBlocks can cause IR changes even if it does not modify // the flow graph. It calls gtPrepareCost which can cause operand swapping. @@ -5363,9 +5360,7 @@ PhaseStatus Compiler::optOptimizeFlow() // Note phase status only impacts dumping and checking done post-phase, // it has no impact on a release build. // - madeChanges = true; - - return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING; + return PhaseStatus::MODIFIED_EVERYTHING; } //----------------------------------------------------------------------------- @@ -5381,11 +5376,9 @@ PhaseStatus Compiler::optOptimizeLayout() { noway_assert(opts.OptimizationEnabled()); - bool madeChanges = false; - - madeChanges |= fgUpdateFlowGraph(/* allowTailDuplication */ false); - madeChanges |= fgReorderBlocks(/* useProfile */ true); - madeChanges |= fgUpdateFlowGraph(); + fgUpdateFlowGraph(/* doTailDuplication */ false); + fgReorderBlocks(/* useProfile */ true); + fgUpdateFlowGraph(); // fgReorderBlocks can cause IR changes even if it does not modify // the flow graph. It calls gtPrepareCost which can cause operand swapping. 
@@ -5394,9 +5387,7 @@ PhaseStatus Compiler::optOptimizeLayout() // Note phase status only impacts dumping and checking done post-phase, // it has no impact on a release build. // - madeChanges = true; - - return madeChanges ? PhaseStatus::MODIFIED_EVERYTHING : PhaseStatus::MODIFIED_NOTHING; + return PhaseStatus::MODIFIED_EVERYTHING; } //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/promotion.cpp b/src/coreclr/jit/promotion.cpp index 52163f4db0cce..e11d0619905e7 100644 --- a/src/coreclr/jit/promotion.cpp +++ b/src/coreclr/jit/promotion.cpp @@ -802,14 +802,27 @@ class LocalUses if ((cycleImprovementPerInvoc > 0) && ((cycleImprovementPerInvoc * ALLOWED_SIZE_REGRESSION_PER_CYCLE_IMPROVEMENT) >= -sizeImprovement)) { - JITDUMP(" Promoting replacement\n\n"); + JITDUMP(" Promoting replacement (cycle improvement)\n\n"); + return true; + } + + // Similarly, even for a cycle-wise regression, if we see a large size + // wise improvement we may want to promote. The main case is where all + // uses are in blocks with bbWeight=0, but we still estimate a + // size-wise improvement. 
+ const weight_t ALLOWED_CYCLE_REGRESSION_PER_SIZE_IMPROVEMENT = 0.01; + + if ((sizeImprovement > 0) && + ((sizeImprovement * ALLOWED_CYCLE_REGRESSION_PER_SIZE_IMPROVEMENT) >= -cycleImprovementPerInvoc)) + { + JITDUMP(" Promoting replacement (size improvement)\n\n"); return true; } #ifdef DEBUG if (comp->compStressCompile(Compiler::STRESS_PHYSICAL_PROMOTION_COST, 25)) { - JITDUMP(" Promoting replacement due to stress\n\n"); + JITDUMP(" Promoting replacement (stress)\n\n"); return true; } #endif @@ -1229,19 +1242,18 @@ class LocalsUseVisitor : public GenTreeVisitor } #endif - StructSegments unpromotedParts = - m_prom->SignificantSegments(m_compiler->lvaGetDesc(agg->LclNum)->GetLayout()); + agg->Unpromoted = m_prom->SignificantSegments(m_compiler->lvaGetDesc(agg->LclNum)->GetLayout()); for (Replacement& rep : reps) { - unpromotedParts.Subtract(StructSegments::Segment(rep.Offset, rep.Offset + genTypeSize(rep.AccessType))); + agg->Unpromoted.Subtract(StructSegments::Segment(rep.Offset, rep.Offset + genTypeSize(rep.AccessType))); } JITDUMP(" Unpromoted remainder: "); - DBEXEC(m_compiler->verbose, unpromotedParts.Dump()); + DBEXEC(m_compiler->verbose, agg->Unpromoted.Dump()); JITDUMP("\n\n"); StructSegments::Segment unpromotedSegment; - if (unpromotedParts.CoveringSegment(&unpromotedSegment)) + if (agg->Unpromoted.CoveringSegment(&unpromotedSegment)) { agg->UnpromotedMin = unpromotedSegment.Start; agg->UnpromotedMax = unpromotedSegment.End; @@ -1495,6 +1507,31 @@ bool StructSegments::Segment::IntersectsOrAdjacent(const Segment& other) const return true; } +//------------------------------------------------------------------------ +// Intersects: +// Check if this segment intersects another segment. +// +// Parameters: +// other - The other segment. +// +// Returns: +// True if so. 
+// +bool StructSegments::Segment::Intersects(const Segment& other) const +{ + if (End <= other.Start) + { + return false; + } + + if (other.End <= Start) + { + return false; + } + + return true; +} + //------------------------------------------------------------------------ // Contains: // Check if this segment contains another segment. @@ -1586,7 +1623,7 @@ void StructSegments::Subtract(const Segment& segment) return; } - assert(m_segments[index].IntersectsOrAdjacent(segment)); + assert(m_segments[index].Intersects(segment)); if (m_segments[index].Contains(segment)) { @@ -1679,6 +1716,46 @@ bool StructSegments::CoveringSegment(Segment* result) return true; } +//------------------------------------------------------------------------ +// Intersects: +// Check if a segment intersects with any segment in this segment tree. +// +// Parameters: +// segment - The segment. +// +// Returns: +// True if the input segment intersects with any segment in the tree; +// otherwise false. +// +bool StructSegments::Intersects(const Segment& segment) +{ + size_t index = Promotion::BinarySearch(m_segments, segment.Start); + if ((ssize_t)index < 0) + { + index = ~index; + } + else + { + // Start == segment[index].End, which makes it non-interesting. + index++; + } + + if (index >= m_segments.size()) + { + return false; + } + + // Here we know Start < segment[index].End. Do they not intersect at all? + if (m_segments[index].Start >= segment.End) + { + // Does not intersect any segment. 
+ return false; + } + + assert(m_segments[index].Intersects(segment)); + return true; +} + #ifdef DEBUG //------------------------------------------------------------------------ // Dump: diff --git a/src/coreclr/jit/promotion.h b/src/coreclr/jit/promotion.h index 0efde03da5925..c421b019bc8f9 100644 --- a/src/coreclr/jit/promotion.h +++ b/src/coreclr/jit/promotion.h @@ -60,6 +60,7 @@ class StructSegments } bool IntersectsOrAdjacent(const Segment& other) const; + bool Intersects(const Segment& other) const; bool Contains(const Segment& other) const; void Merge(const Segment& other); }; @@ -68,7 +69,7 @@ class StructSegments jitstd::vector m_segments; public: - StructSegments(CompAllocator allocator) : m_segments(allocator) + explicit StructSegments(CompAllocator allocator) : m_segments(allocator) { } @@ -76,6 +77,7 @@ class StructSegments void Subtract(const Segment& segment); bool IsEmpty(); bool CoveringSegment(Segment* result); + bool Intersects(const Segment& segment); #ifdef DEBUG void Dump(); @@ -87,12 +89,14 @@ struct AggregateInfo { jitstd::vector Replacements; unsigned LclNum; + // Unpromoted parts of the struct local. + StructSegments Unpromoted; // Min offset in the struct local of the unpromoted part. unsigned UnpromotedMin = 0; // Max offset in the struct local of the unpromoted part. 
unsigned UnpromotedMax = 0; - AggregateInfo(CompAllocator alloc, unsigned lclNum) : Replacements(alloc), LclNum(lclNum) + AggregateInfo(CompAllocator alloc, unsigned lclNum) : Replacements(alloc), LclNum(lclNum), Unpromoted(alloc) { } diff --git a/src/coreclr/jit/promotionliveness.cpp b/src/coreclr/jit/promotionliveness.cpp index 5f9fffdb95a29..77078bddb4c29 100644 --- a/src/coreclr/jit/promotionliveness.cpp +++ b/src/coreclr/jit/promotionliveness.cpp @@ -242,9 +242,8 @@ void PromotionLiveness::MarkUseDef(GenTreeLclVarCommon* lcl, BitVec& useSet, Bit } bool isFullDefOfRemainder = isDef && (agg->UnpromotedMin >= offs) && (agg->UnpromotedMax <= (offs + size)); - // TODO-CQ: We could also try to figure out if a use actually touches the remainder, e.g. in some cases - // a struct use may consist only of promoted fields and does not actually use the remainder. - MarkIndex(baseIndex, isUse, isFullDefOfRemainder, useSet, defSet); + bool isUseOfRemainder = isUse && agg->Unpromoted.Intersects(StructSegments::Segment(offs, offs + size)); + MarkIndex(baseIndex, isUseOfRemainder, isFullDefOfRemainder, useSet, defSet); } } else @@ -609,11 +608,9 @@ void PromotionLiveness::FillInLiveness(BitVec& life, BitVec volatileVars, GenTre } else { - // TODO-CQ: We could also try to figure out if a use actually touches the remainder, e.g. in some cases - // a struct use may consist only of promoted fields and does not actually use the remainder. 
BitVecOps::AddElemD(&aggTraits, aggDeaths, 0); - if (isUse) + if (isUse && agg->Unpromoted.Intersects(StructSegments::Segment(offs, offs + size))) { BitVecOps::AddElemD(m_bvTraits, life, baseIndex); } diff --git a/src/coreclr/jit/rationalize.cpp b/src/coreclr/jit/rationalize.cpp index fcb4df3129e23..56e22bb8f75cc 100644 --- a/src/coreclr/jit/rationalize.cpp +++ b/src/coreclr/jit/rationalize.cpp @@ -342,24 +342,12 @@ Compiler::fgWalkResult Rationalizer::RewriteNode(GenTree** useEdge, Compiler::Ge } else { - if (((node->gtFlags & GTF_ASG) != 0) && !node->OperRequiresAsgFlag()) - { - // Clear the GTF_ASG flag for all nodes that do not require it - node->gtFlags &= ~GTF_ASG; - } - - if (!node->IsCall()) - { - // Clear the GTF_CALL flag for all nodes but calls - node->gtFlags &= ~GTF_CALL; - } - if (node->IsValue() && use.IsDummyUse()) { node->SetUnusedValue(); } - if (node->TypeGet() == TYP_LONG) + if (node->TypeIs(TYP_LONG)) { comp->compLongUsed = true; } diff --git a/src/coreclr/jit/regset.cpp b/src/coreclr/jit/regset.cpp index bc8d598ccc174..b87533c0e4e8f 100644 --- a/src/coreclr/jit/regset.cpp +++ b/src/coreclr/jit/regset.cpp @@ -199,11 +199,30 @@ void RegSet::SetMaskVars(regMaskTP newMaskVars) } else { - printRegMaskInt(_rsMaskVars); + printRegMask(_rsMaskVars); m_rsCompiler->GetEmitter()->emitDispRegSet(_rsMaskVars); + + // deadSet = old - new + regMaskTP deadSet = _rsMaskVars & ~newMaskVars; + + // bornSet = new - old + regMaskTP bornSet = newMaskVars & ~_rsMaskVars; + + if (deadSet != RBM_NONE) + { + printf(" -"); + m_rsCompiler->GetEmitter()->emitDispRegSet(deadSet); + } + + if (bornSet != RBM_NONE) + { + printf(" +"); + m_rsCompiler->GetEmitter()->emitDispRegSet(bornSet); + } + printf(" => "); } - printRegMaskInt(newMaskVars); + printRegMask(newMaskVars); m_rsCompiler->GetEmitter()->emitDispRegSet(newMaskVars); printf("\n"); } @@ -490,7 +509,7 @@ TempDsc* RegSet::rsUnspillInPlace(GenTree* tree, regNumber oldReg, unsigned regI // Get the tree's SpillDsc 
SpillDsc* prevDsc; SpillDsc* spillDsc = rsGetSpillInfo(tree, oldReg, &prevDsc); - PREFIX_ASSUME(spillDsc != nullptr); + assert(spillDsc != nullptr); // Get the temp TempDsc* temp = rsGetSpillTempWord(oldReg, spillDsc, prevDsc); diff --git a/src/coreclr/jit/scopeinfo.cpp b/src/coreclr/jit/scopeinfo.cpp index 7c550d1da087c..ea9a8cde96003 100644 --- a/src/coreclr/jit/scopeinfo.cpp +++ b/src/coreclr/jit/scopeinfo.cpp @@ -57,6 +57,10 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX #include "emit.h" #include "codegen.h" +//============================================================================ +// siVarLoc functions +//============================================================================ + bool CodeGenInterface::siVarLoc::vlIsInReg(regNumber reg) const { switch (vlType) @@ -583,226 +587,1037 @@ void CodeGenInterface::dumpSiVarLoc(const siVarLoc* varLoc) const unreached(); } } -#endif - -/*============================================================================ - * INTERFACE (public) Functions for ScopeInfo - *============================================================================ - */ +#endif // DEBUG -// Check every CodeGenInterface::siVarLocType and CodeGenInterface::siVarLoc -// are what ICodeDebugInfo is expetecting. 
-void CodeGen::checkICodeDebugInfo() -{ -#ifdef TARGET_X86 - assert((unsigned)ICorDebugInfo::REGNUM_EAX == REG_EAX); - assert((unsigned)ICorDebugInfo::REGNUM_ECX == REG_ECX); - assert((unsigned)ICorDebugInfo::REGNUM_EDX == REG_EDX); - assert((unsigned)ICorDebugInfo::REGNUM_EBX == REG_EBX); - assert((unsigned)ICorDebugInfo::REGNUM_ESP == REG_ESP); - assert((unsigned)ICorDebugInfo::REGNUM_EBP == REG_EBP); - assert((unsigned)ICorDebugInfo::REGNUM_ESI == REG_ESI); - assert((unsigned)ICorDebugInfo::REGNUM_EDI == REG_EDI); -#endif +#ifdef DEBUG - assert((unsigned)ICorDebugInfo::VLT_REG == CodeGenInterface::VLT_REG); - assert((unsigned)ICorDebugInfo::VLT_STK == CodeGenInterface::VLT_STK); - assert((unsigned)ICorDebugInfo::VLT_REG_REG == CodeGenInterface::VLT_REG_REG); - assert((unsigned)ICorDebugInfo::VLT_REG_STK == CodeGenInterface::VLT_REG_STK); - assert((unsigned)ICorDebugInfo::VLT_STK_REG == CodeGenInterface::VLT_STK_REG); - assert((unsigned)ICorDebugInfo::VLT_STK2 == CodeGenInterface::VLT_STK2); - assert((unsigned)ICorDebugInfo::VLT_FPSTK == CodeGenInterface::VLT_FPSTK); - assert((unsigned)ICorDebugInfo::VLT_FIXED_VA == CodeGenInterface::VLT_FIXED_VA); - assert((unsigned)ICorDebugInfo::VLT_COUNT == CodeGenInterface::VLT_COUNT); - assert((unsigned)ICorDebugInfo::VLT_INVALID == CodeGenInterface::VLT_INVALID); +//------------------------------------------------------------------------ +// VariableLiveRanges dumpers +//------------------------------------------------------------------------ - /* ICorDebugInfo::VarLoc and siVarLoc should overlap exactly as we cast - * one to the other in eeSetLVinfo() - * Below is a "required but not sufficient" condition - */ +// Dump "VariableLiveRange" when code has not been generated and we don't have so the assembly native offset +// but at least "emitLocation"s and "siVarLoc" +void CodeGenInterface::VariableLiveKeeper::VariableLiveRange::dumpVariableLiveRange( + const CodeGenInterface* codeGen) const +{ + 
codeGen->dumpSiVarLoc(&m_VarLocation); - assert(sizeof(ICorDebugInfo::VarLoc) == sizeof(CodeGenInterface::siVarLoc)); + printf(" ["); + m_StartEmitLocation.Print(codeGen->GetCompiler()->compMethodID); + printf(", "); + if (m_EndEmitLocation.Valid()) + { + m_EndEmitLocation.Print(codeGen->GetCompiler()->compMethodID); + } + else + { + printf("..."); + } + printf("]"); } -void CodeGen::siInit() +// Dump "VariableLiveRange" when code has been generated and we have the assembly native offset of each "emitLocation" +void CodeGenInterface::VariableLiveKeeper::VariableLiveRange::dumpVariableLiveRange( + emitter* emit, const CodeGenInterface* codeGen) const { - checkICodeDebugInfo(); - - assert(compiler->opts.compScopeInfo); + assert(emit != nullptr); -#if defined(FEATURE_EH_FUNCLETS) - if (compiler->info.compVarScopesCount > 0) - { - siInFuncletRegion = false; - } -#endif // FEATURE_EH_FUNCLETS + // "VariableLiveRanges" are created setting its location ("m_VarLocation") and the initial native offset + // ("m_StartEmitLocation") + codeGen->dumpSiVarLoc(&m_VarLocation); - siLastEndOffs = 0; + // If this is an open "VariableLiveRange", "m_EndEmitLocation" is non-valid and print -1 + UNATIVE_OFFSET endAssemblyOffset = m_EndEmitLocation.Valid() ? m_EndEmitLocation.CodeOffset(emit) : -1; - compiler->compResetScopeLists(); + printf(" [%X, %X)", m_StartEmitLocation.CodeOffset(emit), m_EndEmitLocation.CodeOffset(emit)); } -/***************************************************************************** - * siBeginBlock - * - * Called at the beginning of code-gen for a block. Checks if any scopes - * need to be opened. 
- */ +//------------------------------------------------------------------------ +// LiveRangeDumper +//------------------------------------------------------------------------ -void CodeGen::siBeginBlock(BasicBlock* block) +//------------------------------------------------------------------------ +// resetDumper: If the "liveRange" has its last "VariableLiveRange" closed, it points +// the "LiveRangeDumper" to end of "liveRange" (nullptr). Otherwise, +// it points the "LiveRangeDumper" to the last "VariableLiveRange" of +// "liveRange", which is opened. +// +// Arguments: +// liveRanges - the "LiveRangeList" of the "VariableLiveDescriptor" we want to +// update its "LiveRangeDumper". +// +// Notes: +// This method is expected to be called once the code for a BasicBlock has been +// generated and all the new "VariableLiveRange"s of the variable during this block +// has been dumped. +// +void CodeGenInterface::VariableLiveKeeper::LiveRangeDumper::resetDumper(const LiveRangeList* liveRanges) { - assert(block != nullptr); - - if (!compiler->opts.compScopeInfo) - { - return; - } + // There must have reported something in order to reset + assert(m_hasLiveRangesToDump); - if (compiler->info.compVarScopesCount == 0) + if (liveRanges->back().m_EndEmitLocation.Valid()) { - return; + // the last "VariableLiveRange" is closed and the variable + // is no longer alive + m_hasLiveRangesToDump = false; } - -#if defined(FEATURE_EH_FUNCLETS) - if (siInFuncletRegion) + else { - return; + // the last "VariableLiveRange" remains opened because it is + // live at "BasicBlock"s "bbLiveOut". + m_startingLiveRange = liveRanges->backPosition(); } +} - if (block->bbFlags & BBF_FUNCLET_BEG) - { - // For now, don't report any scopes in funclets. JIT64 doesn't. 
- siInFuncletRegion = true; +//------------------------------------------------------------------------ +// setDumperStartAt: Make "LiveRangeDumper" instance point at the last "VariableLiveRange" +// added so we can start dumping from there after the "BasicBlock"s code is generated. +// +// Arguments: +// liveRangeIt - an iterator to a position in "VariableLiveDescriptor::m_VariableLiveRanges" +// +void CodeGenInterface::VariableLiveKeeper::LiveRangeDumper::setDumperStartAt(const LiveRangeListIterator liveRangeIt) +{ + m_hasLiveRangesToDump = true; + m_startingLiveRange = liveRangeIt; +} - JITDUMP("Scope info: found beginning of funclet region at block " FMT_BB "; ignoring following blocks\n", - block->bbNum); +//------------------------------------------------------------------------ +// getStartForDump: Return an iterator to the first "VariableLiveRange" edited/added +// during the current "BasicBlock" +// +// Return Value: +// A LiveRangeListIterator to the first "VariableLiveRange" in "LiveRangeList" which +// was used during last "BasicBlock". +// +CodeGenInterface::VariableLiveKeeper::LiveRangeListIterator CodeGenInterface::VariableLiveKeeper::LiveRangeDumper:: + getStartForDump() const +{ + return m_startingLiveRange; +} - return; - } -#endif // FEATURE_EH_FUNCLETS +//------------------------------------------------------------------------ +// hasLiveRangesToDump: Return whether at least a "VariableLiveRange" was alive during +// the current "BasicBlock"'s code generation +// +// Return Value: +// A boolean indicating indicating if there is at least a "VariableLiveRange" +// that has been used for the variable during last "BasicBlock". 
+// +bool CodeGenInterface::VariableLiveKeeper::LiveRangeDumper::hasLiveRangesToDump() const +{ + return m_hasLiveRangesToDump; +} -#ifdef DEBUG - if (verbose) - { - printf("\nScope info: begin block " FMT_BB ", IL range ", block->bbNum); - block->dspBlockILRange(); - printf("\n"); - } #endif // DEBUG - unsigned beginOffs = block->bbCodeOffs; +//------------------------------------------------------------------------ +// VariableLiveDescriptor +//------------------------------------------------------------------------ - if (beginOffs == BAD_IL_OFFSET) +CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::VariableLiveDescriptor( + CompAllocator allocator DEBUG_ARG(unsigned varNum)) +{ + // Initialize an empty list + m_VariableLiveRanges = new (allocator) LiveRangeList(allocator); + + INDEBUG(m_VariableLifeBarrier = new (allocator) LiveRangeDumper(m_VariableLiveRanges)); + INDEBUG(m_varNum = varNum); +} + +//------------------------------------------------------------------------ +// hasVariableLiveRangeOpen: Return true if the variable is still alive, +// false in other case. +// +bool CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::hasVariableLiveRangeOpen() const +{ + return !m_VariableLiveRanges->empty() && !m_VariableLiveRanges->back().m_EndEmitLocation.Valid(); +} + +//------------------------------------------------------------------------ +// getLiveRanges: Return the list of variable locations for this variable. +// +// Return Value: +// A const LiveRangeList* pointing to the first variable location if it has +// any or the end of the list in other case. 
+// +CodeGenInterface::VariableLiveKeeper::LiveRangeList* CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor:: + getLiveRanges() const +{ + return m_VariableLiveRanges; +} + +//------------------------------------------------------------------------ +// startLiveRangeFromEmitter: Report this variable as being born in "varLocation" +// at the instruction where "emit" is located. +// +// Arguments: +// varLocation - the home of the variable. +// emit - an emitter* instance located at the first instruction where "varLocation" becomes valid. +// +// Assumptions: +// This variable is being born so it should currently be dead. +// +// Notes: +// The position of "emit" matters to ensure intervals inclusive of the +// beginning and exclusive of the end. +// +void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::startLiveRangeFromEmitter( + CodeGenInterface::siVarLoc varLocation, emitter* emit) const +{ + noway_assert(emit != nullptr); + + // Is the first "VariableLiveRange" or the previous one has been closed so its "m_EndEmitLocation" is valid + noway_assert(m_VariableLiveRanges->empty() || m_VariableLiveRanges->back().m_EndEmitLocation.Valid()); + + if (!m_VariableLiveRanges->empty() && + siVarLoc::Equals(&varLocation, &(m_VariableLiveRanges->back().m_VarLocation)) && + m_VariableLiveRanges->back().m_EndEmitLocation.IsPreviousInsNum(emit)) { - JITDUMP("Scope info: ignoring block beginning\n"); - return; + JITDUMP("Debug: Extending V%02u debug range...\n", m_varNum); + + // The variable is being born just after the instruction at which it died. + // In this case, i.e. an update of the variable's value, we coalesce the live ranges. + m_VariableLiveRanges->back().m_EndEmitLocation.Init(); + } + else + { + JITDUMP("Debug: New V%02u debug range: %s\n", m_varNum, + m_VariableLiveRanges->empty() + ? "first" + : siVarLoc::Equals(&varLocation, &(m_VariableLiveRanges->back().m_VarLocation)) + ? 
"new var or location" + : "not adjacent"); + // Creates new live range with invalid end + m_VariableLiveRanges->emplace_back(varLocation, emitLocation(), emitLocation()); + m_VariableLiveRanges->back().m_StartEmitLocation.CaptureLocation(emit); } - // If we have tracked locals, use liveness to update the debug state. - // - // Note: we can improve on this some day -- if there are any tracked - // locals, untracked locals will fail to be reported. - if (compiler->lvaTrackedCount <= 0) +#ifdef DEBUG + if (!m_VariableLifeBarrier->hasLiveRangesToDump()) { - siOpenScopesForNonTrackedVars(block, siLastEndOffs); + m_VariableLifeBarrier->setDumperStartAt(m_VariableLiveRanges->backPosition()); } +#endif // DEBUG + + // m_startEmitLocation must be Valid. m_EndEmitLocation must not be valid. + noway_assert(m_VariableLiveRanges->back().m_StartEmitLocation.Valid()); + noway_assert(!m_VariableLiveRanges->back().m_EndEmitLocation.Valid()); } //------------------------------------------------------------------------ -// siOpenScopesForNonTrackedVars: If optimizations are disable, it will open -// a "siScope" for each variable which has a "VarScopeDsc" (input of the JIT) -// and is referenced at least once. If optimizations are applied, nothing is done. +// endLiveRangeAtEmitter: Report this variable as becoming dead starting at the +// instruction where "emit" is located. // // Arguments: -// block - the block whose code is going to be generated. -// lastBlockILEndOffset - the IL offset at the ending of the last generated basic block. +// emit - an emitter* instance located at the first instruction where +// this variable becomes dead. +// +// Assumptions: +// This variable is becoming dead so it should currently be alive. // // Notes: -// When there we are jitting methods compiled in debug mode, no variable is -// tracked and there is no info that shows variable liveness like block->bbLiveIn. 
-// On debug code variables are not enregistered the whole method so we can just -// report them as beign born from here on the stack until the whole method is -// generated. +// The position of "emit" matters to ensure intervals inclusive of the +// beginning and exclusive of the end. // -void CodeGen::siOpenScopesForNonTrackedVars(const BasicBlock* block, unsigned int lastBlockILEndOffset) +void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::endLiveRangeAtEmitter(emitter* emit) const { - unsigned int beginOffs = block->bbCodeOffs; + noway_assert(emit != nullptr); + noway_assert(hasVariableLiveRangeOpen()); - // There aren't any tracked locals. - // - // For debuggable or minopts code, scopes can begin only on block boundaries. - // For other codegen modes (eg minopts/tier0) we currently won't report any - // untracked locals. - if (compiler->opts.OptimizationDisabled()) - { - // Check if there are any scopes on the current block's start boundary. - VarScopeDsc* varScope = nullptr; + // Using [close, open) ranges so as to not compute the size of the last instruction + m_VariableLiveRanges->back().m_EndEmitLocation.CaptureLocation(emit); -#if defined(FEATURE_EH_FUNCLETS) + JITDUMP("Debug: Closing V%02u debug range.\n", m_varNum); - // If we find a spot where the code offset isn't what we expect, because - // there is a gap, it might be because we've moved the funclets out of - // line. Catch up with the enter and exit scopes of the current block. - // Ignore the enter/exit scope changes of the missing scopes, which for - // funclets must be matched. - if (lastBlockILEndOffset != beginOffs) - { - assert(beginOffs > 0); - assert(lastBlockILEndOffset < beginOffs); + // m_EndEmitLocation must be Valid + noway_assert(m_VariableLiveRanges->back().m_EndEmitLocation.Valid()); +} - JITDUMP("Scope info: found offset hole. 
lastOffs=%u, currOffs=%u\n", lastBlockILEndOffset, beginOffs); +//------------------------------------------------------------------------ +// updateLiveRangeAtEmitter: Report this variable as changing its variable +// home to "varLocation" at the instruction where "emit" is located. +// +// Arguments: +// varLocation - the new variable location. +// emit - an emitter* instance located at the first instruction where "varLocation" becomes valid. +// +// Assumptions: +// This variable should already be alive. +// +// Notes: +// The position of "emit" matters to ensure intervals inclusive of the +// beginning and exclusive of the end. +// +void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::updateLiveRangeAtEmitter( + CodeGenInterface::siVarLoc varLocation, emitter* emit) const +{ + // This variable is changing home so it has been started before during this block + noway_assert(m_VariableLiveRanges != nullptr && !m_VariableLiveRanges->empty()); - // Skip enter scopes - while ((varScope = compiler->compGetNextEnterScope(beginOffs - 1, true)) != nullptr) - { - /* do nothing */ - JITDUMP("Scope info: skipping enter scope, LVnum=%u\n", varScope->vsdLVnum); - } + // And its last m_EndEmitLocation has to be invalid + noway_assert(!m_VariableLiveRanges->back().m_EndEmitLocation.Valid()); - // Skip exit scopes - while ((varScope = compiler->compGetNextExitScope(beginOffs - 1, true)) != nullptr) - { - /* do nothing */ - JITDUMP("Scope info: skipping exit scope, LVnum=%u\n", varScope->vsdLVnum); - } - } + // If we are reporting again the same home, that means we are doing something twice? + // noway_assert(! 
CodeGenInterface::siVarLoc::Equals(&m_VariableLiveRanges->back().m_VarLocation, varLocation)); -#else // !FEATURE_EH_FUNCLETS + // Close previous live range + endLiveRangeAtEmitter(emit); - if (lastBlockILEndOffset != beginOffs) + startLiveRangeFromEmitter(varLocation, emit); +} + +#ifdef DEBUG +void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::dumpAllRegisterLiveRangesForBlock( + emitter* emit, const CodeGenInterface* codeGen) const +{ + bool first = true; + for (LiveRangeListIterator it = m_VariableLiveRanges->begin(); it != m_VariableLiveRanges->end(); it++) + { + if (!first) { - assert(lastBlockILEndOffset < beginOffs); - return; + printf("; "); } + it->dumpVariableLiveRange(emit, codeGen); + first = false; + } +} -#endif // !FEATURE_EH_FUNCLETS - - while ((varScope = compiler->compGetNextEnterScope(beginOffs)) != nullptr) +void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::dumpRegisterLiveRangesForBlockBeforeCodeGenerated( + const CodeGenInterface* codeGen) const +{ + bool first = true; + for (LiveRangeListIterator it = m_VariableLifeBarrier->getStartForDump(); it != m_VariableLiveRanges->end(); it++) + { + if (!first) { - LclVarDsc* lclVarDsc = compiler->lvaGetDesc(varScope->vsdVarNum); - - // Only report locals that were referenced, if we're not doing debug codegen - if (compiler->opts.compDbgCode || (lclVarDsc->lvRefCnt() > 0)) - { - // brace-matching editor workaround for following line: ( - JITDUMP("Scope info: opening scope, LVnum=%u [%03X..%03X)\n", varScope->vsdLVnum, varScope->vsdLifeBeg, - varScope->vsdLifeEnd); - - varLiveKeeper->siStartVariableLiveRange(lclVarDsc, varScope->vsdVarNum); - - INDEBUG(assert(!lclVarDsc->lvTracked || - VarSetOps::IsMember(compiler, block->bbLiveIn, lclVarDsc->lvVarIndex))); - } - else - { - JITDUMP("Skipping open scope for V%02u, unreferenced\n", varScope->vsdVarNum); - } + printf("; "); } + it->dumpVariableLiveRange(codeGen); + first = false; } } 
-/***************************************************************************** - * siEndBlock +// Returns true if a live range for this variable has been recorded +bool CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::hasVarLiveRangesToDump() const +{ + return !m_VariableLiveRanges->empty(); +} + +// Returns true if a live range for this variable has been recorded from last call to EndBlock +bool CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::hasVarLiveRangesFromLastBlockToDump() const +{ + return m_VariableLifeBarrier->hasLiveRangesToDump(); +} + +// Reset the barrier so as to dump only next block changes on next block +void CodeGenInterface::VariableLiveKeeper::VariableLiveDescriptor::endBlockLiveRanges() +{ + // make "m_VariableLifeBarrier->m_startingLiveRange" now points to nullptr for printing purposes + m_VariableLifeBarrier->resetDumper(m_VariableLiveRanges); +} +#endif // DEBUG + +//------------------------------------------------------------------------ +// VariableLiveKeeper +//------------------------------------------------------------------------ + +// Initialize structures for VariableLiveRanges +void CodeGenInterface::initializeVariableLiveKeeper() +{ + CompAllocator allocator = compiler->getAllocator(CMK_VariableLiveRanges); + + int amountTrackedVariables = compiler->opts.compDbgInfo ? compiler->info.compLocalsCount : 0; + int amountTrackedArgs = compiler->opts.compDbgInfo ? compiler->info.compArgsCount : 0; + + varLiveKeeper = new (allocator) VariableLiveKeeper(amountTrackedVariables, amountTrackedArgs, compiler, allocator); +} + +CodeGenInterface::VariableLiveKeeper* CodeGenInterface::getVariableLiveKeeper() const +{ + return varLiveKeeper; +}; + +//------------------------------------------------------------------------ +// VariableLiveKeeper: Create an instance of the object in charge of managing +// VariableLiveRanges and initialize the array "m_vlrLiveDsc". 
+// +// Arguments: +// totalLocalCount - the count of args, special args and IL Local +// variables in the method. +// argsCount - the count of args and special args in the method. +// compiler - a compiler instance +// +CodeGenInterface::VariableLiveKeeper::VariableLiveKeeper(unsigned int totalLocalCount, + unsigned int argsCount, + Compiler* comp, + CompAllocator allocator) + : m_LiveDscCount(totalLocalCount) + , m_LiveArgsCount(argsCount) + , m_Compiler(comp) + , m_LastBasicBlockHasBeenEmitted(false) +{ + if (m_LiveDscCount > 0) + { + // Allocate memory for "m_vlrLiveDsc" and initialize each "VariableLiveDescriptor" + m_vlrLiveDsc = allocator.allocate(m_LiveDscCount); + m_vlrLiveDscForProlog = allocator.allocate(m_LiveDscCount); + + for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++) + { + new (m_vlrLiveDsc + varNum, jitstd::placement_t()) VariableLiveDescriptor(allocator DEBUG_ARG(varNum)); + new (m_vlrLiveDscForProlog + varNum, jitstd::placement_t()) + VariableLiveDescriptor(allocator DEBUG_ARG(varNum)); + } + } +} + +//------------------------------------------------------------------------ +// siStartOrCloseVariableLiveRange: Reports the given variable as being born or becoming dead. +// +// Arguments: +// varDsc - the variable for which a location changed will be reported +// varNum - the index of the variable in "lvaTable" +// isBorn - true if the variable is being born where the emitter is located. +// isDying - true if the variable is dying where the emitter is located. +// +// Assumptions: +// The emitter should be located on the first instruction where +// the variable is becoming valid (when isBorn is true) or invalid (when isDying is true). +// +// Notes: +// This method is being called from treeLifeUpdater when the variable is being born, +// becoming dead, or both. 
+// +void CodeGenInterface::VariableLiveKeeper::siStartOrCloseVariableLiveRange(const LclVarDsc* varDsc, + unsigned int varNum, + bool isBorn, + bool isDying) +{ + noway_assert(varDsc != nullptr); + + // Only the variables that exists in the IL, "this", and special arguments + // are reported. + if (m_Compiler->opts.compDbgInfo && varNum < m_LiveDscCount) + { + if (isBorn && !isDying) + { + // "varDsc" is valid from this point + siStartVariableLiveRange(varDsc, varNum); + } + if (isDying && !isBorn) + { + // this variable live range is no longer valid from this point + siEndVariableLiveRange(varNum); + } + } +} + +//------------------------------------------------------------------------ +// siStartOrCloseVariableLiveRanges: Iterates the given set of variables +// calling "siStartOrCloseVariableLiveRange" with each one. +// +// Arguments: +// varsIndexSet - the set of variables to report start/end "VariableLiveRange" +// isBorn - whether the set is being born from where the emitter is located. +// isDying - whether the set is dying from where the emitter is located. +// +// Assumptions: +// The emitter should be located on the first instruction from where is true that +// the variable becoming valid (when isBorn is true) or invalid (when isDying is true). +// +// Notes: +// This method is being called from treeLifeUpdater when a set of variables +// is being born, becoming dead, or both. 
+// +void CodeGenInterface::VariableLiveKeeper::siStartOrCloseVariableLiveRanges(VARSET_VALARG_TP varsIndexSet, + bool isBorn, + bool isDying) +{ + if (m_Compiler->opts.compDbgInfo) + { + VarSetOps::Iter iter(m_Compiler, varsIndexSet); + unsigned varIndex = 0; + while (iter.NextElem(&varIndex)) + { + unsigned int varNum = m_Compiler->lvaTrackedIndexToLclNum(varIndex); + const LclVarDsc* varDsc = m_Compiler->lvaGetDesc(varNum); + siStartOrCloseVariableLiveRange(varDsc, varNum, isBorn, isDying); + } + } +} + +//------------------------------------------------------------------------ +// siStartVariableLiveRange: Reports the given variable as being born. +// +// Arguments: +// varDsc - the variable descriptor for which a location change will be reported +// varNum - the variable number +// +// Assumptions: +// The emitter should be pointing to the first instruction where the VariableLiveRange is +// becoming valid. +// The given "varDsc" should have its VariableRangeLists initialized. +// +// Notes: +// This method should be called at every location where a variable is becoming live. +// +void CodeGenInterface::VariableLiveKeeper::siStartVariableLiveRange(const LclVarDsc* varDsc, unsigned int varNum) +{ + noway_assert(varDsc != nullptr); + + // Only the variables that exists in the IL, "this", and special arguments are reported, as long as they were + // allocated. 
+ if (m_Compiler->opts.compDbgInfo && (varNum < m_LiveDscCount) && (varDsc->lvIsInReg() || varDsc->lvOnFrame)) + { + // Build siVarLoc for this born "varDsc" + CodeGenInterface::siVarLoc varLocation = + m_Compiler->codeGen->getSiVarLoc(varDsc, m_Compiler->codeGen->getCurrentStackLevel()); + + VariableLiveDescriptor* varLiveDsc = &m_vlrLiveDsc[varNum]; + // this variable live range is valid from this point + varLiveDsc->startLiveRangeFromEmitter(varLocation, m_Compiler->GetEmitter()); + } +} + +//------------------------------------------------------------------------ +// siEndVariableLiveRange: Reports the variable as becoming dead. +// +// Arguments: +// varNum - the index of the variable at m_vlrLiveDsc or lvaTable in that +// is becoming dead. +// +// Assumptions: +// The given variable should be alive. +// The emitter should be pointing to the first instruction where the VariableLiveRange is +// becoming invalid. +// +// Notes: +// This method should be called at every location where a variable is becoming dead. +// +void CodeGenInterface::VariableLiveKeeper::siEndVariableLiveRange(unsigned int varNum) +{ + // Only the variables that exists in the IL, "this", and special arguments + // will be reported. + + // This method is being called from genUpdateLife, which is called after + // code for BasicBlock has been generated, but the emitter no longer has + // a valid IG so we don't report the close of a "VariableLiveRange" after code is + // emitted. + + if (m_Compiler->opts.compDbgInfo && (varNum < m_LiveDscCount) && !m_LastBasicBlockHasBeenEmitted && + m_vlrLiveDsc[varNum].hasVariableLiveRangeOpen()) + { + // this variable live range is no longer valid from this point + m_vlrLiveDsc[varNum].endLiveRangeAtEmitter(m_Compiler->GetEmitter()); + } +} + +//------------------------------------------------------------------------ +// siUpdateVariableLiveRange: Reports the change of variable location for the +// given variable. 
+// +// Arguments: +// varDsc - the variable descriptor for which the home has changed. +// varNum - the variable number +// +// Assumptions: +// The given variable should be alive. +// The emitter should be pointing to the first instruction where +// the new variable location is becoming valid. +// +void CodeGenInterface::VariableLiveKeeper::siUpdateVariableLiveRange(const LclVarDsc* varDsc, unsigned int varNum) +{ + noway_assert(varDsc != nullptr); + + // Only the variables that exist in the IL, "this", and special arguments + // will be reported. These are locals and arguments, and are counted in + // "info.compLocalsCount". + + // This method is being called when the prolog is being generated, and + // the emitter no longer has a valid IG so we don't report the close of + // a "VariableLiveRange" after code is emitted. + if (m_Compiler->opts.compDbgInfo && (varNum < m_LiveDscCount) && !m_LastBasicBlockHasBeenEmitted) + { + // Build the location of the variable + CodeGenInterface::siVarLoc siVarLoc = + m_Compiler->codeGen->getSiVarLoc(varDsc, m_Compiler->codeGen->getCurrentStackLevel()); + + // Report the home change for this variable + VariableLiveDescriptor* varLiveDsc = &m_vlrLiveDsc[varNum]; + varLiveDsc->updateLiveRangeAtEmitter(siVarLoc, m_Compiler->GetEmitter()); + } +} + +//------------------------------------------------------------------------ +// siEndAllVariableLiveRange: Reports the set of variables as becoming dead. +// +// Arguments: +// newLife - the set of variables that are becoming dead. +// +// Assumptions: +// All the variables in the set are alive. +// +// Notes: +// This method is called when the last block being generated to killed all +// the live variables and set a flag to avoid reporting variable locations for +// on next calls to method that update variable liveness. 
+// +void CodeGenInterface::VariableLiveKeeper::siEndAllVariableLiveRange(VARSET_VALARG_TP varsToClose) +{ + if (m_Compiler->opts.compDbgInfo) + { + if (m_Compiler->lvaTrackedCount > 0 || !m_Compiler->opts.OptimizationDisabled()) + { + VarSetOps::Iter iter(m_Compiler, varsToClose); + unsigned varIndex = 0; + while (iter.NextElem(&varIndex)) + { + unsigned int varNum = m_Compiler->lvaTrackedIndexToLclNum(varIndex); + siEndVariableLiveRange(varNum); + } + } + else + { + // It seems we are compiling debug code, so we don't have variable + // liveness info + siEndAllVariableLiveRange(); + } + } + + m_LastBasicBlockHasBeenEmitted = true; +} + +//------------------------------------------------------------------------ +// siEndAllVariableLiveRange: Reports all live variables as dead. +// +// Notes: +// This overload exists for the case we are compiling code compiled in +// debug mode. When that happen we don't have variable liveness info +// as "BasicBlock::bbLiveIn" or "BasicBlock::bbLiveOut" and there is no +// tracked variable. +// +void CodeGenInterface::VariableLiveKeeper::siEndAllVariableLiveRange() +{ + // TODO: we can improve this keeping a set for the variables with + // open VariableLiveRanges + + for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++) + { + const VariableLiveDescriptor* varLiveDsc = m_vlrLiveDsc + varNum; + if (varLiveDsc->hasVariableLiveRangeOpen()) + { + siEndVariableLiveRange(varNum); + } + } +} + +//------------------------------------------------------------------------ +// getLiveRangesForVarForBody: Return the "VariableLiveRange" that correspond to +// the given "varNum". +// +// Arguments: +// varNum - the index of the variable in m_vlrLiveDsc, which is the same as +// in lvaTable. +// +// Return Value: +// A const pointer to the list of variable locations reported for the variable. +// +// Assumptions: +// This variable should be an argument, a special argument or an IL local +// variable. 
+CodeGenInterface::VariableLiveKeeper::LiveRangeList* CodeGenInterface::VariableLiveKeeper::getLiveRangesForVarForBody( + unsigned int varNum) const +{ + // There should be at least one variable for which its liveness is tracked + noway_assert(varNum < m_LiveDscCount); + + return m_vlrLiveDsc[varNum].getLiveRanges(); +} + +//------------------------------------------------------------------------ +// getLiveRangesForVarForProlog: Return the "VariableLiveRange" that correspond to +// the given "varNum". +// +// Arguments: +// varNum - the index of the variable in m_vlrLiveDsc, which is the same as +// in lvaTable. +// +// Return Value: +// A const pointer to the list of variable locations reported for the variable. +// +// Assumptions: +// This variable should be an argument, a special argument or an IL local +// variable. +CodeGenInterface::VariableLiveKeeper::LiveRangeList* CodeGenInterface::VariableLiveKeeper::getLiveRangesForVarForProlog( + unsigned int varNum) const +{ + // There should be at least one variable for which its liveness is tracked + noway_assert(varNum < m_LiveDscCount); + + return m_vlrLiveDscForProlog[varNum].getLiveRanges(); +} + +//------------------------------------------------------------------------ +// getLiveRangesCount: Returns the count of variable locations reported for the tracked +// variables, which are arguments, special arguments, and local IL variables. +// +// Return Value: +// size_t - the count of variable locations +// +// Notes: +// This method is being called from "genSetScopeInfo" to know the count of +// "varResultInfo" that should be created on eeSetLVcount. +// +size_t CodeGenInterface::VariableLiveKeeper::getLiveRangesCount() const +{ + size_t liveRangesCount = 0; + + if (m_Compiler->opts.compDbgInfo) + { + for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++) + { + for (int i = 0; i < 2; i++) + { + VariableLiveDescriptor* varLiveDsc = (i == 0 ? 
m_vlrLiveDscForProlog : m_vlrLiveDsc) + varNum; + + if (m_Compiler->compMap2ILvarNum(varNum) != (unsigned int)ICorDebugInfo::UNKNOWN_ILNUM) + { + liveRangesCount += varLiveDsc->getLiveRanges()->size(); + } + } + } + } + return liveRangesCount; +} + +//------------------------------------------------------------------------ +// psiStartVariableLiveRange: Reports the given variable as being born. +// +// Arguments: +// varLocation - the variable location +// varNum - the index of the variable in "compiler->lvaTable" or +// "VariableLiveKeeper->m_vlrLiveDsc" +// +// Notes: +// This function is expected to be called from "psiBegProlog" during +// prolog code generation. +// +void CodeGenInterface::VariableLiveKeeper::psiStartVariableLiveRange(CodeGenInterface::siVarLoc varLocation, + unsigned int varNum) +{ + // This descriptor has to correspond to a parameter. The first slots in lvaTable + // are arguments and special arguments. + noway_assert(varNum < m_LiveArgsCount); + + VariableLiveDescriptor* varLiveDsc = &m_vlrLiveDscForProlog[varNum]; + varLiveDsc->startLiveRangeFromEmitter(varLocation, m_Compiler->GetEmitter()); +} + +//------------------------------------------------------------------------ +// psiClosePrologVariableRanges: Report all the parameters as becoming dead. +// +// Notes: +// This function is expected to be called from "psiEndProlog" after +// code for prolog has been generated. 
+// +void CodeGenInterface::VariableLiveKeeper::psiClosePrologVariableRanges() +{ + noway_assert(m_LiveArgsCount <= m_LiveDscCount); + + for (unsigned int varNum = 0; varNum < m_LiveArgsCount; varNum++) + { + VariableLiveDescriptor* varLiveDsc = m_vlrLiveDscForProlog + varNum; + + if (varLiveDsc->hasVariableLiveRangeOpen()) + { + varLiveDsc->endLiveRangeAtEmitter(m_Compiler->GetEmitter()); + } + } +} + +#ifdef DEBUG +void CodeGenInterface::VariableLiveKeeper::dumpBlockVariableLiveRanges(const BasicBlock* block) +{ + assert(block != nullptr); + + bool hasDumpedHistory = false; + + printf("\nVariable Live Range History Dump for " FMT_BB "\n", block->bbNum); + + if (m_Compiler->opts.compDbgInfo) + { + for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++) + { + VariableLiveDescriptor* varLiveDsc = m_vlrLiveDsc + varNum; + + if (varLiveDsc->hasVarLiveRangesFromLastBlockToDump()) + { + hasDumpedHistory = true; + m_Compiler->gtDispLclVar(varNum, false); + printf(": "); + varLiveDsc->dumpRegisterLiveRangesForBlockBeforeCodeGenerated(m_Compiler->codeGen); + varLiveDsc->endBlockLiveRanges(); + printf("\n"); + } + } + } + + if (!hasDumpedHistory) + { + printf("..None..\n"); + } +} + +void CodeGenInterface::VariableLiveKeeper::dumpLvaVariableLiveRanges() const +{ + bool hasDumpedHistory = false; + + printf("VARIABLE LIVE RANGES:\n"); + + if (m_Compiler->opts.compDbgInfo) + { + for (unsigned int varNum = 0; varNum < m_LiveDscCount; varNum++) + { + VariableLiveDescriptor* varLiveDsc = m_vlrLiveDsc + varNum; + + if (varLiveDsc->hasVarLiveRangesToDump()) + { + hasDumpedHistory = true; + m_Compiler->gtDispLclVar(varNum, false); + printf(": "); + varLiveDsc->dumpAllRegisterLiveRangesForBlock(m_Compiler->GetEmitter(), m_Compiler->codeGen); + printf("\n"); + } + } + } + + if (!hasDumpedHistory) + { + printf("..None..\n"); + } +} +#endif // DEBUG + +/*============================================================================ + * INTERFACE (public) Functions for ScopeInfo + 
*============================================================================ + */ + +// Check every CodeGenInterface::siVarLocType and CodeGenInterface::siVarLoc +// are what ICodeDebugInfo is expecting. +void CodeGen::checkICodeDebugInfo() +{ +#ifdef TARGET_X86 + assert((unsigned)ICorDebugInfo::REGNUM_EAX == REG_EAX); + assert((unsigned)ICorDebugInfo::REGNUM_ECX == REG_ECX); + assert((unsigned)ICorDebugInfo::REGNUM_EDX == REG_EDX); + assert((unsigned)ICorDebugInfo::REGNUM_EBX == REG_EBX); + assert((unsigned)ICorDebugInfo::REGNUM_ESP == REG_ESP); + assert((unsigned)ICorDebugInfo::REGNUM_EBP == REG_EBP); + assert((unsigned)ICorDebugInfo::REGNUM_ESI == REG_ESI); + assert((unsigned)ICorDebugInfo::REGNUM_EDI == REG_EDI); +#endif + + assert((unsigned)ICorDebugInfo::VLT_REG == CodeGenInterface::VLT_REG); + assert((unsigned)ICorDebugInfo::VLT_STK == CodeGenInterface::VLT_STK); + assert((unsigned)ICorDebugInfo::VLT_REG_REG == CodeGenInterface::VLT_REG_REG); + assert((unsigned)ICorDebugInfo::VLT_REG_STK == CodeGenInterface::VLT_REG_STK); + assert((unsigned)ICorDebugInfo::VLT_STK_REG == CodeGenInterface::VLT_STK_REG); + assert((unsigned)ICorDebugInfo::VLT_STK2 == CodeGenInterface::VLT_STK2); + assert((unsigned)ICorDebugInfo::VLT_FPSTK == CodeGenInterface::VLT_FPSTK); + assert((unsigned)ICorDebugInfo::VLT_FIXED_VA == CodeGenInterface::VLT_FIXED_VA); + assert((unsigned)ICorDebugInfo::VLT_COUNT == CodeGenInterface::VLT_COUNT); + assert((unsigned)ICorDebugInfo::VLT_INVALID == CodeGenInterface::VLT_INVALID); + + /* ICorDebugInfo::VarLoc and siVarLoc should overlap exactly as we cast + * one to the other in eeSetLVinfo() + * Below is a "required but not sufficient" condition + */ + + assert(sizeof(ICorDebugInfo::VarLoc) == sizeof(CodeGenInterface::siVarLoc)); +} + +void CodeGen::siInit() +{ + checkICodeDebugInfo(); + + assert(compiler->opts.compScopeInfo); + +#if defined(FEATURE_EH_FUNCLETS) + if (compiler->info.compVarScopesCount > 0) + { + siInFuncletRegion = false; + } +#endif 
// FEATURE_EH_FUNCLETS + + siLastEndOffs = 0; + + compiler->compResetScopeLists(); +} + +/***************************************************************************** + * siBeginBlock + * + * Called at the beginning of code-gen for a block. Checks if any scopes + * need to be opened. + */ + +void CodeGen::siBeginBlock(BasicBlock* block) +{ + assert(block != nullptr); + + if (!compiler->opts.compScopeInfo) + { + return; + } + + if (compiler->info.compVarScopesCount == 0) + { + return; + } + +#if defined(FEATURE_EH_FUNCLETS) + if (siInFuncletRegion) + { + return; + } + + if (block->bbFlags & BBF_FUNCLET_BEG) + { + // For now, don't report any scopes in funclets. JIT64 doesn't. + siInFuncletRegion = true; + + JITDUMP("Scope info: found beginning of funclet region at block " FMT_BB "; ignoring following blocks\n", + block->bbNum); + + return; + } +#endif // FEATURE_EH_FUNCLETS + +#ifdef DEBUG + if (verbose) + { + printf("\nScope info: begin block " FMT_BB ", IL range ", block->bbNum); + block->dspBlockILRange(); + printf("\n"); + } +#endif // DEBUG + + unsigned beginOffs = block->bbCodeOffs; + + if (beginOffs == BAD_IL_OFFSET) + { + JITDUMP("Scope info: ignoring block beginning\n"); + return; + } + + // If we have tracked locals, use liveness to update the debug state. + // + // Note: we can improve on this some day -- if there are any tracked + // locals, untracked locals will fail to be reported. + if (compiler->lvaTrackedCount <= 0) + { + siOpenScopesForNonTrackedVars(block, siLastEndOffs); + } +} + +//------------------------------------------------------------------------ +// siOpenScopesForNonTrackedVars: If optimizations are disable, it will open +// a "siScope" for each variable which has a "VarScopeDsc" (input of the JIT) +// and is referenced at least once. If optimizations are applied, nothing is done. +// +// Arguments: +// block - the block whose code is going to be generated. 
+// lastBlockILEndOffset - the IL offset at the ending of the last generated basic block. +// +// Notes: +// When there we are jitting methods compiled in debug mode, no variable is +// tracked and there is no info that shows variable liveness like block->bbLiveIn. +// On debug code variables are not enregistered the whole method so we can just +// report them as beign born from here on the stack until the whole method is +// generated. +// +void CodeGen::siOpenScopesForNonTrackedVars(const BasicBlock* block, unsigned int lastBlockILEndOffset) +{ + unsigned int beginOffs = block->bbCodeOffs; + + // There aren't any tracked locals. + // + // For debuggable or minopts code, scopes can begin only on block boundaries. + // For other codegen modes (eg minopts/tier0) we currently won't report any + // untracked locals. + if (compiler->opts.OptimizationDisabled()) + { + // Check if there are any scopes on the current block's start boundary. + VarScopeDsc* varScope = nullptr; + +#if defined(FEATURE_EH_FUNCLETS) + + // If we find a spot where the code offset isn't what we expect, because + // there is a gap, it might be because we've moved the funclets out of + // line. Catch up with the enter and exit scopes of the current block. + // Ignore the enter/exit scope changes of the missing scopes, which for + // funclets must be matched. + if (lastBlockILEndOffset != beginOffs) + { + assert(beginOffs > 0); + assert(lastBlockILEndOffset < beginOffs); + + JITDUMP("Scope info: found offset hole. 
lastOffs=%u, currOffs=%u\n", lastBlockILEndOffset, beginOffs); + + // Skip enter scopes + while ((varScope = compiler->compGetNextEnterScope(beginOffs - 1, true)) != nullptr) + { + /* do nothing */ + JITDUMP("Scope info: skipping enter scope, LVnum=%u\n", varScope->vsdLVnum); + } + + // Skip exit scopes + while ((varScope = compiler->compGetNextExitScope(beginOffs - 1, true)) != nullptr) + { + /* do nothing */ + JITDUMP("Scope info: skipping exit scope, LVnum=%u\n", varScope->vsdLVnum); + } + } + +#else // !FEATURE_EH_FUNCLETS + + if (lastBlockILEndOffset != beginOffs) + { + assert(lastBlockILEndOffset < beginOffs); + return; + } + +#endif // !FEATURE_EH_FUNCLETS + + while ((varScope = compiler->compGetNextEnterScope(beginOffs)) != nullptr) + { + LclVarDsc* lclVarDsc = compiler->lvaGetDesc(varScope->vsdVarNum); + + // Only report locals that were referenced, if we're not doing debug codegen + if (compiler->opts.compDbgCode || (lclVarDsc->lvRefCnt() > 0)) + { + // brace-matching editor workaround for following line: ( + JITDUMP("Scope info: opening scope, LVnum=%u [%03X..%03X)\n", varScope->vsdLVnum, varScope->vsdLifeBeg, + varScope->vsdLifeEnd); + + varLiveKeeper->siStartVariableLiveRange(lclVarDsc, varScope->vsdVarNum); + + INDEBUG(assert(!lclVarDsc->lvTracked || + VarSetOps::IsMember(compiler, block->bbLiveIn, lclVarDsc->lvVarIndex))); + } + else + { + JITDUMP("Skipping open scope for V%02u, unreferenced\n", varScope->vsdVarNum); + } + } + } +} + +/***************************************************************************** + * siEndBlock * * Called at the end of code-gen for a block. Any closing scopes are marked * as such. 
Note that if we are collecting LocalVar info, scopes can @@ -1001,3 +1816,314 @@ void CodeGen::psiEndProlog() assert(compiler->compGeneratingProlog); varLiveKeeper->psiClosePrologVariableRanges(); } + +/***************************************************************************** + * genSetScopeInfo + * + * This function should be called only after the sizes of the emitter blocks + * have been finalized. + */ + +void CodeGen::genSetScopeInfo() +{ + if (!compiler->opts.compScopeInfo) + { + return; + } + +#ifdef DEBUG + if (verbose) + { + printf("*************** In genSetScopeInfo()\n"); + } +#endif + + unsigned varsLocationsCount = 0; + + varsLocationsCount = (unsigned int)varLiveKeeper->getLiveRangesCount(); + + if (varsLocationsCount == 0) + { + // No variable home to report + compiler->eeSetLVcount(0); + compiler->eeSetLVdone(); + return; + } + + noway_assert(compiler->opts.compScopeInfo && (compiler->info.compVarScopesCount > 0)); + + // Initialize the table where the reported variables' home will be placed. + compiler->eeSetLVcount(varsLocationsCount); + +#ifdef DEBUG + genTrnslLocalVarCount = varsLocationsCount; + if (varsLocationsCount) + { + genTrnslLocalVarInfo = new (compiler, CMK_DebugOnly) TrnslLocalVarInfo[varsLocationsCount]; + } +#endif + + // We can have one of both flags defined, both, or none. Specially if we need to compare both + // both results. But we cannot report both to the debugger, since there would be overlapping + // intervals, and may not indicate the same variable location. + + genSetScopeInfoUsingVariableRanges(); + + compiler->eeSetLVdone(); +} + +//------------------------------------------------------------------------ +// genSetScopeInfoUsingVariableRanges: Call "genSetScopeInfo" with the +// "VariableLiveRanges" created for the arguments, special arguments and +// IL local variables. +// +// Notes: +// This function is called from "genSetScopeInfo" once the code is generated +// and we want to send debug info to the debugger. 
+// +void CodeGen::genSetScopeInfoUsingVariableRanges() +{ + unsigned int liveRangeIndex = 0; + + for (unsigned int varNum = 0; varNum < compiler->info.compLocalsCount; varNum++) + { + LclVarDsc* varDsc = compiler->lvaGetDesc(varNum); + + if (compiler->compMap2ILvarNum(varNum) == (unsigned int)ICorDebugInfo::UNKNOWN_ILNUM) + { + continue; + } + + auto reportRange = [this, varDsc, varNum, &liveRangeIndex](siVarLoc* loc, UNATIVE_OFFSET start, + UNATIVE_OFFSET end) { + if (varDsc->lvIsParam && (start == end)) + { + // If the length is zero, it means that the prolog is empty. In that case, + // CodeGen::genSetScopeInfo will report the liveness of all arguments + // as spanning the first instruction in the method, so that they can + // at least be inspected on entry to the method. + end++; + } + + if (start < end) + { + genSetScopeInfo(liveRangeIndex, start, end - start, varNum, varNum, true, loc); + liveRangeIndex++; + } + }; + + siVarLoc* curLoc = nullptr; + UNATIVE_OFFSET curStart = 0; + UNATIVE_OFFSET curEnd = 0; + + for (int rangeIndex = 0; rangeIndex < 2; rangeIndex++) + { + VariableLiveKeeper::LiveRangeList* liveRanges; + if (rangeIndex == 0) + { + liveRanges = varLiveKeeper->getLiveRangesForVarForProlog(varNum); + } + else + { + liveRanges = varLiveKeeper->getLiveRangesForVarForBody(varNum); + } + + for (VariableLiveKeeper::VariableLiveRange& liveRange : *liveRanges) + { + UNATIVE_OFFSET startOffs = liveRange.m_StartEmitLocation.CodeOffset(GetEmitter()); + UNATIVE_OFFSET endOffs = liveRange.m_EndEmitLocation.CodeOffset(GetEmitter()); + + assert(startOffs <= endOffs); + assert(startOffs >= curEnd); + if ((curLoc != nullptr) && (startOffs == curEnd) && siVarLoc::Equals(curLoc, &liveRange.m_VarLocation)) + { + // Extend current range. + curEnd = endOffs; + continue; + } + + // Report old range if any. + if (curLoc != nullptr) + { + reportRange(curLoc, curStart, curEnd); + } + + // Start a new range. 
+ curLoc = &liveRange.m_VarLocation; + curStart = startOffs; + curEnd = endOffs; + } + } + + // Report last range + if (curLoc != nullptr) + { + reportRange(curLoc, curStart, curEnd); + } + } + + compiler->eeVarsCount = liveRangeIndex; +} + +//------------------------------------------------------------------------ +// genSetScopeInfo: Record scope information for debug info +// +// Arguments: +// which +// startOffs - the starting offset for this scope +// length - the length of this scope +// varNum - the lclVar for this scope info +// LVnum +// avail - a bool indicating if it has a home +// varLoc - the position (reg or stack) of the variable +// +// Notes: +// Called for every scope info piece to record by the main genSetScopeInfo() + +void CodeGen::genSetScopeInfo(unsigned which, + UNATIVE_OFFSET startOffs, + UNATIVE_OFFSET length, + unsigned varNum, + unsigned LVnum, + bool avail, + siVarLoc* varLoc) +{ + // We need to do some mapping while reporting back these variables. + + unsigned ilVarNum = compiler->compMap2ILvarNum(varNum); + noway_assert((int)ilVarNum != ICorDebugInfo::UNKNOWN_ILNUM); + +#ifdef TARGET_X86 + // Non-x86 platforms are allowed to access all arguments directly + // so we don't need this code. + + // Is this a varargs function? + if (compiler->info.compIsVarArgs && varNum != compiler->lvaVarargsHandleArg && + varNum < compiler->info.compArgsCount && !compiler->lvaGetDesc(varNum)->lvIsRegArg) + { + noway_assert(varLoc->vlType == VLT_STK || varLoc->vlType == VLT_STK2); + + // All stack arguments (except the varargs handle) have to be + // accessed via the varargs cookie. 
Discard generated info, + // and just find its position relative to the varargs handle + + PREFIX_ASSUME(compiler->lvaVarargsHandleArg < compiler->info.compArgsCount); + if (!compiler->lvaGetDesc(compiler->lvaVarargsHandleArg)->lvOnFrame) + { + noway_assert(!compiler->opts.compDbgCode); + return; + } + + // Can't check compiler->lvaTable[varNum].lvOnFrame as we don't set it for + // arguments of vararg functions to avoid reporting them to GC. + noway_assert(!compiler->lvaGetDesc(varNum)->lvRegister); + unsigned cookieOffset = compiler->lvaGetDesc(compiler->lvaVarargsHandleArg)->GetStackOffset(); + unsigned varOffset = compiler->lvaGetDesc(varNum)->GetStackOffset(); + + noway_assert(cookieOffset < varOffset); + unsigned offset = varOffset - cookieOffset; + unsigned stkArgSize = compiler->compArgSize - intRegState.rsCalleeRegArgCount * REGSIZE_BYTES; + noway_assert(offset < stkArgSize); + offset = stkArgSize - offset; + + varLoc->vlType = VLT_FIXED_VA; + varLoc->vlFixedVarArg.vlfvOffset = offset; + } + +#endif // TARGET_X86 + + VarName name = nullptr; + +#ifdef DEBUG + + for (unsigned scopeNum = 0; scopeNum < compiler->info.compVarScopesCount; scopeNum++) + { + if (LVnum == compiler->info.compVarScopes[scopeNum].vsdLVnum) + { + name = compiler->info.compVarScopes[scopeNum].vsdName; + } + } + + // Hang on to this compiler->info. 
+ + TrnslLocalVarInfo& tlvi = genTrnslLocalVarInfo[which]; + + tlvi.tlviVarNum = ilVarNum; + tlvi.tlviLVnum = LVnum; + tlvi.tlviName = name; + tlvi.tlviStartPC = startOffs; + tlvi.tlviLength = length; + tlvi.tlviAvailable = avail; + tlvi.tlviVarLoc = *varLoc; + +#endif // DEBUG + + compiler->eeSetLVinfo(which, startOffs, length, ilVarNum, *varLoc); +} + +/*****************************************************************************/ +#ifdef LATE_DISASM +#if defined(DEBUG) +/***************************************************************************** + * CompilerRegName + * + * Can be called only after lviSetLocalVarInfo() has been called + */ + +/* virtual */ +const char* CodeGen::siRegVarName(size_t offs, size_t size, unsigned reg) +{ + if (!compiler->opts.compScopeInfo) + return nullptr; + + if (compiler->info.compVarScopesCount == 0) + return nullptr; + + noway_assert(genTrnslLocalVarCount == 0 || genTrnslLocalVarInfo); + + for (unsigned i = 0; i < genTrnslLocalVarCount; i++) + { + if ((genTrnslLocalVarInfo[i].tlviVarLoc.vlIsInReg((regNumber)reg)) && + (genTrnslLocalVarInfo[i].tlviAvailable == true) && (genTrnslLocalVarInfo[i].tlviStartPC <= offs + size) && + (genTrnslLocalVarInfo[i].tlviStartPC + genTrnslLocalVarInfo[i].tlviLength > offs)) + { + return genTrnslLocalVarInfo[i].tlviName ? 
compiler->VarNameToStr(genTrnslLocalVarInfo[i].tlviName) : NULL; + } + } + + return NULL; +} + +/***************************************************************************** + * CompilerStkName + * + * Can be called only after lviSetLocalVarInfo() has been called + */ + +/* virtual */ +const char* CodeGen::siStackVarName(size_t offs, size_t size, unsigned reg, unsigned stkOffs) +{ + if (!compiler->opts.compScopeInfo) + return nullptr; + + if (compiler->info.compVarScopesCount == 0) + return nullptr; + + noway_assert(genTrnslLocalVarCount == 0 || genTrnslLocalVarInfo); + + for (unsigned i = 0; i < genTrnslLocalVarCount; i++) + { + if ((genTrnslLocalVarInfo[i].tlviVarLoc.vlIsOnStack((regNumber)reg, stkOffs)) && + (genTrnslLocalVarInfo[i].tlviAvailable == true) && (genTrnslLocalVarInfo[i].tlviStartPC <= offs + size) && + (genTrnslLocalVarInfo[i].tlviStartPC + genTrnslLocalVarInfo[i].tlviLength > offs)) + { + return genTrnslLocalVarInfo[i].tlviName ? compiler->VarNameToStr(genTrnslLocalVarInfo[i].tlviName) : NULL; + } + } + + return NULL; +} + +/*****************************************************************************/ +#endif // defined(DEBUG) +#endif // LATE_DISASM diff --git a/src/coreclr/jit/sideeffects.cpp b/src/coreclr/jit/sideeffects.cpp index cf0b3210e3853..3627e5fa3083b 100644 --- a/src/coreclr/jit/sideeffects.cpp +++ b/src/coreclr/jit/sideeffects.cpp @@ -464,7 +464,7 @@ SideEffectSet::SideEffectSet(Compiler* compiler, GenTree* node) : m_sideEffectFl // void SideEffectSet::AddNode(Compiler* compiler, GenTree* node) { - m_sideEffectFlags |= (node->gtFlags & GTF_ALL_EFFECT); + m_sideEffectFlags |= node->OperEffects(compiler); m_aliasSet.AddNode(compiler, node); } @@ -571,7 +571,7 @@ bool SideEffectSet::InterferesWith(const SideEffectSet& other, bool strict) cons // bool SideEffectSet::InterferesWith(Compiler* compiler, GenTree* node, bool strict) const { - return InterferesWith((node->gtFlags & GTF_ALL_EFFECT), AliasSet::NodeInfo(compiler, node), strict); + 
return InterferesWith(node->OperEffects(compiler), AliasSet::NodeInfo(compiler, node), strict); } //------------------------------------------------------------------------ diff --git a/src/coreclr/jit/treelifeupdater.cpp b/src/coreclr/jit/treelifeupdater.cpp index c86460aa05ae2..9ae6d3cd02f74 100644 --- a/src/coreclr/jit/treelifeupdater.cpp +++ b/src/coreclr/jit/treelifeupdater.cpp @@ -140,17 +140,17 @@ void TreeLifeUpdater::UpdateLifeVar(GenTree* tree, GenTreeLclVarComm StoreCurrentLifeForDump(); - bool isBorn = ((lclVarTree->gtFlags & GTF_VAR_DEF) != 0) && ((lclVarTree->gtFlags & GTF_VAR_USEASG) == 0); + const bool isBorn = ((lclVarTree->gtFlags & GTF_VAR_DEF) != 0) && ((lclVarTree->gtFlags & GTF_VAR_USEASG) == 0); if (varDsc->lvTracked) { assert(!varDsc->lvPromoted && !lclVarTree->IsMultiRegLclVar()); - bool isDying = (lclVarTree->gtFlags & GTF_VAR_DEATH) != 0; + const bool isDying = (lclVarTree->gtFlags & GTF_VAR_DEATH) != 0; if (isBorn || isDying) { - bool previouslyLive = + const bool previouslyLive = ForCodeGen && VarSetOps::IsMember(compiler, compiler->compCurLife, varDsc->lvVarIndex); UpdateLifeBit(compiler->compCurLife, varDsc, isBorn, isDying); @@ -161,8 +161,8 @@ void TreeLifeUpdater::UpdateLifeVar(GenTree* tree, GenTreeLclVarComm compiler->codeGen->genUpdateVarReg(varDsc, tree); } - bool isInReg = varDsc->lvIsInReg() && (tree->GetRegNum() != REG_NA); - bool isInMemory = !isInReg || varDsc->IsAlwaysAliveInMemory(); + const bool isInReg = varDsc->lvIsInReg() && (tree->GetRegNum() != REG_NA); + const bool isInMemory = !isInReg || varDsc->IsAlwaysAliveInMemory(); if (isInReg) { compiler->codeGen->genUpdateRegLife(varDsc, isBorn, isDying DEBUGARG(tree)); @@ -198,7 +198,8 @@ void TreeLifeUpdater::UpdateLifeVar(GenTree* tree, GenTreeLclVarComm } else if (varDsc->lvPromoted) { - bool isMultiRegLocal = lclVarTree->IsMultiRegLclVar(); + const bool isMultiRegLocal = lclVarTree->IsMultiRegLclVar(); + #ifdef DEBUG if (isMultiRegLocal) { @@ -208,15 +209,15 @@ void 
TreeLifeUpdater::UpdateLifeVar(GenTree* tree, GenTreeLclVarComm } #endif - bool isAnyFieldDying = lclVarTree->HasLastUse(); + const bool isAnyFieldDying = lclVarTree->HasLastUse(); if (isBorn || isAnyFieldDying) { - unsigned firstFieldVarNum = varDsc->lvFieldLclStart; + const unsigned firstFieldVarNum = varDsc->lvFieldLclStart; for (unsigned i = 0; i < varDsc->lvFieldCnt; ++i) { - unsigned fldLclNum = firstFieldVarNum + i; - LclVarDsc* fldVarDsc = compiler->lvaGetDesc(fldLclNum); + const unsigned fldLclNum = firstFieldVarNum + i; + LclVarDsc* fldVarDsc = compiler->lvaGetDesc(fldLclNum); assert(fldVarDsc->lvIsStructField); if (!fldVarDsc->lvTracked) { @@ -225,9 +226,9 @@ void TreeLifeUpdater::UpdateLifeVar(GenTree* tree, GenTreeLclVarComm continue; } - bool previouslyLive = + const bool previouslyLive = ForCodeGen && VarSetOps::IsMember(compiler, compiler->compCurLife, fldVarDsc->lvVarIndex); - bool isDying = lclVarTree->IsLastUse(i); + const bool isDying = lclVarTree->IsLastUse(i); UpdateLifeBit(compiler->compCurLife, fldVarDsc, isBorn, isDying); if (!ForCodeGen) @@ -239,8 +240,8 @@ void TreeLifeUpdater::UpdateLifeVar(GenTree* tree, GenTreeLclVarComm // IsMultiRegLclVar() returns true. 
assert(isMultiRegLocal || !fldVarDsc->lvIsInReg()); - bool isInReg = fldVarDsc->lvIsInReg() && (lclVarTree->AsLclVar()->GetRegNumByIdx(i) != REG_NA); - bool isInMemory = !isInReg || fldVarDsc->IsAlwaysAliveInMemory(); + const bool isInReg = fldVarDsc->lvIsInReg() && (lclVarTree->AsLclVar()->GetRegNumByIdx(i) != REG_NA); + const bool isInMemory = !isInReg || fldVarDsc->IsAlwaysAliveInMemory(); if (isInReg) { @@ -363,8 +364,27 @@ void TreeLifeUpdater::DumpLifeDelta(GenTree* tree) #ifdef DEBUG if (compiler->verbose && !VarSetOps::Equal(compiler, oldLife, compiler->compCurLife)) { - printf("\t\t\t\tLive vars after [%06u]: ", Compiler::dspTreeID(tree)); + printf("\t\t\t\t\t\t\tLive vars after [%06u]: ", Compiler::dspTreeID(tree)); dumpConvertedVarSet(compiler, oldLife); + + // deadSet = oldLife - compCurLife + VARSET_TP deadSet(VarSetOps::Diff(compiler, oldLife, compiler->compCurLife)); + + // bornSet = compCurLife - oldLife + VARSET_TP bornSet(VarSetOps::Diff(compiler, compiler->compCurLife, oldLife)); + + if (!VarSetOps::IsEmpty(compiler, deadSet)) + { + printf(" -"); + dumpConvertedVarSet(compiler, deadSet); + } + + if (!VarSetOps::IsEmpty(compiler, bornSet)) + { + printf(" +"); + dumpConvertedVarSet(compiler, bornSet); + } + printf(" => "); dumpConvertedVarSet(compiler, compiler->compCurLife); printf("\n"); @@ -373,7 +393,7 @@ void TreeLifeUpdater::DumpLifeDelta(GenTree* tree) if (ForCodeGen && compiler->verbose && !VarSetOps::Equal(compiler, oldStackPtrsLife, compiler->codeGen->gcInfo.gcVarPtrSetCur)) { - printf("\t\t\t\tGC vars after [%06u]: ", Compiler::dspTreeID(tree)); + printf("\t\t\t\t\t\t\tGC vars after [%06u]: ", Compiler::dspTreeID(tree)); dumpConvertedVarSet(compiler, oldStackPtrsLife); printf(" => "); dumpConvertedVarSet(compiler, compiler->codeGen->gcInfo.gcVarPtrSetCur); diff --git a/src/coreclr/jit/unwindarmarch.cpp b/src/coreclr/jit/unwindarmarch.cpp index b54ce016c16a5..e53917ffda9c2 100644 --- a/src/coreclr/jit/unwindarmarch.cpp +++ 
b/src/coreclr/jit/unwindarmarch.cpp @@ -1834,6 +1834,22 @@ void UnwindInfo::Split() #ifdef DEBUG // Consider DOTNET_JitSplitFunctionSize unsigned splitFunctionSize = (unsigned)JitConfig.JitSplitFunctionSize(); + if (splitFunctionSize == 0) + { + // If the split configuration is not set, then sometimes set it during stress. + // Use two stress modes: a split size of 4 (extreme) and a split size of 200 (reasonable). + if (uwiComp->compStressCompile(Compiler::STRESS_UNWIND, 10)) + { + if (uwiComp->compStressCompile(Compiler::STRESS_UNWIND, 5)) + { + splitFunctionSize = 4; + } + else + { + splitFunctionSize = 200; + } + } + } if (splitFunctionSize != 0) if (splitFunctionSize < maxFragmentSize) diff --git a/src/coreclr/jit/valuenumtype.h b/src/coreclr/jit/valuenumtype.h index 5ed44e280f8f7..2eb3254e3e18b 100644 --- a/src/coreclr/jit/valuenumtype.h +++ b/src/coreclr/jit/valuenumtype.h @@ -4,7 +4,7 @@ // Defines the type "ValueNum". // This file exists only to break an include file cycle -- had been in ValueNum.h. But that -// file wanted to include gentree.h to get GT_COUNT, and gentree.h wanted ton include ValueNum.h to +// file wanted to include gentree.h to get GT_COUNT, and gentree.h wanted to include ValueNum.h for // the ValueNum type. 
/*****************************************************************************/ diff --git a/src/coreclr/nativeaot/Bootstrap/main.cpp b/src/coreclr/nativeaot/Bootstrap/main.cpp index 57c2c69f14089..ed343d16ed8c2 100644 --- a/src/coreclr/nativeaot/Bootstrap/main.cpp +++ b/src/coreclr/nativeaot/Bootstrap/main.cpp @@ -104,7 +104,7 @@ extern "C" bool RhRegisterOSModule(void * pModule, extern "C" void* PalGetModuleHandleFromPointer(void* pointer); extern "C" void GetRuntimeException(); -extern "C" void FailFast(); +extern "C" void RuntimeFailFast(); extern "C" void AppendExceptionStackFrame(); extern "C" void GetSystemArrayEEType(); extern "C" void OnFirstChanceException(); @@ -122,7 +122,7 @@ typedef void(*pfn)(); static const pfn c_classlibFunctions[] = { &GetRuntimeException, - &FailFast, + &RuntimeFailFast, nullptr, // &UnhandledExceptionHandler, &AppendExceptionStackFrame, nullptr, // &CheckStaticClassConstruction, diff --git a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs index 01475cf9addef..c47770bed0947 100644 --- a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs +++ b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs @@ -62,12 +62,12 @@ internal static class InternalCalls [RuntimeExport("RhCollect")] internal static void RhCollect(int generation, InternalGCCollectionMode mode, bool lowMemoryP = false) { - RhpCollect(generation, mode, lowMemoryP); + RhpCollect(generation, mode, lowMemoryP ? 
Interop.BOOL.TRUE : Interop.BOOL.FALSE); } [DllImport(Redhawk.BaseName)] [UnmanagedCallConv(CallConvs = new Type[] { typeof(CallConvCdecl) })] - private static extern void RhpCollect(int generation, InternalGCCollectionMode mode, bool lowMemoryP); + private static extern void RhpCollect(int generation, InternalGCCollectionMode mode, Interop.BOOL lowMemoryP); [RuntimeExport("RhGetGcTotalMemory")] internal static long RhGetGcTotalMemory() diff --git a/src/coreclr/nativeaot/Runtime/DebugHeader.cpp b/src/coreclr/nativeaot/Runtime/DebugHeader.cpp index 0519a3098d245..b01339f812031 100644 --- a/src/coreclr/nativeaot/Runtime/DebugHeader.cpp +++ b/src/coreclr/nativeaot/Runtime/DebugHeader.cpp @@ -77,7 +77,7 @@ struct DotNetRuntimeDebugHeader // This counter can be incremented to indicate breaking changes // This field must be encoded little endian, regardless of the typical endianness of // the machine - const uint16_t MajorVersion = 3; + const uint16_t MajorVersion = 4; // This counter can be incremented to indicate back-compatible changes // This field must be encoded little endian, regardless of the typical endianness of @@ -269,7 +269,7 @@ extern "C" void PopulateDebugHeaders() static_assert(MethodTable::Flags::EETypeKindMask == 0x00030000, "The debugging data contract has a hard coded dependency on this value of MethodTable::Flags. If you change this value you must bump major_version_number."); static_assert(MethodTable::Flags::HasFinalizerFlag == 0x00100000, "The debugging data contract has a hard coded dependency on this value of MethodTable::Flags. If you change this value you must bump major_version_number."); - static_assert(MethodTable::Flags::HasPointersFlag == 0x00200000, "The debugging data contract has a hard coded dependency on this value of MethodTable::Flags. 
If you change this value you must bump major_version_number."); + static_assert(MethodTable::Flags::HasPointersFlag == 0x01000000, "The debugging data contract has a hard coded dependency on this value of MethodTable::Flags. If you change this value you must bump major_version_number."); static_assert(MethodTable::Flags::GenericVarianceFlag == 0x00800000, "The debugging data contract has a hard coded dependency on this value of MethodTable::Flags. If you change this value you must bump major_version_number."); static_assert(MethodTable::Flags::IsGenericFlag == 0x02000000, "The debugging data contract has a hard coded dependency on this value of MethodTable::Flags. If you change this value you must bump major_version_number."); static_assert(MethodTable::Flags::ElementTypeMask == 0x7C000000, "The debugging data contract has a hard coded dependency on this value of MethodTable::Flags. If you change this value you must bump major_version_number."); diff --git a/src/coreclr/nativeaot/Runtime/EtwEvents.h b/src/coreclr/nativeaot/Runtime/EtwEvents.h deleted file mode 100644 index a02d7577f2518..0000000000000 --- a/src/coreclr/nativeaot/Runtime/EtwEvents.h +++ /dev/null @@ -1,890 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -// shipping criteria: no EVENTPIPE-NATIVEAOT-TODO left in the codebase -// @TODO: Use genEtwProvider.py to generate headers to replace this file. -// Reconcile tracking/callbacks/contexts used for ETW vs EventPipe. -// FireEtXplat* functions handle ETW only. The naming matches the generated -// output of genEtwProvider.py. 
-#ifndef __RH_ETW_DEFS_INCLUDED -#define __RH_ETW_DEFS_INCLUDED - -#if defined(FEATURE_ETW) && !defined(DACCESS_COMPILE) - -#include - -#ifndef RH_ETW_INLINE -#define RH_ETW_INLINE __declspec(noinline) __inline -#endif - -typedef struct _MCGEN_TRACE_CONTEXT -{ - TRACEHANDLE RegistrationHandle; - TRACEHANDLE Logger; - ULONGLONG MatchAnyKeyword; - ULONGLONG MatchAllKeyword; - ULONG Flags; - ULONG IsEnabled; - unsigned char Level; - unsigned char Reserve; - unsigned short EnableBitsCount; - ULONG * EnableBitMask; - const ULONGLONG* EnableKeyWords; - const unsigned char* EnableLevel; -} MCGEN_TRACE_CONTEXT, *PMCGEN_TRACE_CONTEXT; - -__declspec(noinline) __inline void __stdcall -EtwCallback(GUID * /*SourceId*/, uint32_t IsEnabled, uint8_t Level, uint64_t MatchAnyKeyword, uint64_t MatchAllKeyword, EVENT_FILTER_DESCRIPTOR * FilterData, void * CallbackContext); - -__declspec(noinline) __inline bool __stdcall -RhEventTracingEnabled(MCGEN_TRACE_CONTEXT * EnableInfo, - const EVENT_DESCRIPTOR * EventDescriptor) -{ - if (!EnableInfo) - return false; - if ((EventDescriptor->Level <= EnableInfo->Level) || (EnableInfo->Level == 0)) - { - if ((EventDescriptor->Keyword == (ULONGLONG)0) || - ((EventDescriptor->Keyword & EnableInfo->MatchAnyKeyword) && - ((EventDescriptor->Keyword & EnableInfo->MatchAllKeyword) == EnableInfo->MatchAllKeyword))) - return true; - } - return false; -} - -#define ETW_EVENT_ENABLED(Context, EventDescriptor) (Context.IsEnabled && RhEventTracingEnabled(&Context, &EventDescriptor)) - -extern "C" __declspec(selectany) const GUID MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER = {0x763fd754, 0x7086, 0x4dfe, {0x95, 0xeb, 0xc0, 0x1a, 0x46, 0xfa, 0xf4, 0xca}}; - -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR BGC1stConEnd = {0xd, 0x0, 0x10, 0x4, 0x1b, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR BGC1stNonConEnd = {0xc, 0x0, 0x10, 0x4, 0x1a, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const 
EVENT_DESCRIPTOR BGC2ndConBegin = {0x10, 0x0, 0x10, 0x4, 0x1e, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR BGC2ndConEnd = {0x11, 0x0, 0x10, 0x4, 0x1f, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR BGC2ndNonConBegin = {0xe, 0x0, 0x10, 0x4, 0x1c, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR BGC2ndNonConEnd = {0xf, 0x0, 0x10, 0x4, 0x1d, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR BGCAllocWaitBegin = {0x17, 0x0, 0x10, 0x4, 0x25, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR BGCAllocWaitEnd = {0x18, 0x0, 0x10, 0x4, 0x26, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR BGCBegin = {0xb, 0x0, 0x10, 0x4, 0x19, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR BGCDrainMark = {0x14, 0x0, 0x10, 0x4, 0x22, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR BGCOverflow = {0x16, 0x0, 0x10, 0x4, 0x24, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR BGCPlanEnd = {0x12, 0x0, 0x10, 0x4, 0x20, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR BGCRevisit = {0x15, 0x0, 0x10, 0x4, 0x23, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR BGCSweepEnd = {0x13, 0x0, 0x10, 0x4, 0x21, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCFullNotify_V1 = {0x19, 0x1, 0x10, 0x4, 0x13, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCGlobalHeapHistory_V1 = {0x5, 0x1, 0x10, 0x4, 0x12, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCJoin_V1 = {0x6, 0x1, 0x10, 0x5, 0x14, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCOptimized_V1 = {0x3, 0x1, 0x10, 0x5, 0x10, 0x1, 
0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCPerHeapHistory = {0x4, 0x2, 0x10, 0x4, 0x11, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCSettings = {0x2, 0x0, 0x10, 0x4, 0xe, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR PinPlugAtGCTime = {0xc7, 0x0, 0x10, 0x5, 0x2c, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR PrvDestroyGCHandle = {0xc3, 0x0, 0x10, 0x5, 0x2b, 0x1, 0x8000000000004000}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR PrvGCMarkCards_V1 = {0xa, 0x1, 0x10, 0x4, 0x18, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR PrvGCMarkFinalizeQueueRoots_V1 = {0x8, 0x1, 0x10, 0x4, 0x16, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR PrvGCMarkHandles_V1 = {0x9, 0x1, 0x10, 0x4, 0x17, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR PrvGCMarkStackRoots_V1 = {0x7, 0x1, 0x10, 0x4, 0x15, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR PrvSetGCHandle = {0xc2, 0x0, 0x10, 0x5, 0x2a, 0x1, 0x8000000000004000}; - -extern "C" __declspec(selectany) REGHANDLE Microsoft_Windows_DotNETRuntimePrivateHandle; -extern "C" __declspec(selectany) MCGEN_TRACE_CONTEXT MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context; - -#define RH_ETW_REGISTER_Microsoft_Windows_DotNETRuntimePrivate() do { PalEventRegister(&MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER, EtwCallback, &MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, &Microsoft_Windows_DotNETRuntimePrivateHandle); } while (false) -#define RH_ETW_UNREGISTER_Microsoft_Windows_DotNETRuntimePrivate() do { PalEventUnregister(Microsoft_Windows_DotNETRuntimePrivateHandle); } while (false) - -#define FireEtXplatBGC1stConEnd(ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && 
PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGC1stConEnd)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCNoUserData(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGC1stConEnd, ClrInstanceID) : 0 - -#define FireEtXplatBGC1stNonConEnd(ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGC1stNonConEnd)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCNoUserData(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGC1stNonConEnd, ClrInstanceID) : 0 - -#define FireEtXplatBGC2ndConBegin(ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGC2ndConBegin)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCNoUserData(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGC2ndConBegin, ClrInstanceID) : 0 - -#define FireEtXplatBGC2ndConEnd(ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGC2ndConEnd)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCNoUserData(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGC2ndConEnd, ClrInstanceID) : 0 - -#define FireEtXplatBGC2ndNonConBegin(ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGC2ndNonConBegin)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCNoUserData(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGC2ndNonConBegin, ClrInstanceID) : 0 - -#define FireEtXplatBGC2ndNonConEnd(ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGC2ndNonConEnd)) ? 
Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCNoUserData(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGC2ndNonConEnd, ClrInstanceID) : 0 - -#define FireEtXplatBGCAllocWaitBegin(Reason, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGCAllocWaitBegin)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_BGCAllocWait(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGCAllocWaitBegin, Reason, ClrInstanceID) : 0 - -#define FireEtXplatBGCAllocWaitEnd(Reason, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGCAllocWaitEnd)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_BGCAllocWait(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGCAllocWaitEnd, Reason, ClrInstanceID) : 0 - -#define FireEtXplatBGCBegin(ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGCBegin)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCNoUserData(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGCBegin, ClrInstanceID) : 0 - -#define FireEtXplatBGCDrainMark(Objects, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGCDrainMark)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_BGCDrainMark(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGCDrainMark, Objects, ClrInstanceID) : 0 - -#define FireEtXplatBGCOverflow(Min, Max, Objects, IsLarge, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGCOverflow)) ? 
Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_BGCOverflow(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGCOverflow, Min, Max, Objects, IsLarge, ClrInstanceID) : 0 - -#define FireEtXplatBGCPlanEnd(ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGCPlanEnd)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCNoUserData(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGCPlanEnd, ClrInstanceID) : 0 - -#define FireEtXplatBGCRevisit(Pages, Objects, IsLarge, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGCRevisit)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_BGCRevisit(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGCRevisit, Pages, Objects, IsLarge, ClrInstanceID) : 0 - -#define FireEtXplatBGCSweepEnd(ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGCSweepEnd)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCNoUserData(Microsoft_Windows_DotNETRuntimePrivateHandle, &BGCSweepEnd, ClrInstanceID) : 0 - -#define FireEtXplatGCFullNotify_V1(GenNumber, IsAlloc, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &GCFullNotify_V1)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCFullNotify_V1(Microsoft_Windows_DotNETRuntimePrivateHandle, &GCFullNotify_V1, GenNumber, IsAlloc, ClrInstanceID) : 0 - -#define FireEtXplatGCGlobalHeapHistory_V1(FinalYoungestDesired, NumHeaps, CondemnedGeneration, Gen0ReductionCount, Reason, GlobalMechanisms, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &GCGlobalHeapHistory_V1)) ? 
Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCGlobalHeap_V1(Microsoft_Windows_DotNETRuntimePrivateHandle, &GCGlobalHeapHistory_V1, FinalYoungestDesired, NumHeaps, CondemnedGeneration, Gen0ReductionCount, Reason, GlobalMechanisms, ClrInstanceID) : 0 - -#define FireEtXplatGCJoin_V1(Heap, JoinTime, JoinType, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &GCJoin_V1)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCJoin_V1(Microsoft_Windows_DotNETRuntimePrivateHandle, &GCJoin_V1, Heap, JoinTime, JoinType, ClrInstanceID) : 0 - -#define FireEtXplatGCOptimized_V1(DesiredAllocation, NewAllocation, GenerationNumber, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &GCOptimized_V1)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCOptimized_V1(Microsoft_Windows_DotNETRuntimePrivateHandle, &GCOptimized_V1, DesiredAllocation, NewAllocation, GenerationNumber, ClrInstanceID) : 0 - -#define FireEtXplatGCPerHeapHistory() (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &GCPerHeapHistory)) ? TemplateEventDescriptor(Microsoft_Windows_DotNETRuntimePrivateHandle, &GCPerHeapHistory) : 0 - -#define FireEtXplatGCSettings(SegmentSize, LargeObjectSegmentSize, ServerGC) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &GCSettings)) ? 
Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCSettings(Microsoft_Windows_DotNETRuntimePrivateHandle, &GCSettings, SegmentSize, LargeObjectSegmentSize, ServerGC) : 0 - -#define FireEtXplatPinPlugAtGCTime(PlugStart, PlugEnd, GapBeforeSize, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &PinPlugAtGCTime)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_PinPlugAtGCTime(Microsoft_Windows_DotNETRuntimePrivateHandle, &PinPlugAtGCTime, PlugStart, PlugEnd, GapBeforeSize, ClrInstanceID) : 0 - -#define FireEtXplatPrvDestroyGCHandle(HandleID, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &PrvDestroyGCHandle)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_PrvDestroyGCHandle(Microsoft_Windows_DotNETRuntimePrivateHandle, &PrvDestroyGCHandle, HandleID, ClrInstanceID) : 0 - -#define FireEtXplatPrvGCMarkCards_V1(HeapNum, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &PrvGCMarkCards_V1)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_PrvGCMark_V1(Microsoft_Windows_DotNETRuntimePrivateHandle, &PrvGCMarkCards_V1, HeapNum, ClrInstanceID) : 0 - -#define FireEtXplatPrvGCMarkFinalizeQueueRoots_V1(HeapNum, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &PrvGCMarkFinalizeQueueRoots_V1)) ? 
Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_PrvGCMark_V1(Microsoft_Windows_DotNETRuntimePrivateHandle, &PrvGCMarkFinalizeQueueRoots_V1, HeapNum, ClrInstanceID) : 0 - -#define FireEtXplatPrvGCMarkHandles_V1(HeapNum, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &PrvGCMarkHandles_V1)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_PrvGCMark_V1(Microsoft_Windows_DotNETRuntimePrivateHandle, &PrvGCMarkHandles_V1, HeapNum, ClrInstanceID) : 0 - -#define FireEtXplatPrvGCMarkStackRoots_V1(HeapNum, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &PrvGCMarkStackRoots_V1)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_PrvGCMark_V1(Microsoft_Windows_DotNETRuntimePrivateHandle, &PrvGCMarkStackRoots_V1, HeapNum, ClrInstanceID) : 0 - -#define FireEtXplatPrvSetGCHandle(HandleID, ObjectID, Kind, Generation, AppDomainID, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimePrivateHandle, &PrvSetGCHandle)) ? 
Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_PrvSetGCHandle(Microsoft_Windows_DotNETRuntimePrivateHandle, &PrvSetGCHandle, HandleID, ObjectID, Kind, Generation, AppDomainID, ClrInstanceID) : 0 - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_BGCAllocWait(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t Reason, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[2]; - EventDataDescCreate(&EventData[0], &Reason, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 2, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_BGCDrainMark(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint64_t Objects, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[2]; - EventDataDescCreate(&EventData[0], &Objects, sizeof(uint64_t)); - EventDataDescCreate(&EventData[1], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 2, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_BGCOverflow(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint64_t Min, uint64_t Max, uint64_t Objects, uint32_t IsLarge, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[5]; - EventDataDescCreate(&EventData[0], &Min, sizeof(uint64_t)); - EventDataDescCreate(&EventData[1], &Max, sizeof(uint64_t)); - EventDataDescCreate(&EventData[2], &Objects, sizeof(uint64_t)); - EventDataDescCreate(&EventData[3], &IsLarge, sizeof(uint32_t)); - EventDataDescCreate(&EventData[4], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 5, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_BGCRevisit(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint64_t Pages, uint64_t Objects, uint32_t IsLarge, uint16_t ClrInstanceID) -{ - 
EVENT_DATA_DESCRIPTOR EventData[4]; - EventDataDescCreate(&EventData[0], &Pages, sizeof(uint64_t)); - EventDataDescCreate(&EventData[1], &Objects, sizeof(uint64_t)); - EventDataDescCreate(&EventData[2], &IsLarge, sizeof(uint32_t)); - EventDataDescCreate(&EventData[3], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 4, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCFullNotify_V1(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t GenNumber, uint32_t IsAlloc, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[3]; - EventDataDescCreate(&EventData[0], &GenNumber, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &IsAlloc, sizeof(uint32_t)); - EventDataDescCreate(&EventData[2], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 3, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCGlobalHeap_V1(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint64_t FinalYoungestDesired, int32_t NumHeaps, uint32_t CondemnedGeneration, uint32_t Gen0ReductionCount, uint32_t Reason, uint32_t GlobalMechanisms, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[7]; - EventDataDescCreate(&EventData[0], &FinalYoungestDesired, sizeof(uint64_t)); - EventDataDescCreate(&EventData[1], &NumHeaps, sizeof(int32_t)); - EventDataDescCreate(&EventData[2], &CondemnedGeneration, sizeof(uint32_t)); - EventDataDescCreate(&EventData[3], &Gen0ReductionCount, sizeof(uint32_t)); - EventDataDescCreate(&EventData[4], &Reason, sizeof(uint32_t)); - EventDataDescCreate(&EventData[5], &GlobalMechanisms, sizeof(uint32_t)); - EventDataDescCreate(&EventData[6], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 7, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCJoin_V1(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * 
Descriptor, uint32_t Heap, uint32_t JoinTime, uint32_t JoinType, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[4]; - EventDataDescCreate(&EventData[0], &Heap, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &JoinTime, sizeof(uint32_t)); - EventDataDescCreate(&EventData[2], &JoinType, sizeof(uint32_t)); - EventDataDescCreate(&EventData[3], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 4, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCNoUserData(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[1]; - EventDataDescCreate(&EventData[0], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 1, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCOptimized_V1(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint64_t DesiredAllocation, uint64_t NewAllocation, uint32_t GenerationNumber, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[4]; - EventDataDescCreate(&EventData[0], &DesiredAllocation, sizeof(uint64_t)); - EventDataDescCreate(&EventData[1], &NewAllocation, sizeof(uint64_t)); - EventDataDescCreate(&EventData[2], &GenerationNumber, sizeof(uint32_t)); - EventDataDescCreate(&EventData[3], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 4, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_GCSettings(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint64_t SegmentSize, uint64_t LargeObjectSegmentSize, UInt32_BOOL ServerGC) -{ - EVENT_DATA_DESCRIPTOR EventData[3]; - EventDataDescCreate(&EventData[0], &SegmentSize, sizeof(uint64_t)); - EventDataDescCreate(&EventData[1], &LargeObjectSegmentSize, sizeof(uint64_t)); - EventDataDescCreate(&EventData[2], &ServerGC, sizeof(UInt32_BOOL)); - return 
PalEventWrite(RegHandle, Descriptor, 3, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_PinPlugAtGCTime(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, const void* PlugStart, const void* PlugEnd, const void* GapBeforeSize, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[4]; - EventDataDescCreate(&EventData[0], &PlugStart, sizeof(const void*)); - EventDataDescCreate(&EventData[1], &PlugEnd, sizeof(const void*)); - EventDataDescCreate(&EventData[2], &GapBeforeSize, sizeof(const void*)); - EventDataDescCreate(&EventData[3], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 4, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_PrvDestroyGCHandle(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, const void* HandleID, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[2]; - EventDataDescCreate(&EventData[0], &HandleID, sizeof(const void*)); - EventDataDescCreate(&EventData[1], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 2, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_PrvGCMark_V1(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t HeapNum, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[2]; - EventDataDescCreate(&EventData[0], &HeapNum, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 2, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PRIVATE_PROVIDER_PrvSetGCHandle(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, const void* HandleID, const void* ObjectID, uint32_t Kind, uint32_t Generation, uint64_t AppDomainID, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[6]; - EventDataDescCreate(&EventData[0], &HandleID, sizeof(const void*)); - 
EventDataDescCreate(&EventData[1], &ObjectID, sizeof(const void*)); - EventDataDescCreate(&EventData[2], &Kind, sizeof(uint32_t)); - EventDataDescCreate(&EventData[3], &Generation, sizeof(uint32_t)); - EventDataDescCreate(&EventData[4], &AppDomainID, sizeof(uint64_t)); - EventDataDescCreate(&EventData[5], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 6, EventData); -} - -extern "C" __declspec(selectany) const GUID MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER = {0xe13c0d23, 0xccbc, 0x4e12, {0x93, 0x1b, 0xd9, 0xcc, 0x2e, 0xee, 0x27, 0xe4}}; - -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR BulkType = {0xf, 0x0, 0x10, 0x4, 0xa, 0x15, 0x8000000000080000}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR DestroyGCHandle = {0x1f, 0x0, 0x10, 0x4, 0x22, 0x1, 0x8000000000000002}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR ExceptionThrown_V1 = {0x50, 0x1, 0x10, 0x2, 0x1, 0x7, 0x8000000200008000}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCAllocationTick_V1 = {0xa, 0x1, 0x10, 0x5, 0xb, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCAllocationTick_V2 = {0xa, 0x2, 0x10, 0x5, 0xb, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCAllocationTick_V3 = {0xa, 0x3, 0x10, 0x5, 0xb, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCBulkEdge = {0x13, 0x0, 0x10, 0x4, 0x17, 0x1, 0x8000000000100000}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCBulkMovedObjectRanges = {0x16, 0x0, 0x10, 0x4, 0x1a, 0x1, 0x8000000000400000}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCBulkNode = {0x12, 0x0, 0x10, 0x4, 0x16, 0x1, 0x8000000000100000}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCBulkRCW = {0x25, 0x0, 0x10, 0x4, 0x27, 0x1, 0x8000000000100000}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCBulkRootCCW = {0x24, 0x0, 0x10, 0x4, 0x26, 0x1, 0x8000000000100000}; 
-extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCBulkRootConditionalWeakTableElementEdge = {0x11, 0x0, 0x10, 0x4, 0x15, 0x1, 0x8000000000100000}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCBulkRootEdge = {0x10, 0x0, 0x10, 0x4, 0x14, 0x1, 0x8000000000100000}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCBulkSurvivingObjectRanges = {0x15, 0x0, 0x10, 0x4, 0x19, 0x1, 0x8000000000400000}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCCreateConcurrentThread_V1 = {0xb, 0x1, 0x10, 0x4, 0xc, 0x1, 0x8000000000010001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCCreateSegment_V1 = {0x5, 0x1, 0x10, 0x4, 0x86, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCEnd_V1 = {0x2, 0x1, 0x10, 0x4, 0x2, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCFreeSegment_V1 = {0x6, 0x1, 0x10, 0x4, 0x87, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCGenerationRange = {0x17, 0x0, 0x10, 0x4, 0x1b, 0x1, 0x8000000000400000}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCGlobalHeapHistory_V2 = {0xcd, 0x2, 0x10, 0x4, 0xcd, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCHeapStats_V1 = {0x4, 0x1, 0x10, 0x4, 0x85, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCJoin_V2 = {0xcb, 0x2, 0x10, 0x5, 0xcb, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCMarkFinalizeQueueRoots = {0x1a, 0x0, 0x10, 0x4, 0x1d, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCMarkHandles = {0x1b, 0x0, 0x10, 0x4, 0x1e, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCMarkOlderGenerationRoots = {0x1c, 0x0, 0x10, 0x4, 0x1f, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCMarkStackRoots = {0x19, 0x0, 0x10, 0x4, 0x1c, 0x1, 
0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCMarkWithType = {0xca, 0x0, 0x10, 0x4, 0xca, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCPerHeapHistory_V3 = {0xcc, 0x3, 0x10, 0x4, 0xcc, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCRestartEEBegin_V1 = {0x7, 0x1, 0x10, 0x4, 0x88, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCRestartEEEnd_V1 = {0x3, 0x1, 0x10, 0x4, 0x84, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCStart_V1 = {0x1, 0x1, 0x10, 0x4, 0x1, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCStart_V2 = {0x1, 0x2, 0x10, 0x4, 0x1, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCSuspendEEBegin_V1 = {0x9, 0x1, 0x10, 0x4, 0xa, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCSuspendEEEnd_V1 = {0x8, 0x1, 0x10, 0x4, 0x89, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCTerminateConcurrentThread_V1 = {0xc, 0x1, 0x10, 0x4, 0xd, 0x1, 0x8000000000010001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR GCTriggered = {0x23, 0x0, 0x10, 0x4, 0x23, 0x1, 0x8000000000000001}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR ModuleLoad_V2 = {0x98, 0x2, 0x10, 0x4, 0x21, 0xa, 0x8000000020000008}; -extern "C" __declspec(selectany) const EVENT_DESCRIPTOR SetGCHandle = {0x1e, 0x0, 0x10, 0x4, 0x21, 0x1, 0x8000000000000002}; - -extern "C" __declspec(selectany) REGHANDLE Microsoft_Windows_DotNETRuntimeHandle; -extern "C" __declspec(selectany) MCGEN_TRACE_CONTEXT MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context; - -#define RH_ETW_REGISTER_Microsoft_Windows_DotNETRuntime() do { PalEventRegister(&MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER, EtwCallback, &MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, &Microsoft_Windows_DotNETRuntimeHandle); 
} while (false) -#define RH_ETW_UNREGISTER_Microsoft_Windows_DotNETRuntime() do { PalEventUnregister(Microsoft_Windows_DotNETRuntimeHandle); } while (false) - -#define FireEtXplatDestroyGCHandle(HandleID, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &DestroyGCHandle)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_DestroyGCHandle(Microsoft_Windows_DotNETRuntimeHandle, &DestroyGCHandle, HandleID, ClrInstanceID) : 0 - -#define FireEtXplatExceptionThrown_V1(ExceptionType, ExceptionMessage, ExceptionEIP, ExceptionHRESULT, ExceptionFlags, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &ExceptionThrown_V1)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_Exception(Microsoft_Windows_DotNETRuntimeHandle, &ExceptionThrown_V1, ExceptionType, ExceptionMessage, ExceptionEIP, ExceptionHRESULT, ExceptionFlags, ClrInstanceID) : 0 - -#define FireEtXplatGCAllocationTick_V1(AllocationAmount, AllocationKind, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCAllocationTick_V1)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCAllocationTick_V1(Microsoft_Windows_DotNETRuntimeHandle, &GCAllocationTick_V1, AllocationAmount, AllocationKind, ClrInstanceID) : 0 - -#define FireEtXplatGCAllocationTick_V2(AllocationAmount, AllocationKind, ClrInstanceID, AllocationAmount64, TypeID, TypeName, HeapIndex) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCAllocationTick_V2)) ? 
Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCAllocationTick_V2(Microsoft_Windows_DotNETRuntimeHandle, &GCAllocationTick_V2, AllocationAmount, AllocationKind, ClrInstanceID, AllocationAmount64, TypeID, TypeName, HeapIndex) : 0 - -#define FireEtXplatGCAllocationTick_V3(AllocationAmount, AllocationKind, ClrInstanceID, AllocationAmount64, TypeID, TypeName, HeapIndex, Address) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCAllocationTick_V3)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCAllocationTick_V3(Microsoft_Windows_DotNETRuntimeHandle, &GCAllocationTick_V3, AllocationAmount, AllocationKind, ClrInstanceID, AllocationAmount64, TypeID, TypeName, HeapIndex, Address) : 0 - -#define FireEtXplatGCBulkEdge(Index, Count, ClrInstanceID, Values_Len_, Values) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkEdge)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCBulkEdge(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkEdge, Index, Count, ClrInstanceID, Values_Len_, Values) : 0 - -#define FireEtXplatGCBulkMovedObjectRanges(Index, Count, ClrInstanceID, Values_Len_, Values) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkMovedObjectRanges)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCBulkMovedObjectRanges(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkMovedObjectRanges, Index, Count, ClrInstanceID, Values_Len_, Values) : 0 - -#define FireEtXplatGCBulkNode(Index, Count, ClrInstanceID, Values_Len_, Values) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkNode)) ? 
Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCBulkNode(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkNode, Index, Count, ClrInstanceID, Values_Len_, Values) : 0 - -#define FireEtXplatGCBulkRCW(Count, ClrInstanceID, Values_Len_, Values) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRCW)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCBulkRCW(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRCW, Count, ClrInstanceID, Values_Len_, Values) : 0 - -#define FireEtXplatGCBulkRootCCW(Count, ClrInstanceID, Values_Len_, Values) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRootCCW)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCBulkRootCCW(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRootCCW, Count, ClrInstanceID, Values_Len_, Values) : 0 - -#define FireEtXplatGCBulkRootConditionalWeakTableElementEdge(Index, Count, ClrInstanceID, Values_Len_, Values) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRootConditionalWeakTableElementEdge)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCBulkRootConditionalWeakTableElementEdge(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRootConditionalWeakTableElementEdge, Index, Count, ClrInstanceID, Values_Len_, Values) : 0 - -#define FireEtXplatGCBulkRootEdge(Index, Count, ClrInstanceID, Values_Len_, Values) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRootEdge)) ? 
Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCBulkRootEdge(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkRootEdge, Index, Count, ClrInstanceID, Values_Len_, Values) : 0 - -#define FireEtXplatGCBulkSurvivingObjectRanges(Index, Count, ClrInstanceID, Values_Len_, Values) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkSurvivingObjectRanges)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCBulkSurvivingObjectRanges(Microsoft_Windows_DotNETRuntimeHandle, &GCBulkSurvivingObjectRanges, Index, Count, ClrInstanceID, Values_Len_, Values) : 0 - -#define FireEtXplatGCCreateConcurrentThread_V1(ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCCreateConcurrentThread_V1)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCCreateConcurrentThread(Microsoft_Windows_DotNETRuntimeHandle, &GCCreateConcurrentThread_V1, ClrInstanceID) : 0 - -#define FireEtXplatGCCreateSegment_V1(Address, Size, Type, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCCreateSegment_V1)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCCreateSegment_V1(Microsoft_Windows_DotNETRuntimeHandle, &GCCreateSegment_V1, Address, Size, Type, ClrInstanceID) : 0 - -#define FireEtXplatGCEnd_V1(Count, Depth, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCEnd_V1)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCEnd_V1(Microsoft_Windows_DotNETRuntimeHandle, &GCEnd_V1, Count, Depth, ClrInstanceID) : 0 - -#define FireEtXplatGCFreeSegment_V1(Address, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCFreeSegment_V1)) ? 
Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCFreeSegment_V1(Microsoft_Windows_DotNETRuntimeHandle, &GCFreeSegment_V1, Address, ClrInstanceID) : 0 - -#define FireEtXplatGCGenerationRange(Generation, RangeStart, RangeUsedLength, RangeReservedLength, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCGenerationRange)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCGenerationRange(Microsoft_Windows_DotNETRuntimeHandle, &GCGenerationRange, Generation, RangeStart, RangeUsedLength, RangeReservedLength, ClrInstanceID) : 0 - -#define FireEtXplatGCGlobalHeapHistory_V2(FinalYoungestDesired, NumHeaps, CondemnedGeneration, Gen0ReductionCount, Reason, GlobalMechanisms, ClrInstanceID, PauseMode, MemoryPressure) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCGlobalHeapHistory_V2)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCGlobalHeap_V2(Microsoft_Windows_DotNETRuntimeHandle, &GCGlobalHeapHistory_V2, FinalYoungestDesired, NumHeaps, CondemnedGeneration, Gen0ReductionCount, Reason, GlobalMechanisms, ClrInstanceID, PauseMode, MemoryPressure) : 0 - -#define FireEtXplatGCHeapStats_V1(GenerationSize0, TotalPromotedSize0, GenerationSize1, TotalPromotedSize1, GenerationSize2, TotalPromotedSize2, GenerationSize3, TotalPromotedSize3, FinalizationPromotedSize, FinalizationPromotedCount, PinnedObjectCount, SinkBlockCount, GCHandleCount, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCHeapStats_V1)) ? 
Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCHeapStats_V1(Microsoft_Windows_DotNETRuntimeHandle, &GCHeapStats_V1, GenerationSize0, TotalPromotedSize0, GenerationSize1, TotalPromotedSize1, GenerationSize2, TotalPromotedSize2, GenerationSize3, TotalPromotedSize3, FinalizationPromotedSize, FinalizationPromotedCount, PinnedObjectCount, SinkBlockCount, GCHandleCount, ClrInstanceID) : 0 - -#define FireEtXplatGCJoin_V2(Heap, JoinTime, JoinType, ClrInstanceID, JoinID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCJoin_V2)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCJoin_V2(Microsoft_Windows_DotNETRuntimeHandle, &GCJoin_V2, Heap, JoinTime, JoinType, ClrInstanceID, JoinID) : 0 - -#define FireEtXplatGCMarkFinalizeQueueRoots(HeapNum, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCMarkFinalizeQueueRoots)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCMark(Microsoft_Windows_DotNETRuntimeHandle, &GCMarkFinalizeQueueRoots, HeapNum, ClrInstanceID) : 0 - -#define FireEtXplatGCMarkHandles(HeapNum, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCMarkHandles)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCMark(Microsoft_Windows_DotNETRuntimeHandle, &GCMarkHandles, HeapNum, ClrInstanceID) : 0 - -#define FireEtXplatGCMarkOlderGenerationRoots(HeapNum, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCMarkOlderGenerationRoots)) ? 
Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCMark(Microsoft_Windows_DotNETRuntimeHandle, &GCMarkOlderGenerationRoots, HeapNum, ClrInstanceID) : 0 - -#define FireEtXplatGCMarkStackRoots(HeapNum, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCMarkStackRoots)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCMark(Microsoft_Windows_DotNETRuntimeHandle, &GCMarkStackRoots, HeapNum, ClrInstanceID) : 0 - -#define FireEtXplatGCMarkWithType(HeapNum, ClrInstanceID, Type, Bytes) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCMarkWithType)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCMarkWithType(Microsoft_Windows_DotNETRuntimeHandle, &GCMarkWithType, HeapNum, ClrInstanceID, Type, Bytes) : 0 - -#define FireEtXplatGCPerHeapHistory_V3(ClrInstanceID, FreeListAllocated, FreeListRejected, EndOfSegAllocated, CondemnedAllocated, PinnedAllocated, PinnedAllocatedAdvance, RunningFreeListEfficiency, CondemnReasons0, CondemnReasons1, CompactMechanisms, ExpandMechanisms, HeapIndex, ExtraGen0Commit, Count, Values_Len_, Values) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCPerHeapHistory_V3)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCPerHeapHistory_V3(Microsoft_Windows_DotNETRuntimeHandle, &GCPerHeapHistory_V3, ClrInstanceID, FreeListAllocated, FreeListRejected, EndOfSegAllocated, CondemnedAllocated, PinnedAllocated, PinnedAllocatedAdvance, RunningFreeListEfficiency, CondemnReasons0, CondemnReasons1, CompactMechanisms, ExpandMechanisms, HeapIndex, ExtraGen0Commit, Count, Values_Len_, Values) : 0 - -#define FireEtXplatGCRestartEEBegin_V1(ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCRestartEEBegin_V1)) ? 
Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCNoUserData(Microsoft_Windows_DotNETRuntimeHandle, &GCRestartEEBegin_V1, ClrInstanceID) : 0 - -#define FireEtXplatGCRestartEEEnd_V1(ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCRestartEEEnd_V1)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCNoUserData(Microsoft_Windows_DotNETRuntimeHandle, &GCRestartEEEnd_V1, ClrInstanceID) : 0 - -#define FireEtXplatGCStart_V1(Count, Depth, Reason, Type, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCStart_V1)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCStart_V1(Microsoft_Windows_DotNETRuntimeHandle, &GCStart_V1, Count, Depth, Reason, Type, ClrInstanceID) : 0 - -#define FireEtXplatGCStart_V2(Count, Depth, Reason, Type, ClrInstanceID, ClientSequenceNumber) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCStart_V2)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCStart_V2(Microsoft_Windows_DotNETRuntimeHandle, &GCStart_V2, Count, Depth, Reason, Type, ClrInstanceID, ClientSequenceNumber) : 0 - -#define FireEtXplatGCSuspendEEBegin_V1(Reason, Count, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCSuspendEEBegin_V1)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCSuspendEE_V1(Microsoft_Windows_DotNETRuntimeHandle, &GCSuspendEEBegin_V1, Reason, Count, ClrInstanceID) : 0 - -#define FireEtXPlatGCSuspendEEEnd_V1(ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCSuspendEEEnd_V1)) ? 
Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCNoUserData(Microsoft_Windows_DotNETRuntimeHandle, &GCSuspendEEEnd_V1, ClrInstanceID) : 0 - -#define FireEtXplatGCTerminateConcurrentThread_V1(ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCTerminateConcurrentThread_V1)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCTerminateConcurrentThread(Microsoft_Windows_DotNETRuntimeHandle, &GCTerminateConcurrentThread_V1, ClrInstanceID) : 0 - -#define FireEtXplatGCTriggered(Reason, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &GCTriggered)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCTriggered(Microsoft_Windows_DotNETRuntimeHandle, &GCTriggered, Reason, ClrInstanceID) : 0 - -#define FireEtXplatModuleLoad_V2(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID, ManagedPdbSignature, ManagedPdbAge, ManagedPdbBuildPath, NativePdbSignature, NativePdbAge, NativePdbBuildPath) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &ModuleLoad_V2)) ? Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_ModuleLoadUnload_V2(Microsoft_Windows_DotNETRuntimeHandle, &ModuleLoad_V2, ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID, ManagedPdbSignature, ManagedPdbAge, ManagedPdbBuildPath, NativePdbSignature, NativePdbAge, NativePdbBuildPath) : 0 - -#define FireEtXplatSetGCHandle(HandleID, ObjectID, Kind, Generation, AppDomainID, ClrInstanceID) (MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled && PalEventEnabled(Microsoft_Windows_DotNETRuntimeHandle, &SetGCHandle)) ? 
Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_SetGCHandle(Microsoft_Windows_DotNETRuntimeHandle, &SetGCHandle, HandleID, ObjectID, Kind, Generation, AppDomainID, ClrInstanceID) : 0 - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_BulkType(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t Count, uint16_t ClrInstanceID, ULONG Values_Len_, const void* Values) -{ - EVENT_DATA_DESCRIPTOR EventData[11]; - EventDataDescCreate(&EventData[0], &Count, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &ClrInstanceID, sizeof(uint16_t)); - EventDataDescCreate(&EventData[2], Values, Count * Values_Len_); - return PalEventWrite(RegHandle, Descriptor, 3, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_DestroyGCHandle(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, const void* HandleID, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[2]; - EventDataDescCreate(&EventData[0], &HandleID, sizeof(void*)); - EventDataDescCreate(&EventData[1], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 2, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_Exception(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, LPCWSTR ExceptionType, LPCWSTR ExceptionMessage, const void* ExceptionEIP, uint32_t ExceptionHRESULT, uint16_t ExceptionFlags, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[6]; - EventDataDescCreate(&EventData[0], (ExceptionType != NULL) ? ExceptionType : L"", (ExceptionType != NULL) ? (ULONG)((wcslen(ExceptionType) + 1) * sizeof(WCHAR)) : (ULONG)sizeof(L"")); - EventDataDescCreate(&EventData[1], (ExceptionMessage != NULL) ? ExceptionMessage : L"", (ExceptionMessage != NULL) ? 
(ULONG)((wcslen(ExceptionMessage) + 1) * sizeof(WCHAR)) : (ULONG)sizeof(L"")); - EventDataDescCreate(&EventData[2], &ExceptionEIP, sizeof(void*)); - EventDataDescCreate(&EventData[3], &ExceptionHRESULT, sizeof(uint32_t)); - EventDataDescCreate(&EventData[4], &ExceptionFlags, sizeof(uint16_t)); - EventDataDescCreate(&EventData[5], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 6, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCAllocationTick_V1(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t AllocationAmount, uint32_t AllocationKind, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[3]; - EventDataDescCreate(&EventData[0], &AllocationAmount, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &AllocationKind, sizeof(uint32_t)); - EventDataDescCreate(&EventData[2], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 3, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCAllocationTick_V2(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t AllocationAmount, uint32_t AllocationKind, uint16_t ClrInstanceID, uint64_t AllocationAmount64, void* TypeID, LPCWSTR TypeName, uint32_t HeapIndex) -{ - EVENT_DATA_DESCRIPTOR EventData[7]; - EventDataDescCreate(&EventData[0], &AllocationAmount, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &AllocationKind, sizeof(uint32_t)); - EventDataDescCreate(&EventData[2], &ClrInstanceID, sizeof(uint16_t)); - EventDataDescCreate(&EventData[3], &AllocationAmount64, sizeof(uint64_t)); - EventDataDescCreate(&EventData[4], &TypeID, sizeof(void*)); - EventDataDescCreate(&EventData[5], (TypeName != NULL) ? TypeName : L"", (TypeName != NULL) ? 
(ULONG)((wcslen(TypeName) + 1) * sizeof(WCHAR)) : (ULONG)sizeof(L"")); - EventDataDescCreate(&EventData[6], &HeapIndex, sizeof(uint32_t)); - return PalEventWrite(RegHandle, Descriptor, 7, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCAllocationTick_V3(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t AllocationAmount, uint32_t AllocationKind, uint16_t ClrInstanceID, uint64_t AllocationAmount64, void* TypeID, LPCWSTR TypeName, uint32_t HeapIndex, void* Address) -{ - EVENT_DATA_DESCRIPTOR EventData[8]; - EventDataDescCreate(&EventData[0], &AllocationAmount, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &AllocationKind, sizeof(uint32_t)); - EventDataDescCreate(&EventData[2], &ClrInstanceID, sizeof(uint16_t)); - EventDataDescCreate(&EventData[3], &AllocationAmount64, sizeof(uint64_t)); - EventDataDescCreate(&EventData[4], &TypeID, sizeof(void*)); - EventDataDescCreate(&EventData[5], (TypeName != NULL) ? TypeName : L"", (TypeName != NULL) ? 
(ULONG)((wcslen(TypeName) + 1) * sizeof(WCHAR)) : (ULONG)sizeof(L"")); - EventDataDescCreate(&EventData[6], &HeapIndex, sizeof(uint32_t)); - EventDataDescCreate(&EventData[7], &Address, sizeof(void*)); - return PalEventWrite(RegHandle, Descriptor, 8, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCBulkEdge(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t Index, uint32_t Count, uint16_t ClrInstanceID, ULONG Values_Len_, const void* Values) -{ - EVENT_DATA_DESCRIPTOR EventData[6]; - EventDataDescCreate(&EventData[0], &Index, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &Count, sizeof(uint32_t)); - EventDataDescCreate(&EventData[2], &ClrInstanceID, sizeof(uint16_t)); - EventDataDescCreate(&EventData[3], Values, Count * Values_Len_); - return PalEventWrite(RegHandle, Descriptor, 4, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCBulkMovedObjectRanges(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t Index, uint32_t Count, uint16_t ClrInstanceID, ULONG Values_Len_, const void* Values) -{ - EVENT_DATA_DESCRIPTOR EventData[7]; - EventDataDescCreate(&EventData[0], &Index, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &Count, sizeof(uint32_t)); - EventDataDescCreate(&EventData[2], &ClrInstanceID, sizeof(uint16_t)); - EventDataDescCreate(&EventData[3], Values, Count * Values_Len_); - return PalEventWrite(RegHandle, Descriptor, 4, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCBulkNode(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t Index, uint32_t Count, uint16_t ClrInstanceID, ULONG Values_Len_, const void* Values) -{ - EVENT_DATA_DESCRIPTOR EventData[8]; - EventDataDescCreate(&EventData[0], &Index, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &Count, sizeof(uint32_t)); - EventDataDescCreate(&EventData[2], &ClrInstanceID, 
sizeof(uint16_t)); - EventDataDescCreate(&EventData[3], Values, Count * Values_Len_); - return PalEventWrite(RegHandle, Descriptor, 4, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCBulkRCW(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t Count, uint16_t ClrInstanceID, ULONG Values_Len_, const void* Values) -{ - EVENT_DATA_DESCRIPTOR EventData[9]; - EventDataDescCreate(&EventData[0], &Count, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &ClrInstanceID, sizeof(uint16_t)); - EventDataDescCreate(&EventData[2], Values, Count * Values_Len_); - return PalEventWrite(RegHandle, Descriptor, 3, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCBulkRootCCW(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t Count, uint16_t ClrInstanceID, ULONG Values_Len_, const void* Values) -{ - EVENT_DATA_DESCRIPTOR EventData[10]; - EventDataDescCreate(&EventData[0], &Count, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &ClrInstanceID, sizeof(uint16_t)); - EventDataDescCreate(&EventData[2], Values, Count * Values_Len_); - return PalEventWrite(RegHandle, Descriptor, 3, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCBulkRootConditionalWeakTableElementEdge(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t Index, uint32_t Count, uint16_t ClrInstanceID, ULONG Values_Len_, const void* Values) -{ - EVENT_DATA_DESCRIPTOR EventData[7]; - EventDataDescCreate(&EventData[0], &Index, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &Count, sizeof(uint32_t)); - EventDataDescCreate(&EventData[2], &ClrInstanceID, sizeof(uint16_t)); - EventDataDescCreate(&EventData[3], Values, Count * Values_Len_); - return PalEventWrite(RegHandle, Descriptor, 4, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCBulkRootEdge(REGHANDLE RegHandle, 
const EVENT_DESCRIPTOR * Descriptor, uint32_t Index, uint32_t Count, uint16_t ClrInstanceID, ULONG Values_Len_, const void* Values) -{ - EVENT_DATA_DESCRIPTOR EventData[8]; - EventDataDescCreate(&EventData[0], &Index, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &Count, sizeof(uint32_t)); - EventDataDescCreate(&EventData[2], &ClrInstanceID, sizeof(uint16_t)); - EventDataDescCreate(&EventData[3], Values, Count * Values_Len_); - return PalEventWrite(RegHandle, Descriptor, 4, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCBulkSurvivingObjectRanges(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t Index, uint32_t Count, uint16_t ClrInstanceID, ULONG Values_Len_, const void* Values) -{ - EVENT_DATA_DESCRIPTOR EventData[6]; - EventDataDescCreate(&EventData[0], &Index, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &Count, sizeof(uint32_t)); - EventDataDescCreate(&EventData[2], &ClrInstanceID, sizeof(uint16_t)); - EventDataDescCreate(&EventData[3], Values, Count * Values_Len_); - return PalEventWrite(RegHandle, Descriptor, 4, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCCreateConcurrentThread(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[1]; - EventDataDescCreate(&EventData[0], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 1, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCCreateSegment_V1(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint64_t Address, uint64_t Size, uint32_t Type, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[4]; - EventDataDescCreate(&EventData[0], &Address, sizeof(uint64_t)); - EventDataDescCreate(&EventData[1], &Size, sizeof(uint64_t)); - EventDataDescCreate(&EventData[2], &Type, sizeof(uint32_t)); - 
EventDataDescCreate(&EventData[3], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 4, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCEnd_V1(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t Count, uint32_t Depth, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[3]; - EventDataDescCreate(&EventData[0], &Count, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &Depth, sizeof(uint32_t)); - EventDataDescCreate(&EventData[2], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 3, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCFreeSegment_V1(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint64_t Address, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[2]; - EventDataDescCreate(&EventData[0], &Address, sizeof(uint64_t)); - EventDataDescCreate(&EventData[1], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 2, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCGenerationRange(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint8_t Generation, const void* RangeStart, uint64_t RangeUsedLength, uint64_t RangeReservedLength, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[5]; - EventDataDescCreate(&EventData[0], &Generation, sizeof(uint8_t)); - EventDataDescCreate(&EventData[1], &RangeStart, sizeof(void*)); - EventDataDescCreate(&EventData[2], &RangeUsedLength, sizeof(uint64_t)); - EventDataDescCreate(&EventData[3], &RangeReservedLength, sizeof(uint64_t)); - EventDataDescCreate(&EventData[4], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 5, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCGlobalHeap_V2(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint64_t 
FinalYoungestDesired, int32_t NumHeaps, uint32_t CondemnedGeneration, uint32_t Gen0ReductionCount, uint32_t Reason, uint32_t GlobalMechanisms, uint16_t ClrInstanceID, uint32_t PauseMode, uint32_t MemoryPressure) -{ - EVENT_DATA_DESCRIPTOR EventData[9]; - EventDataDescCreate(&EventData[0], &FinalYoungestDesired, sizeof(uint64_t)); - EventDataDescCreate(&EventData[1], &NumHeaps, sizeof(int32_t)); - EventDataDescCreate(&EventData[2], &CondemnedGeneration, sizeof(uint32_t)); - EventDataDescCreate(&EventData[3], &Gen0ReductionCount, sizeof(uint32_t)); - EventDataDescCreate(&EventData[4], &Reason, sizeof(uint32_t)); - EventDataDescCreate(&EventData[5], &GlobalMechanisms, sizeof(uint32_t)); - EventDataDescCreate(&EventData[6], &ClrInstanceID, sizeof(uint16_t)); - EventDataDescCreate(&EventData[7], &PauseMode, sizeof(uint32_t)); - EventDataDescCreate(&EventData[8], &MemoryPressure, sizeof(uint32_t)); - return PalEventWrite(RegHandle, Descriptor, 9, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCHeapStats_V1(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint64_t GenerationSize0, uint64_t TotalPromotedSize0, uint64_t GenerationSize1, uint64_t TotalPromotedSize1, uint64_t GenerationSize2, uint64_t TotalPromotedSize2, uint64_t GenerationSize3, uint64_t TotalPromotedSize3, uint64_t FinalizationPromotedSize, uint64_t FinalizationPromotedCount, uint32_t PinnedObjectCount, uint32_t SinkBlockCount, uint32_t GCHandleCount, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[14]; - EventDataDescCreate(&EventData[0], &GenerationSize0, sizeof(uint64_t)); - EventDataDescCreate(&EventData[1], &TotalPromotedSize0, sizeof(uint64_t)); - EventDataDescCreate(&EventData[2], &GenerationSize1, sizeof(uint64_t)); - EventDataDescCreate(&EventData[3], &TotalPromotedSize1, sizeof(uint64_t)); - EventDataDescCreate(&EventData[4], &GenerationSize2, sizeof(uint64_t)); - EventDataDescCreate(&EventData[5], &TotalPromotedSize2, 
sizeof(uint64_t)); - EventDataDescCreate(&EventData[6], &GenerationSize3, sizeof(uint64_t)); - EventDataDescCreate(&EventData[7], &TotalPromotedSize3, sizeof(uint64_t)); - EventDataDescCreate(&EventData[8], &FinalizationPromotedSize, sizeof(uint64_t)); - EventDataDescCreate(&EventData[9], &FinalizationPromotedCount, sizeof(uint64_t)); - EventDataDescCreate(&EventData[10], &PinnedObjectCount, sizeof(uint32_t)); - EventDataDescCreate(&EventData[11], &SinkBlockCount, sizeof(uint32_t)); - EventDataDescCreate(&EventData[12], &GCHandleCount, sizeof(uint32_t)); - EventDataDescCreate(&EventData[13], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 14, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCJoin_V2(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t Heap, uint32_t JoinTime, uint32_t JoinType, uint16_t ClrInstanceID, uint32_t JoinID) -{ - EVENT_DATA_DESCRIPTOR EventData[5]; - EventDataDescCreate(&EventData[0], &Heap, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &JoinTime, sizeof(uint32_t)); - EventDataDescCreate(&EventData[2], &JoinType, sizeof(uint32_t)); - EventDataDescCreate(&EventData[3], &ClrInstanceID, sizeof(uint16_t)); - EventDataDescCreate(&EventData[4], &JoinID, sizeof(uint32_t)); - return PalEventWrite(RegHandle, Descriptor, 5, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCMark(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t HeapNum, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[2]; - EventDataDescCreate(&EventData[0], &HeapNum, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 2, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCMarkWithType(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t HeapNum, uint16_t 
ClrInstanceID, uint32_t Type, uint64_t Bytes) -{ - EVENT_DATA_DESCRIPTOR EventData[4]; - EventDataDescCreate(&EventData[0], &HeapNum, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &ClrInstanceID, sizeof(uint16_t)); - EventDataDescCreate(&EventData[2], &Type, sizeof(uint32_t)); - EventDataDescCreate(&EventData[3], &Bytes, sizeof(uint64_t)); - return PalEventWrite(RegHandle, Descriptor, 4, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCNoUserData(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[1]; - EventDataDescCreate(&EventData[0], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 1, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCPerHeapHistory_V3(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint16_t ClrInstanceID, const void* FreeListAllocated, const void* FreeListRejected, const void* EndOfSegAllocated, const void* CondemnedAllocated, const void* PinnedAllocated, const void* PinnedAllocatedAdvance, uint32_t RunningFreeListEfficiency, uint32_t CondemnReasons0, uint32_t CondemnReasons1, uint32_t CompactMechanisms, uint32_t ExpandMechanisms, uint32_t HeapIndex, const void* ExtraGen0Commit, uint32_t Count, ULONG Values_Len_, const void* Values) -{ - EVENT_DATA_DESCRIPTOR EventData[26]; - EventDataDescCreate(&EventData[0], &ClrInstanceID, sizeof(uint16_t)); - EventDataDescCreate(&EventData[1], &FreeListAllocated, sizeof(void*)); - EventDataDescCreate(&EventData[2], &FreeListRejected, sizeof(void*)); - EventDataDescCreate(&EventData[3], &EndOfSegAllocated, sizeof(void*)); - EventDataDescCreate(&EventData[4], &CondemnedAllocated, sizeof(void*)); - EventDataDescCreate(&EventData[5], &PinnedAllocated, sizeof(void*)); - EventDataDescCreate(&EventData[6], &PinnedAllocatedAdvance, sizeof(void*)); - EventDataDescCreate(&EventData[7], 
&RunningFreeListEfficiency, sizeof(uint32_t)); - EventDataDescCreate(&EventData[8], &CondemnReasons0, sizeof(uint32_t)); - EventDataDescCreate(&EventData[9], &CondemnReasons1, sizeof(uint32_t)); - EventDataDescCreate(&EventData[10], &CompactMechanisms, sizeof(uint32_t)); - EventDataDescCreate(&EventData[11], &ExpandMechanisms, sizeof(uint32_t)); - EventDataDescCreate(&EventData[12], &HeapIndex, sizeof(uint32_t)); - EventDataDescCreate(&EventData[13], &ExtraGen0Commit, sizeof(void*)); - EventDataDescCreate(&EventData[14], &Count, sizeof(uint32_t)); - EventDataDescCreate(&EventData[15], Values, Count * Values_Len_); - return PalEventWrite(RegHandle, Descriptor, 16, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCStart_V1(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t Count, uint32_t Depth, uint32_t Reason, uint32_t Type, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[5]; - EventDataDescCreate(&EventData[0], &Count, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &Depth, sizeof(uint32_t)); - EventDataDescCreate(&EventData[2], &Reason, sizeof(uint32_t)); - EventDataDescCreate(&EventData[3], &Type, sizeof(uint32_t)); - EventDataDescCreate(&EventData[4], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 5, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCStart_V2(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t Count, uint32_t Depth, uint32_t Reason, uint32_t Type, uint16_t ClrInstanceID, uint64_t ClientSequenceNumber) -{ - EVENT_DATA_DESCRIPTOR EventData[6]; - EventDataDescCreate(&EventData[0], &Count, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &Depth, sizeof(uint32_t)); - EventDataDescCreate(&EventData[2], &Reason, sizeof(uint32_t)); - EventDataDescCreate(&EventData[3], &Type, sizeof(uint32_t)); - EventDataDescCreate(&EventData[4], &ClrInstanceID, sizeof(uint16_t)); 
- EventDataDescCreate(&EventData[5], &ClientSequenceNumber, sizeof(uint64_t)); - return PalEventWrite(RegHandle, Descriptor, 6, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCSuspendEE_V1(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t Reason, uint32_t Count, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[3]; - EventDataDescCreate(&EventData[0], &Reason, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &Count, sizeof(uint32_t)); - EventDataDescCreate(&EventData[2], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 3, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCTerminateConcurrentThread(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[1]; - EventDataDescCreate(&EventData[0], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 1, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_GCTriggered(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint32_t Reason, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[2]; - EventDataDescCreate(&EventData[0], &Reason, sizeof(uint32_t)); - EventDataDescCreate(&EventData[1], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 2, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_ModuleLoadUnload_V2(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, uint64_t ModuleID, uint64_t AssemblyID, uint32_t ModuleFlags, uint32_t Reserved1, LPCWSTR ModuleILPath, LPCWSTR ModuleNativePath, uint16_t ClrInstanceID, const GUID* ManagedPdbSignature, uint32_t ManagedPdbAge, LPCWSTR ManagedPdbBuildPath, const GUID* NativePdbSignature, uint32_t NativePdbAge, LPCWSTR NativePdbBuildPath) -{ - EVENT_DATA_DESCRIPTOR EventData[13]; - 
EventDataDescCreate(&EventData[0], &ModuleID, sizeof(uint64_t)); - EventDataDescCreate(&EventData[1], &AssemblyID, sizeof(uint64_t)); - EventDataDescCreate(&EventData[2], &ModuleFlags, sizeof(uint32_t)); - EventDataDescCreate(&EventData[3], &Reserved1, sizeof(uint32_t)); - EventDataDescCreate(&EventData[4], (ModuleILPath != NULL) ? ModuleILPath : L"", (ModuleILPath != NULL) ? (ULONG)((wcslen(ModuleILPath) + 1) * sizeof(WCHAR)) : (ULONG)sizeof(L"")); - EventDataDescCreate(&EventData[5], (ModuleNativePath != NULL) ? ModuleNativePath : L"", (ModuleNativePath != NULL) ? (ULONG)((wcslen(ModuleNativePath) + 1) * sizeof(WCHAR)) : (ULONG)sizeof(L"")); - EventDataDescCreate(&EventData[6], &ClrInstanceID, sizeof(uint16_t)); - EventDataDescCreate(&EventData[7], ManagedPdbSignature, sizeof(*(ManagedPdbSignature))); - EventDataDescCreate(&EventData[8], &ManagedPdbAge, sizeof(uint32_t)); - EventDataDescCreate(&EventData[9], (ManagedPdbBuildPath != NULL) ? ManagedPdbBuildPath : L"", (ManagedPdbBuildPath != NULL) ? (ULONG)((wcslen(ManagedPdbBuildPath) + 1) * sizeof(WCHAR)) : (ULONG)sizeof(L"")); - EventDataDescCreate(&EventData[10], NativePdbSignature, sizeof(*(NativePdbSignature))); - EventDataDescCreate(&EventData[11], &NativePdbAge, sizeof(uint32_t)); - EventDataDescCreate(&EventData[12], (NativePdbBuildPath != NULL) ? NativePdbBuildPath : L"", (NativePdbBuildPath != NULL) ? 
(ULONG)((wcslen(NativePdbBuildPath) + 1) * sizeof(WCHAR)) : (ULONG)sizeof(L"")); - return PalEventWrite(RegHandle, Descriptor, 13, EventData); -} - -RH_ETW_INLINE uint32_t -Template_MICROSOFT_WINDOWS_NATIVEAOT_GC_PUBLIC_PROVIDER_SetGCHandle(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor, const void* HandleID, const void* ObjectID, uint32_t Kind, uint32_t Generation, uint64_t AppDomainID, uint16_t ClrInstanceID) -{ - EVENT_DATA_DESCRIPTOR EventData[6]; - EventDataDescCreate(&EventData[0], &HandleID, sizeof(void*)); - EventDataDescCreate(&EventData[1], &ObjectID, sizeof(void*)); - EventDataDescCreate(&EventData[2], &Kind, sizeof(uint32_t)); - EventDataDescCreate(&EventData[3], &Generation, sizeof(uint32_t)); - EventDataDescCreate(&EventData[4], &AppDomainID, sizeof(uint64_t)); - EventDataDescCreate(&EventData[5], &ClrInstanceID, sizeof(uint16_t)); - return PalEventWrite(RegHandle, Descriptor, 6, EventData); -} - -RH_ETW_INLINE uint32_t -TemplateEventDescriptor(REGHANDLE RegHandle, const EVENT_DESCRIPTOR * Descriptor) -{ - return PalEventWrite(RegHandle, Descriptor, 0, NULL); -} - -#else // FEATURE_ETW - -#define ETW_EVENT_ENABLED(Context, EventDescriptor) false - -#define FireEtXplatDestroyGCHandle(HandleID, ClrInstanceID) -#define FireEtXplatExceptionThrown_V1(ExceptionType, ExceptionMessage, ExceptionEIP, ExceptionHRESULT, ExceptionFlags, ClrInstanceID) -#define FireEtXplatGCAllocationTick_V1(AllocationAmount, AllocationKind, ClrInstanceID) -#define FireEtXplatGCAllocationTick_V2(AllocationAmount, AllocationKind, ClrInstanceID, AllocationAmount64, TypeID, TypeName, HeapIndex) -#define FireEtXplatGCAllocationTick_V3(AllocationAmount, AllocationKind, ClrInstanceID, AllocationAmount64, TypeID, TypeName, HeapIndex, Address) -#define FireEtXplatGCBulkEdge(Index, Count, ClrInstanceID, Values_Len_, Values) -#define FireEtXplatGCBulkMovedObjectRanges(Index, Count, ClrInstanceID, Values_Len_, Values) -#define FireEtXplatGCBulkNode(Index, Count, ClrInstanceID, 
Values_Len_, Values) -#define FireEtXplatGCBulkRCW(Count, ClrInstanceID, Values_Len_, Values) -#define FireEtXplatGCBulkRootCCW(Count, ClrInstanceID, Values_Len_, Values) -#define FireEtXplatGCBulkRootConditionalWeakTableElementEdge(Index, Count, ClrInstanceID, Values_Len_, Values) -#define FireEtXplatGCBulkRootEdge(Index, Count, ClrInstanceID, Values_Len_, Values) -#define FireEtXplatGCBulkSurvivingObjectRanges(Index, Count, ClrInstanceID, Values_Len_, Values) -#define FireEtXplatGCCreateConcurrentThread_V1(ClrInstanceID) -#define FireEtXplatGCCreateSegment_V1(Address, Size, Type, ClrInstanceID) -#define FireEtXplatGCEnd_V1(Count, Depth, ClrInstanceID) -#define FireEtXplatGCFreeSegment_V1(Address, ClrInstanceID) -#define FireEtXplatGCGenerationRange(Generation, RangeStart, RangeUsedLength, RangeReservedLength, ClrInstanceID) -#define FireEtXplatGCGlobalHeapHistory_V2(FinalYoungestDesired, NumHeaps, CondemnedGeneration, Gen0ReductionCount, Reason, GlobalMechanisms, ClrInstanceID, PauseMode, MemoryPressure) -#define FireEtXplatGCHeapStats_V1(GenerationSize0, TotalPromotedSize0, GenerationSize1, TotalPromotedSize1, GenerationSize2, TotalPromotedSize2, GenerationSize3, TotalPromotedSize3, FinalizationPromotedSize, FinalizationPromotedCount, PinnedObjectCount, SinkBlockCount, GCHandleCount, ClrInstanceID) -#define FireEtXplatGCJoin_V2(Heap, JoinTime, JoinType, ClrInstanceID, JoinID) -#define FireEtXplatGCMarkFinalizeQueueRoots(HeapNum, ClrInstanceID) -#define FireEtXplatGCMarkHandles(HeapNum, ClrInstanceID) -#define FireEtXplatGCMarkOlderGenerationRoots(HeapNum, ClrInstanceID) -#define FireEtXplatGCMarkStackRoots(HeapNum, ClrInstanceID) -#define FireEtXplatGCMarkWithType(HeapNum, ClrInstanceID, Type, Bytes) -#define FireEtXplatGCPerHeapHistory_V3(ClrInstanceID, FreeListAllocated, FreeListRejected, EndOfSegAllocated, CondemnedAllocated, PinnedAllocated, PinnedAllocatedAdvance, RunningFreeListEfficiency, CondemnReasons0, CondemnReasons1, CompactMechanisms, 
ExpandMechanisms, HeapIndex, ExtraGen0Commit, Count, Values_Len_, Values) -#define FireEtXplatGCRestartEEBegin_V1(ClrInstanceID) -#define FireEtXplatGCRestartEEEnd_V1(ClrInstanceID) -#define FireEtXplatGCStart_V1(Count, Depth, Reason, Type, ClrInstanceID) -#define FireEtXplatGCStart_V2(Count, Depth, Reason, Type, ClrInstanceID, ClientSequenceNumber) -#define FireEtXplatGCSuspendEEBegin_V1(Reason, Count, ClrInstanceID) -#define FireEtXPlatGCSuspendEEEnd_V1(ClrInstanceID) -#define FireEtXplatGCTerminateConcurrentThread_V1(ClrInstanceID) -#define FireEtXplatGCTriggered(Reason, ClrInstanceID) -#define FireEtXplatModuleLoad_V2(ModuleID, AssemblyID, ModuleFlags, Reserved1, ModuleILPath, ModuleNativePath, ClrInstanceID, ManagedPdbSignature, ManagedPdbAge, ManagedPdbBuildPath, NativePdbSignature, NativePdbAge, NativePdbBuildPath) -#define FireEtXplatSetGCHandle(HandleID, ObjectID, Kind, Generation, AppDomainID, ClrInstanceID) - -#endif // FEATURE_ETW - -#define FireEtXplatGCFinalizersEnd_V1(Count, ClrInstanceID) 0 -#define FireEtXplatGCHeapStats_V2(GenerationSize0,TotalPromotedSize0,GenerationSize1,TotalPromotedSize1,GenerationSize2,TotalPromotedSize2,GenerationSize3,TotalPromotedSize3,FinalizationPromotedSize,FinalizationPromotedCount,PinnedObjectCount,SinkBlockCount,GCHandleCount,ClrInstanceID,GenerationSize4,TotalPromotedSize4) 0 -#define FireEtXplatGCSuspendEEEnd_V1(ClrInstanceID) 0 -#define FireEtXplatGCAllocationTick_V4(AllocationAmount,AllocationKind,ClrInstanceID,AllocationAmount64,TypeID,TypeName,HeapIndex,Address,ObjectSize) 0 -#define FireEtXplatGCFinalizersBegin_V1(ClrInstanceID) 0 -#define FireEtXplatGCSampledObjectAllocationHigh(Address,TypeID,ObjectCountForTypeSample,TotalSizeForTypeSample,ClrInstanceID) 0 -#define FireEtXplatFinalizeObject(TypeID,ObjectID,ClrInstanceID) 0 -#define FireEtXplatGCSampledObjectAllocationLow(Address,TypeID,ObjectCountForTypeSample,TotalSizeForTypeSample,ClrInstanceID) 0 -#define 
FireEtXplatPinObjectAtGCTime(HandleID,ObjectID,ObjectSize,TypeName,ClrInstanceID) 0 -#define FireEtXplatGCBulkRootStaticVar(Count,AppDomainID,ClrInstanceID,Values_ElementSize, Values) 0 -#define FireEtXplatThreadPoolWorkerThreadStart(ActiveWorkerThreadCount,RetiredWorkerThreadCount,ClrInstanceID) 0 -#define FireEtXplatThreadPoolWorkerThreadStop(ActiveWorkerThreadCount,RetiredWorkerThreadCount,ClrInstanceID) 0 -#define FireEtXplatThreadPoolWorkerThreadAdjustmentSample(Throughput,ClrInstanceID) 0 -#define FireEtXplatThreadPoolWorkerThreadAdjustmentAdjustment(AverageThroughput,NewWorkerThreadCount,Reason,ClrInstanceID) 0 -#define FireEtXplatThreadPoolWorkerThreadAdjustmentStats(Duration,Throughput,ThreadWave,ThroughputWave,ThroughputErrorEstimate,AverageThroughputErrorEstimate,ThroughputRatio,Confidence,NewControlSetting,NewThreadWaveMagnitude,ClrInstanceID) 0 -#define FireEtXplatThreadPoolWorkerThreadWait(ActiveWorkerThreadCount,RetiredWorkerThreadCount,ClrInstanceID) 0 -#define FireEtXplatThreadPoolMinMaxThreads(MinWorkerThreads,MaxWorkerThreads,MinIOCompletionThreads,MaxIOCompletionThreads,ClrInstanceID) 0 -#define FireEtXplatThreadPoolWorkingThreadCount(Count,ClrInstanceID) 0 -#define FireEtXplatThreadPoolIOEnqueue(NativeOverlapped,Overlapped,MultiDequeues,ClrInstanceID) 0 -#define FireEtXplatThreadPoolIODequeue(NativeOverlapped,Overlapped,ClrInstanceID) 0 -#define FireEtXplatThreadPoolIOPack(NativeOverlapped,Overlapped,ClrInstanceID) 0 -#define FireEtXplatContentionStart_V2(ContentionFlags,ClrInstanceID,LockID,AssociatedObjectID,LockOwnerThreadID) 0 -#define FireEtXplatContentionStop_V1(ContentionFlags,ClrInstanceID,DurationNs) 0 -#define FireEtXplatContentionLockCreated(LockID,AssociatedObjectID,ClrInstanceID) 0 -#define FireEtXplatIncreaseMemoryPressure(BytesAllocated,ClrInstanceID) 0 -#define FireEtXplatDecreaseMemoryPressure(BytesFreed,ClrInstanceID) 0 -#define 
FireEtXplatGCGlobalHeapHistory_V4(FinalYoungestDesired,NumHeaps,CondemnedGeneration,Gen0ReductionCount,Reason,GlobalMechanisms,ClrInstanceID,PauseMode,MemoryPressure,CondemnReasons0,CondemnReasons1,Count,Values_ElementSize, Values) 0 -#define FireEtXplatGenAwareBegin(Count,ClrInstanceID) 0 -#define FireEtXplatGenAwareEnd(Count,ClrInstanceID) 0 -#define FireEtXplatGCLOHCompact(ClrInstanceID,Count,Values_ElementSize, Values) 0 -#define FireEtXplatGCFitBucketInfo(ClrInstanceID,BucketKind,TotalSize,Count,Values_ElementSize, Values) 0 - - -#endif // !__RH_ETW_DEFS_INCLUDED diff --git a/src/coreclr/nativeaot/Runtime/PalRedhawk.h b/src/coreclr/nativeaot/Runtime/PalRedhawk.h index f79926dc001fe..3980e4f22a666 100644 --- a/src/coreclr/nativeaot/Runtime/PalRedhawk.h +++ b/src/coreclr/nativeaot/Runtime/PalRedhawk.h @@ -536,46 +536,6 @@ typedef enum _EXCEPTION_DISPOSITION { #endif // !_INC_WINDOWS #endif // !DACCESS_COMPILE -typedef uint64_t REGHANDLE; -typedef uint64_t TRACEHANDLE; - -#ifndef _EVNTPROV_H_ -struct EVENT_DATA_DESCRIPTOR -{ - uint64_t Ptr; - uint32_t Size; - uint32_t Reserved; -}; - -struct EVENT_DESCRIPTOR -{ - uint16_t Id; - uint8_t Version; - uint8_t Channel; - uint8_t Level; - uint8_t Opcode; - uint16_t Task; - uint64_t Keyword; - -}; - -struct EVENT_FILTER_DESCRIPTOR -{ - uint64_t Ptr; - uint32_t Size; - uint32_t Type; -}; - -__forceinline -void -EventDataDescCreate(_Out_ EVENT_DATA_DESCRIPTOR * EventDataDescriptor, _In_opt_ const void * DataPtr, uint32_t DataSize) -{ - EventDataDescriptor->Ptr = (uint64_t)DataPtr; - EventDataDescriptor->Size = DataSize; - EventDataDescriptor->Reserved = 0; -} -#endif // _EVNTPROV_H_ - extern uint32_t g_RhNumberOfProcessors; #ifdef TARGET_UNIX @@ -726,13 +686,6 @@ typedef void (*PalHijackCallback)(_In_ NATIVE_CONTEXT* pThreadContext, _In_opt_ REDHAWK_PALIMPORT void REDHAWK_PALAPI PalHijack(HANDLE hThread, _In_opt_ void* pThreadToHijack); REDHAWK_PALIMPORT UInt32_BOOL REDHAWK_PALAPI PalRegisterHijackCallback(_In_ 
PalHijackCallback callback); -#ifdef FEATURE_ETW -REDHAWK_PALIMPORT bool REDHAWK_PALAPI PalEventEnabled(REGHANDLE regHandle, _In_ const EVENT_DESCRIPTOR* eventDescriptor); -REDHAWK_PALIMPORT uint32_t REDHAWK_PALAPI PalEventRegister(const GUID * arg1, void * arg2, void * arg3, REGHANDLE * arg4); -REDHAWK_PALIMPORT uint32_t REDHAWK_PALAPI PalEventUnregister(REGHANDLE arg1); -REDHAWK_PALIMPORT uint32_t REDHAWK_PALAPI PalEventWrite(REGHANDLE arg1, const EVENT_DESCRIPTOR * arg2, uint32_t arg3, EVENT_DATA_DESCRIPTOR * arg4); -#endif - REDHAWK_PALIMPORT UInt32_BOOL REDHAWK_PALAPI PalAllocateThunksFromTemplate(_In_ HANDLE hTemplateModule, uint32_t templateRva, size_t templateSize, _Outptr_result_bytebuffer_(templateSize) void** newThunksOut); REDHAWK_PALIMPORT UInt32_BOOL REDHAWK_PALAPI PalFreeThunksFromTemplate(_In_ void *pBaseAddress); diff --git a/src/coreclr/nativeaot/Runtime/eventpipe/CMakeLists.txt b/src/coreclr/nativeaot/Runtime/eventpipe/CMakeLists.txt index 674a26218da17..19ea228f01b62 100644 --- a/src/coreclr/nativeaot/Runtime/eventpipe/CMakeLists.txt +++ b/src/coreclr/nativeaot/Runtime/eventpipe/CMakeLists.txt @@ -39,6 +39,32 @@ set_source_files_properties(${EventingHeaders} PROPERTIES GENERATED TRUE) add_custom_target(aot_eventing_headers DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/aot_eventing_headers.timestamp) +if(CLR_CMAKE_TARGET_WIN32) + + set(ETW_PROVIDER_SCRIPT ${CLR_DIR}/scripts/genEtwProvider.py) + + set (ETW_PROVIDER_OUTPUTS + ${GENERATED_INCLUDE_DIR}/etw/ClrEtwAll.h + ${GENERATED_INCLUDE_DIR}/etw/ClrEtwAll.rc + ${GENERATED_INCLUDE_DIR}/etw/etwmacros.h + ${GENERATED_INCLUDE_DIR}/etw/ClrEtwAll_MSG00001.bin + ${GENERATED_INCLUDE_DIR}/etw/ClrEtwAllTEMP.bin + ${GENERATED_INCLUDE_DIR}/clrxplatevents.h + ) + + set_source_files_properties(${ETW_PROVIDER_OUTPUTS} PROPERTIES GENERATED TRUE) + + add_custom_command( + OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/eventprovider.timestamp + COMMAND ${Python_EXECUTABLE} ${ETW_PROVIDER_SCRIPT} 
--man ${EVENT_MANIFEST} --exc ${EVENT_EXCLUSIONS} --intermediate ${GENERATED_INCLUDE_DIR} + COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/eventprovider.timestamp + DEPENDS ${EVENT_MANIFEST} ${EVENT_EXCLUSIONS} ${ETW_PROVIDER_SCRIPT} + ) + + add_custom_target(aot_etw_headers DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/eventprovider.timestamp) + +endif() + # EventPipe provider source, currently dotnetruntime provider and helper file set(GENERATE_EVENTPIPE_SCRIPT ${CLR_DIR}/scripts/genEventPipe.py) set(GENERATE_COMMAND ${Python_EXECUTABLE} ${GENERATE_EVENTPIPE_SCRIPT} --man ${EVENT_MANIFEST} --exc ${EVENT_EXCLUSIONS} --inc ${EVENT_INCLUSION_FILE} --intermediate ${CMAKE_CURRENT_BINARY_DIR} --runtimeflavor nativeaot ${NONEXTERN_ARG}) @@ -73,6 +99,11 @@ set(EP_GENERATED_HEADER_PATH "${GENERATED_INCLUDE_DIR}") include (${CLR_SRC_NATIVE_DIR}/eventpipe/configure.cmake) include_directories(${EP_GENERATED_HEADER_PATH}) +if (CLR_CMAKE_TARGET_WIN32) + set(EP_ETW_GENERATED_HEADER_PATH "${GENERATED_INCLUDE_DIR}/etw") + include_directories(${EP_ETW_GENERATED_HEADER_PATH}) +endif() + set(AOT_EVENTPIPE_SHIM_DIR "${CMAKE_CURRENT_SOURCE_DIR}") set (CONTAINER_SOURCES "") @@ -151,7 +182,7 @@ if(CLR_CMAKE_HOST_UNIX) set_source_files_properties(${EVENTPIPE_SOURCES} PROPERTIES COMPILE_OPTIONS -xc++) endif(CLR_CMAKE_HOST_UNIX) -if (WIN32) +if (CLR_CMAKE_TARGET_WIN32) set_source_files_properties(${EVENTPIPE_SOURCES} PROPERTIES COMPILE_FLAGS "/FI\"${RUNTIME_DIR}/eventpipe/NativeaotEventPipeSupport.h\"") endif() @@ -190,8 +221,16 @@ if (FEATURE_EVENT_TRACE) ${RUNTIME_DIR}/eventtrace_gcheap.cpp ) endif() + + if(CLR_CMAKE_TARGET_WIN32) + set_source_files_properties(${GEN_EVENTPIPE_PROVIDER_SOURCES} PROPERTIES COMPILE_FLAGS "/FI\"${RUNTIME_DIR}/eventpipe/NativeaotEventPipeSupport.h\"") + set_source_files_properties(${GEN_EVENTPIPE_PLAT_AGNOSTIC_SOURCES} PROPERTIES COMPILE_FLAGS "/FI\"${RUNTIME_DIR}/eventpipe/NativeaotEventPipeSupport.h\"") + 
set_source_files_properties(${AOT_EVENTTRACE_SOURCES} PROPERTIES COMPILE_FLAGS "/FI\"${RUNTIME_DIR}/eventpipe/NativeaotEventPipeSupport.h\"") + endif() endif() + + list(APPEND EVENTPIPE_SOURCES ${AOT_EVENTPIPE_SHIM_SOURCES} ${AOT_EVENTPIPE_SHIM_HEADERS} @@ -215,6 +254,9 @@ list(APPEND AOT_EVENTPIPE_DISABLED_SOURCES add_library(eventpipe-enabled STATIC ${EVENTPIPE_SOURCES}) add_dependencies(eventpipe-enabled aot_eventing_headers) +if(CLR_CMAKE_TARGET_WIN32) + add_dependencies(eventpipe-enabled aot_etw_headers) +endif() add_library(eventpipe-disabled STATIC ${AOT_EVENTPIPE_DISABLED_SOURCES}) add_dependencies(eventpipe-disabled aot_eventing_headers) @@ -222,6 +264,7 @@ add_dependencies(eventpipe-disabled aot_eventing_headers) if (CLR_CMAKE_TARGET_WIN32) add_library(eventpipe-enabled.GuardCF STATIC ${EVENTPIPE_SOURCES}) add_dependencies(eventpipe-enabled.GuardCF aot_eventing_headers) + add_dependencies(eventpipe-enabled.GuardCF aot_etw_headers) add_library(eventpipe-disabled.GuardCF STATIC ${AOT_EVENTPIPE_DISABLED_SOURCES}) add_dependencies(eventpipe-disabled.GuardCF aot_eventing_headers) set_target_properties(eventpipe-enabled.GuardCF PROPERTIES CLR_CONTROL_FLOW_GUARD ON) @@ -233,4 +276,4 @@ install_static_library(eventpipe-disabled aotsdk nativeaot) if (CLR_CMAKE_TARGET_WIN32) install_static_library(eventpipe-enabled.GuardCF aotsdk nativeaot) install_static_library(eventpipe-disabled.GuardCF aotsdk nativeaot) -endif (CLR_CMAKE_TARGET_WIN32) +endif (CLR_CMAKE_TARGET_WIN32) \ No newline at end of file diff --git a/src/coreclr/nativeaot/Runtime/eventtrace.cpp b/src/coreclr/nativeaot/Runtime/eventtrace.cpp index c598b71fb7648..b4c72a41bee28 100644 --- a/src/coreclr/nativeaot/Runtime/eventtrace.cpp +++ b/src/coreclr/nativeaot/Runtime/eventtrace.cpp @@ -78,8 +78,8 @@ void EventTracing_Initialize() MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.IsEnabled = FALSE; // Register the ETW providers with the system. 
- RH_ETW_REGISTER_Microsoft_Windows_DotNETRuntimePrivate(); - RH_ETW_REGISTER_Microsoft_Windows_DotNETRuntime(); + EventRegisterMicrosoft_Windows_DotNETRuntimePrivate(); + EventRegisterMicrosoft_Windows_DotNETRuntime(); MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context.RegistrationHandle = Microsoft_Windows_DotNETRuntimePrivateHandle; MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context.RegistrationHandle = Microsoft_Windows_DotNETRuntimeHandle; @@ -94,10 +94,6 @@ enum CallbackProviderIndex DotNETRuntimePrivate = 3 }; -// @TODO -int const EVENT_CONTROL_CODE_ENABLE_PROVIDER=1; -int const EVENT_CONTROL_CODE_DISABLE_PROVIDER=0; - void EtwCallbackCommon( CallbackProviderIndex ProviderIndex, ULONG ControlCode, @@ -180,7 +176,7 @@ void EtwCallbackCommon( #ifdef FEATURE_ETW void EtwCallback( - GUID * /*SourceId*/, + const GUID * /*SourceId*/, uint32_t IsEnabled, uint8_t Level, uint64_t MatchAnyKeyword, diff --git a/src/coreclr/nativeaot/Runtime/eventtrace_bulktype.cpp b/src/coreclr/nativeaot/Runtime/eventtrace_bulktype.cpp index cdf351b853ca8..9adeb16d6d02e 100644 --- a/src/coreclr/nativeaot/Runtime/eventtrace_bulktype.cpp +++ b/src/coreclr/nativeaot/Runtime/eventtrace_bulktype.cpp @@ -15,8 +15,6 @@ #if defined(FEATURE_EVENT_TRACE) -#define Win32EventWrite PalEventWrite - //--------------------------------------------------------------------------------------- // BulkTypeValue / BulkTypeEventLogger: These take care of batching up types so they can // be logged via ETW in bulk @@ -128,7 +126,7 @@ void BulkTypeEventLogger::FireBulkTypeEvent() } } - Win32EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &BulkType, iDesc, EventData); + EventWrite(Microsoft_Windows_DotNETRuntimeHandle, &BulkType, iDesc, EventData); // Reset state m_nBulkTypeValueCount = 0; diff --git a/src/coreclr/nativeaot/Runtime/eventtrace_etw.h b/src/coreclr/nativeaot/Runtime/eventtrace_etw.h index 49540810150f9..d4994f68d928e 100644 --- a/src/coreclr/nativeaot/Runtime/eventtrace_etw.h +++ 
b/src/coreclr/nativeaot/Runtime/eventtrace_etw.h @@ -10,7 +10,25 @@ #ifdef FEATURE_ETW -#include "EtwEvents.h" +#include +extern "C" { + VOID EtwCallback( + _In_ const GUID * SourceId, + _In_ uint32_t ControlCode, + _In_ uint8_t Level, + _In_ uint64_t MatchAnyKeyword, + _In_ uint64_t MatchAllKeyword, + _In_opt_ EVENT_FILTER_DESCRIPTOR * FilterData, + _Inout_opt_ void * CallbackContext); +} + +// +// Python script generated code will call this function when MCGEN_PRIVATE_ENABLE_CALLBACK_V2 is defined +// to enable runtime events +#define MCGEN_PRIVATE_ENABLE_CALLBACK_V2(SourceId, ControlCode, Level, MatchAnyKeyword, MatchAllKeyword, FilterData, CallbackContext) \ + EtwCallback(SourceId, ControlCode, Level, MatchAnyKeyword, MatchAllKeyword, FilterData, CallbackContext) + +#include "ClrEtwAll.h" #undef ETW_TRACING_INITIALIZED #define ETW_TRACING_INITIALIZED(RegHandle) (RegHandle != NULL) diff --git a/src/coreclr/nativeaot/Runtime/gcrhenv.cpp b/src/coreclr/nativeaot/Runtime/gcrhenv.cpp index 299bf3ecfecf8..410402bff506d 100644 --- a/src/coreclr/nativeaot/Runtime/gcrhenv.cpp +++ b/src/coreclr/nativeaot/Runtime/gcrhenv.cpp @@ -503,7 +503,7 @@ uint32_t RedhawkGCInterface::GetGCDescSize(void * pType) COOP_PINVOKE_HELPER(FC_BOOL_RET, RhCompareObjectContentsAndPadding, (Object* pObj1, Object* pObj2)) { - ASSERT(pObj1->GetMethodTable()->IsEquivalentTo(pObj2->GetMethodTable())); + ASSERT(pObj1->GetMethodTable() == pObj2->GetMethodTable()); ASSERT(pObj1->GetMethodTable()->IsValueType()); MethodTable * pEEType = pObj1->GetMethodTable(); diff --git a/src/coreclr/nativeaot/Runtime/inc/CommonTypes.h b/src/coreclr/nativeaot/Runtime/inc/CommonTypes.h index 0ad3a133519ed..fde2d9247e07b 100644 --- a/src/coreclr/nativeaot/Runtime/inc/CommonTypes.h +++ b/src/coreclr/nativeaot/Runtime/inc/CommonTypes.h @@ -44,7 +44,11 @@ typedef void* LPVOID; typedef uint32_t UINT; typedef void* PVOID; typedef uint64_t ULONGLONG; +#ifdef _MSC_VER +typedef unsigned long ULONG; +#else typedef uint32_t 
ULONG; +#endif typedef int64_t LONGLONG; typedef uint8_t BYTE; typedef uint16_t UINT16; diff --git a/src/coreclr/nativeaot/Runtime/inc/MethodTable.h b/src/coreclr/nativeaot/Runtime/inc/MethodTable.h index aff643f4ebae1..e9d48bf7a5b9b 100644 --- a/src/coreclr/nativeaot/Runtime/inc/MethodTable.h +++ b/src/coreclr/nativeaot/Runtime/inc/MethodTable.h @@ -131,23 +131,24 @@ class MethodTable // simplified version of MethodTable. See LimitedEEType definition below. EETypeKindMask = 0x00030000, - // Unused = 0x00040000, + // This type has optional fields present. + OptionalFieldsFlag = 0x00040000, + + // GC depends on this bit, this bit must be zero + CollectibleFlag = 0x00200000, IsDynamicTypeFlag = 0x00080000, - // This MethodTable represents a type which requires finalization + // GC depends on this bit, this type requires finalization HasFinalizerFlag = 0x00100000, - // This type contain gc pointers - HasPointersFlag = 0x00200000, + // GC depends on this bit, this type contain gc pointers + HasPointersFlag = 0x01000000, // This type is generic and one or more of it's type parameters is co- or contra-variant. This only // applies to interface and delegate types. GenericVarianceFlag = 0x00800000, - // This type has optional fields present. - OptionalFieldsFlag = 0x01000000, - // This type is generic. 
IsGenericFlag = 0x02000000, @@ -162,6 +163,7 @@ class MethodTable enum ExtendedFlags { HasEagerFinalizerFlag = 0x0001, + // GC depends on this bit, this type has a critical finalizer HasCriticalFinalizerFlag = 0x0002, IsTrackedReferenceWithFinalizerFlag = 0x0004, }; @@ -260,22 +262,6 @@ class MethodTable return (m_uFlags & OptionalFieldsFlag) != 0; } - bool IsEquivalentTo(MethodTable * pOtherEEType) - { - if (this == pOtherEEType) - return true; - - MethodTable * pThisEEType = this; - - if (pThisEEType->IsParameterizedType() && pOtherEEType->IsParameterizedType()) - { - return pThisEEType->GetRelatedParameterType()->IsEquivalentTo(pOtherEEType->GetRelatedParameterType()) && - pThisEEType->GetParameterizedTypeShape() == pOtherEEType->GetParameterizedTypeShape(); - } - - return false; - } - // How many vtable slots are there? uint16_t GetNumVtableSlots() { return m_usNumVtableSlots; } @@ -348,4 +334,3 @@ class MethodTable }; #pragma warning(pop) - diff --git a/src/coreclr/nativeaot/Runtime/windows/PalRedhawkMinWin.cpp b/src/coreclr/nativeaot/Runtime/windows/PalRedhawkMinWin.cpp index eee2b7d3c1ae3..7d2cfa8c426bc 100644 --- a/src/coreclr/nativeaot/Runtime/windows/PalRedhawkMinWin.cpp +++ b/src/coreclr/nativeaot/Runtime/windows/PalRedhawkMinWin.cpp @@ -15,7 +15,6 @@ #include #include #include -#include #include "holder.h" @@ -625,26 +624,6 @@ REDHAWK_PALEXPORT bool REDHAWK_PALAPI PalStartEventPipeHelperThread(_In_ Backgro return PalStartBackgroundWork(callback, pCallbackContext, FALSE); } -REDHAWK_PALEXPORT bool REDHAWK_PALAPI PalEventEnabled(REGHANDLE regHandle, _In_ const EVENT_DESCRIPTOR* eventDescriptor) -{ - return !!EventEnabled(regHandle, eventDescriptor); -} - -REDHAWK_PALEXPORT uint32_t REDHAWK_PALAPI PalEventRegister(const GUID * arg1, void * arg2, void * arg3, REGHANDLE * arg4) -{ - return EventRegister(arg1, reinterpret_cast(arg2), arg3, arg4); -} - -REDHAWK_PALEXPORT uint32_t REDHAWK_PALAPI PalEventUnregister(REGHANDLE arg1) -{ - return 
EventUnregister(arg1); -} - -REDHAWK_PALEXPORT uint32_t REDHAWK_PALAPI PalEventWrite(REGHANDLE arg1, const EVENT_DESCRIPTOR * arg2, uint32_t arg3, EVENT_DATA_DESCRIPTOR * arg4) -{ - return EventWrite(arg1, arg2, arg3, arg4); -} - REDHAWK_PALEXPORT void REDHAWK_PALAPI PalTerminateCurrentProcess(uint32_t arg2) { TerminateProcess(GetCurrentProcess(), arg2); diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/EETypePtr.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/EETypePtr.cs index 4f7a9caacfed3..b15d747007844 100644 --- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/EETypePtr.cs +++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/EETypePtr.cs @@ -164,7 +164,7 @@ internal bool IsEnum { // Q: When is an enum type a constructed generic type? // A: When it's nested inside a generic type. - if (!(IsDefType)) + if (!IsDefType) return false; // Generic type definitions that return true for IsPrimitive are type definitions of generic enums. diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/ComWrappers.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/ComWrappers.NativeAot.cs index 71946fb85dc41..5ad028deda118 100644 --- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/ComWrappers.NativeAot.cs +++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/InteropServices/ComWrappers.NativeAot.cs @@ -948,33 +948,6 @@ private unsafe bool TryGetOrCreateObjectForComInstanceInternal( out IntPtr inner); using ComHolder releaseIdentity = new ComHolder(identity); - if (flags.HasFlag(CreateObjectFlags.Unwrap)) - { - ComInterfaceDispatch* comInterfaceDispatch = TryGetComInterfaceDispatch(identity); - if (comInterfaceDispatch != null) - { - // If we found a managed object wrapper in this ComWrappers instance - // and it's has the same identity pointer as the one we're creating a NativeObjectWrapper for, - // 
unwrap it. We don't AddRef the wrapper as we don't take a reference to it. - // - // A managed object can have multiple managed object wrappers, with a max of one per context. - // Let's say we have a managed object A and ComWrappers instances C1 and C2. Let B1 and B2 be the - // managed object wrappers for A created with C1 and C2 respectively. - // If we are asked to create an EOC for B1 with the unwrap flag on the C2 ComWrappers instance, - // we will create a new wrapper. In this scenario, we'll only unwrap B2. - object unwrapped = ComInterfaceDispatch.GetInstance(comInterfaceDispatch); - if (_ccwTable.TryGetValue(unwrapped, out ManagedObjectWrapperHolder? unwrappedWrapperInThisContext)) - { - // The unwrapped object has a CCW in this context. Compare with identity - // so we can see if it's the CCW for the unwrapped object in this context. - if (unwrappedWrapperInThisContext.ComIp == identity) - { - retValue = unwrapped; - return true; - } - } - } - } if (!flags.HasFlag(CreateObjectFlags.UniqueInstance)) { @@ -1018,6 +991,33 @@ private unsafe bool TryGetOrCreateObjectForComInstanceInternal( return true; } } + if (flags.HasFlag(CreateObjectFlags.Unwrap)) + { + ComInterfaceDispatch* comInterfaceDispatch = TryGetComInterfaceDispatch(identity); + if (comInterfaceDispatch != null) + { + // If we found a managed object wrapper in this ComWrappers instance + // and it's has the same identity pointer as the one we're creating a NativeObjectWrapper for, + // unwrap it. We don't AddRef the wrapper as we don't take a reference to it. + // + // A managed object can have multiple managed object wrappers, with a max of one per context. + // Let's say we have a managed object A and ComWrappers instances C1 and C2. Let B1 and B2 be the + // managed object wrappers for A created with C1 and C2 respectively. + // If we are asked to create an EOC for B1 with the unwrap flag on the C2 ComWrappers instance, + // we will create a new wrapper. In this scenario, we'll only unwrap B2. 
+ object unwrapped = ComInterfaceDispatch.GetInstance(comInterfaceDispatch); + if (_ccwTable.TryGetValue(unwrapped, out ManagedObjectWrapperHolder? unwrappedWrapperInThisContext)) + { + // The unwrapped object has a CCW in this context. Compare with identity + // so we can see if it's the CCW for the unwrapped object in this context. + if (unwrappedWrapperInThisContext.ComIp == identity) + { + retValue = unwrapped; + return true; + } + } + } + } } retValue = CreateObject(identity, flags); diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/RuntimeExceptionHelpers.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/RuntimeExceptionHelpers.cs index 88f21fdf0561b..527e18e03c6f0 100644 --- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/RuntimeExceptionHelpers.cs +++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/RuntimeExceptionHelpers.cs @@ -138,7 +138,7 @@ public static void ReportUnhandledException(Exception exception) // This is the classlib-provided fail-fast function that will be invoked whenever the runtime // needs to cause the process to exit. It is the classlib's opportunity to customize the // termination behavior in whatever way necessary. - [RuntimeExport("FailFast")] + [RuntimeExport("RuntimeFailFast")] internal static void RuntimeFailFast(RhFailFastReason reason, Exception? 
exception, IntPtr pExAddress, IntPtr pExContext) { if (!SafeToPerformRichExceptionSupport) diff --git a/src/coreclr/nativeaot/Test.CoreLib/src/System/RuntimeExceptionHelpers.cs b/src/coreclr/nativeaot/Test.CoreLib/src/System/RuntimeExceptionHelpers.cs index 96ced47305322..8100d936e66b0 100644 --- a/src/coreclr/nativeaot/Test.CoreLib/src/System/RuntimeExceptionHelpers.cs +++ b/src/coreclr/nativeaot/Test.CoreLib/src/System/RuntimeExceptionHelpers.cs @@ -83,7 +83,7 @@ public static Exception GetRuntimeException(ExceptionIDs id) // This is the classlib-provided fail-fast function that will be invoked whenever the runtime // needs to cause the process to exit. It is the classlib's opportunity to customize the // termination behavior in whatever way necessary. - [RuntimeExport("FailFast")] + [RuntimeExport("RuntimeFailFast")] internal static void RuntimeFailFast(RhFailFastReason reason, Exception exception, IntPtr pExAddress, IntPtr pExContext) { RuntimeImports.RhpFallbackFailFast(); diff --git a/src/coreclr/nativeaot/docs/android-bionic.md b/src/coreclr/nativeaot/docs/android-bionic.md index 8f208492ed497..39694a8130fa7 100644 --- a/src/coreclr/nativeaot/docs/android-bionic.md +++ b/src/coreclr/nativeaot/docs/android-bionic.md @@ -6,6 +6,8 @@ Not a full Android experience is available - it's only possible to publish for t The minimum API level is 21 at the time of writing the document, but search for AndroidApiLevelMin in this repo for more up-to-date information. +NOTE: There's an existing issue that puts the minimum Android version where things work to Android 10: https://github.com/dotnet/runtime/issues/92196 + To build for Bionic: * Ensure you have [Android NDK](https://developer.android.com/ndk/downloads) for your system downloaded and extracted somewhere. We build and test with NDK r23c but anything newer should also work. 
Double check with the NDK version referenced [here](https://github.com/dotnet/runtime/blob/main/docs/workflow/testing/libraries/testing-android.md), which might be more up-to-date than this document. diff --git a/src/coreclr/pal/inc/pal.h b/src/coreclr/pal/inc/pal.h index 2bf5b17344d3d..b29d207511788 100644 --- a/src/coreclr/pal/inc/pal.h +++ b/src/coreclr/pal/inc/pal.h @@ -1048,6 +1048,15 @@ CreateMutexExW( IN DWORD dwFlags, IN DWORD dwDesiredAccess); +PALIMPORT +HANDLE +PALAPI +PAL_CreateMutexW( + IN BOOL bInitialOwner, + IN LPCWSTR lpName, + IN LPSTR lpSystemCallErrors, + IN DWORD dwSystemCallErrorsBufferSize); + // CreateMutexExW: dwFlags #define CREATE_MUTEX_INITIAL_OWNER ((DWORD)0x1) @@ -1061,6 +1070,14 @@ OpenMutexW( IN BOOL bInheritHandle, IN LPCWSTR lpName); +PALIMPORT +HANDLE +PALAPI +PAL_OpenMutexW( + IN LPCWSTR lpName, + IN LPSTR lpSystemCallErrors, + IN DWORD dwSystemCallErrorsBufferSize); + #ifdef UNICODE #define OpenMutex OpenMutexW #endif @@ -2085,6 +2102,34 @@ typedef struct _KNONVOLATILE_CONTEXT_POINTERS { } KNONVOLATILE_CONTEXT_POINTERS, *PKNONVOLATILE_CONTEXT_POINTERS; +typedef struct _IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY { + DWORD BeginAddress; + union { + DWORD UnwindData; + struct { + DWORD Flag : 2; + DWORD FunctionLength : 11; + DWORD RegF : 3; + DWORD RegI : 4; + DWORD H : 1; + DWORD CR : 2; + DWORD FrameSize : 9; + }; + }; +} IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY, * PIMAGE_ARM64_RUNTIME_FUNCTION_ENTRY; + +typedef union IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY_XDATA { + ULONG HeaderData; + struct { + ULONG FunctionLength : 18; // in words (2 bytes) + ULONG Version : 2; + ULONG ExceptionDataPresent : 1; + ULONG EpilogInHeader : 1; + ULONG EpilogCount : 5; // number of epilogs or byte index of the first unwind code for the one only epilog + ULONG CodeWords : 5; // number of dwords with unwind codes + }; +} IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY_XDATA; + #elif defined(HOST_LOONGARCH64) // Please refer to src/coreclr/pal/src/arch/loongarch64/asmconstants.h 
@@ -3139,13 +3184,17 @@ enum { // // A function table entry is generated for each frame function. // +#if defined(HOST_ARM64) +typedef IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY RUNTIME_FUNCTION, *PRUNTIME_FUNCTION; +#else // HOST_ARM64 typedef struct _RUNTIME_FUNCTION { DWORD BeginAddress; -#ifdef TARGET_AMD64 +#ifdef HOST_AMD64 DWORD EndAddress; #endif DWORD UnwindData; } RUNTIME_FUNCTION, *PRUNTIME_FUNCTION; +#endif // HOST_ARM64 #define STANDARD_RIGHTS_REQUIRED (0x000F0000L) #define SYNCHRONIZE (0x00100000L) diff --git a/src/coreclr/pal/src/configure.cmake b/src/coreclr/pal/src/configure.cmake index 304bc3461e9f3..5366932629c62 100644 --- a/src/coreclr/pal/src/configure.cmake +++ b/src/coreclr/pal/src/configure.cmake @@ -133,6 +133,7 @@ check_function_exists(semget HAS_SYSV_SEMAPHORES) check_function_exists(pthread_mutex_init HAS_PTHREAD_MUTEXES) check_function_exists(ttrace HAVE_TTRACE) check_function_exists(pipe2 HAVE_PIPE2) +check_function_exists(strerrorname_np HAVE_STRERRORNAME_NP) check_cxx_source_compiles(" #include diff --git a/src/coreclr/pal/src/include/pal/mutex.hpp b/src/coreclr/pal/src/include/pal/mutex.hpp index 464f0f72afb45..016668dafb162 100644 --- a/src/coreclr/pal/src/include/pal/mutex.hpp +++ b/src/coreclr/pal/src/include/pal/mutex.hpp @@ -32,6 +32,7 @@ namespace CorUnix PAL_ERROR InternalCreateMutex( + SharedMemorySystemCallErrors *errors, CPalThread *pThread, LPSECURITY_ATTRIBUTES lpMutexAttributes, BOOL bInitialOwner, @@ -47,6 +48,7 @@ namespace CorUnix PAL_ERROR InternalOpenMutex( + SharedMemorySystemCallErrors *errors, CPalThread *pThread, LPCSTR lpName, HANDLE *phMutex @@ -151,10 +153,10 @@ enum class MutexTryAcquireLockResult class MutexHelpers { public: - static void InitializeProcessSharedRobustRecursiveMutex(pthread_mutex_t *mutex); + static void InitializeProcessSharedRobustRecursiveMutex(SharedMemorySystemCallErrors *errors, pthread_mutex_t *mutex); static void DestroyMutex(pthread_mutex_t *mutex); - static MutexTryAcquireLockResult 
TryAcquireLock(pthread_mutex_t *mutex, DWORD timeoutMilliseconds); + static MutexTryAcquireLockResult TryAcquireLock(SharedMemorySystemCallErrors *errors, pthread_mutex_t *mutex, DWORD timeoutMilliseconds); static void ReleaseLock(pthread_mutex_t *mutex); }; #endif // NAMED_MUTEX_USE_PTHREAD_MUTEX @@ -172,7 +174,7 @@ class NamedMutexSharedData bool m_isAbandoned; public: - NamedMutexSharedData(); + NamedMutexSharedData(SharedMemorySystemCallErrors *errors); ~NamedMutexSharedData(); #if NAMED_MUTEX_USE_PTHREAD_MUTEX @@ -214,10 +216,10 @@ class NamedMutexProcessData : public SharedMemoryProcessDataBase bool m_hasRefFromLockOwnerThread; public: - static SharedMemoryProcessDataHeader *CreateOrOpen(LPCSTR name, bool acquireLockIfCreated, bool *createdRef); - static SharedMemoryProcessDataHeader *Open(LPCSTR name); + static SharedMemoryProcessDataHeader *CreateOrOpen(SharedMemorySystemCallErrors *errors, LPCSTR name, bool acquireLockIfCreated, bool *createdRef); + static SharedMemoryProcessDataHeader *Open(SharedMemorySystemCallErrors *errors, LPCSTR name); private: - static SharedMemoryProcessDataHeader *CreateOrOpen(LPCSTR name, bool createIfNotExist, bool acquireLockIfCreated, bool *createdRef); + static SharedMemoryProcessDataHeader *CreateOrOpen(SharedMemorySystemCallErrors *errors, LPCSTR name, bool createIfNotExist, bool acquireLockIfCreated, bool *createdRef); public: NamedMutexProcessData( @@ -248,7 +250,7 @@ class NamedMutexProcessData : public SharedMemoryProcessDataBase void SetNextInThreadOwnedNamedMutexList(NamedMutexProcessData *next); public: - MutexTryAcquireLockResult TryAcquireLock(DWORD timeoutMilliseconds); + MutexTryAcquireLockResult TryAcquireLock(SharedMemorySystemCallErrors *errors, DWORD timeoutMilliseconds); void ReleaseLock(); void Abandon(); private: diff --git a/src/coreclr/pal/src/include/pal/sharedmemory.h b/src/coreclr/pal/src/include/pal/sharedmemory.h index 88834b93d0673..fce6d2859cc05 100644 --- 
a/src/coreclr/pal/src/include/pal/sharedmemory.h +++ b/src/coreclr/pal/src/include/pal/sharedmemory.h @@ -85,6 +85,19 @@ class SharedMemoryException DWORD GetErrorCode() const; }; +class SharedMemorySystemCallErrors +{ +private: + char *m_buffer; + int m_bufferSize; + int m_length; + bool m_isTracking; + +public: + SharedMemorySystemCallErrors(char *buffer, int bufferSize); + void Append(LPCSTR format, ...); +}; + class SharedMemoryHelpers { private: @@ -106,20 +119,22 @@ class SharedMemoryHelpers static void BuildSharedFilesPath(PathCharString& destination, const char *suffix, int suffixByteCount); static bool AppendUInt32String(PathCharString& destination, UINT32 value); - static bool EnsureDirectoryExists(const char *path, bool isGlobalLockAcquired, bool createIfNotExist = true, bool isSystemDirectory = false); + static bool EnsureDirectoryExists(SharedMemorySystemCallErrors *errors, const char *path, bool isGlobalLockAcquired, bool createIfNotExist = true, bool isSystemDirectory = false); private: - static int Open(LPCSTR path, int flags, mode_t mode = static_cast(0)); + static int Open(SharedMemorySystemCallErrors *errors, LPCSTR path, int flags, mode_t mode = static_cast(0)); public: - static int OpenDirectory(LPCSTR path); - static int CreateOrOpenFile(LPCSTR path, bool createIfNotExist = true, bool *createdRef = nullptr); + static int OpenDirectory(SharedMemorySystemCallErrors *errors, LPCSTR path); + static int CreateOrOpenFile(SharedMemorySystemCallErrors *errors, LPCSTR path, bool createIfNotExist = true, bool *createdRef = nullptr); static void CloseFile(int fileDescriptor); - static SIZE_T GetFileSize(int fileDescriptor); - static void SetFileSize(int fileDescriptor, SIZE_T byteCount); + static int ChangeMode(LPCSTR path, mode_t mode); + + static SIZE_T GetFileSize(SharedMemorySystemCallErrors *errors, LPCSTR filePath, int fileDescriptor); + static void SetFileSize(SharedMemorySystemCallErrors *errors, LPCSTR filePath, int fileDescriptor, SIZE_T 
byteCount); - static void *MemoryMapFile(int fileDescriptor, SIZE_T byteCount); + static void *MemoryMapFile(SharedMemorySystemCallErrors *errors, LPCSTR filePath, int fileDescriptor, SIZE_T byteCount); - static bool TryAcquireFileLock(int fileDescriptor, int operation); + static bool TryAcquireFileLock(SharedMemorySystemCallErrors *errors, int fileDescriptor, int operation); static void ReleaseFileLock(int fileDescriptor); static void VerifyStringOperation(bool success); @@ -207,7 +222,7 @@ class SharedMemoryProcessDataHeader SharedMemoryProcessDataHeader *m_nextInProcessDataHeaderList; public: - static SharedMemoryProcessDataHeader *CreateOrOpen(LPCSTR name, SharedMemorySharedDataHeader requiredSharedDataHeader, SIZE_T sharedDataByteCount, bool createIfNotExist, bool *createdRef); + static SharedMemoryProcessDataHeader *CreateOrOpen(SharedMemorySystemCallErrors *errors, LPCSTR name, SharedMemorySharedDataHeader requiredSharedDataHeader, SIZE_T sharedDataByteCount, bool createIfNotExist, bool *createdRef); public: static SharedMemoryProcessDataHeader *PalObject_GetProcessDataHeader(CorUnix::IPalObject *object); @@ -260,7 +275,7 @@ class SharedMemoryManager public: static void AcquireCreationDeletionProcessLock(); static void ReleaseCreationDeletionProcessLock(); - static void AcquireCreationDeletionFileLock(); + static void AcquireCreationDeletionFileLock(SharedMemorySystemCallErrors *errors); static void ReleaseCreationDeletionFileLock(); public: diff --git a/src/coreclr/pal/src/include/pal/utils.h b/src/coreclr/pal/src/include/pal/utils.h index 83cf2b104c1ff..fdd5b3b965a16 100644 --- a/src/coreclr/pal/src/include/pal/utils.h +++ b/src/coreclr/pal/src/include/pal/utils.h @@ -212,3 +212,5 @@ class StringHolder }; #endif /* _PAL_UTILS_H_ */ + +const char *GetFriendlyErrorCodeString(int errorCode); diff --git a/src/coreclr/pal/src/misc/utils.cpp b/src/coreclr/pal/src/misc/utils.cpp index f279ef3d580c1..0d96cc991305a 100644 --- a/src/coreclr/pal/src/misc/utils.cpp 
+++ b/src/coreclr/pal/src/misc/utils.cpp @@ -366,3 +366,61 @@ BOOL IsRunningOnMojaveHardenedRuntime() } #endif // __APPLE__ + +const char *GetFriendlyErrorCodeString(int errorCode) +{ +#if HAVE_STRERRORNAME_NP + const char *error = strerrorname_np(errorCode); + if (error != nullptr) + { + return error; + } +#else // !HAVE_STRERRORNAME_NP + switch (errorCode) + { + case EACCES: return "EACCES"; + #if EAGAIN == EWOULDBLOCK + case EAGAIN: return "EAGAIN/EWOULDBLOCK"; + #else + case EAGAIN: return "EAGAIN"; + case EWOULDBLOCK: return "EWOULDBLOCK"; + #endif + case EBADF: return "EBADF"; + case EBUSY: return "EBUSY"; + case EDQUOT: return "EDQUOT"; + case EEXIST: return "EEXIST"; + case EFAULT: return "EFAULT"; + case EFBIG: return "EFBIG"; + case EINVAL: return "EINVAL"; + case EINTR: return "EINTR"; + case EIO: return "EIO"; + case EISDIR: return "EISDIR"; + case ELOOP: return "ELOOP"; + case EMFILE: return "EMFILE"; + case EMLINK: return "EMLINK"; + case ENAMETOOLONG: return "ENAMETOOLONG"; + case ENFILE: return "ENFILE"; + case ENODEV: return "ENODEV"; + case ENOENT: return "ENOENT"; + case ENOLCK: return "ENOLCK"; + case ENOMEM: return "ENOMEM"; + case ENOSPC: return "ENOSPC"; + #if ENOTSUP == EOPNOTSUPP + case ENOTSUP: return "ENOTSUP/EOPNOTSUPP"; + #else + case ENOTSUP: return "ENOTSUP"; + case EOPNOTSUPP: return "EOPNOTSUPP"; + #endif + case ENOTDIR: return "ENOTDIR"; + case ENOTEMPTY: return "ENOTEMPTY"; + case ENXIO: return "ENXIO"; + case EOVERFLOW: return "EOVERFLOW"; + case EPERM: return "EPERM"; + case EROFS: return "EROFS"; + case ETXTBSY: return "ETXTBSY"; + case EXDEV: return "EXDEV"; + } +#endif // HAVE_STRERRORNAME_NP + + return strerror(errorCode); +} diff --git a/src/coreclr/pal/src/sharedmemory/sharedmemory.cpp b/src/coreclr/pal/src/sharedmemory/sharedmemory.cpp index a2342f23efa5c..ea5aae444dad0 100644 --- a/src/coreclr/pal/src/sharedmemory/sharedmemory.cpp +++ b/src/coreclr/pal/src/sharedmemory/sharedmemory.cpp @@ -1,14 +1,17 @@ // Licensed to 
the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. +#include "pal/dbgmsg.h" +SET_DEFAULT_DEBUG_CHANNEL(SHMEM); // some headers have code with asserts, so do this first + #include "pal/sharedmemory.h" -#include "pal/dbgmsg.h" #include "pal/file.hpp" #include "pal/malloc.hpp" #include "pal/thread.hpp" #include "pal/virtual.h" #include "pal/process.h" +#include "pal/utils.h" #include #include @@ -23,8 +26,6 @@ using namespace CorUnix; -SET_DEFAULT_DEBUG_CHANNEL(SHMEM); - #include "pal/sharedmemory.inl" //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -59,6 +60,71 @@ DWORD SharedMemoryException::GetErrorCode() const return m_errorCode; } +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// SharedMemorySystemCallErrors + +SharedMemorySystemCallErrors::SharedMemorySystemCallErrors(char *buffer, int bufferSize) + : m_buffer(buffer), m_bufferSize(bufferSize), m_length(0), m_isTracking(bufferSize != 0) +{ + _ASSERTE((buffer == nullptr) == (bufferSize == 0)); + _ASSERTE(bufferSize >= 0); +} + +void SharedMemorySystemCallErrors::Append(LPCSTR format, ...) 
+{ + if (!m_isTracking) + { + return; + } + + char *buffer = m_buffer; + _ASSERTE(buffer != nullptr); + int bufferSize = m_bufferSize; + _ASSERTE(bufferSize != 0); + int length = m_length; + _ASSERTE(length < bufferSize); + _ASSERTE(buffer[length] == '\0'); + if (length >= bufferSize - 1) + { + return; + } + + if (length != 0) + { + length++; // the previous null terminator will be changed to a space if the append succeeds + } + + va_list args; + va_start(args, format); + int result = _vsnprintf_s(buffer + length, bufferSize - length, bufferSize - 1 - length, format, args); + va_end(args); + + if (result == 0) + { + return; + } + + if (result < 0 || result >= bufferSize - length) + { + // There's not enough space to append this error, discard the append and stop tracking + if (length == 0) + { + buffer[0] = '\0'; + } + m_isTracking = false; + return; + } + + if (length != 0) + { + buffer[length - 1] = ' '; // change the previous null terminator to a space + } + + length += result; + _ASSERTE(buffer[length] == '\0'); + m_length = length; +} + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // SharedMemoryHelpers @@ -94,6 +160,7 @@ SIZE_T SharedMemoryHelpers::AlignUp(SIZE_T value, SIZE_T alignment) } bool SharedMemoryHelpers::EnsureDirectoryExists( + SharedMemorySystemCallErrors *errors, const char *path, bool isGlobalLockAcquired, bool createIfNotExist, @@ -123,15 +190,39 @@ bool SharedMemoryHelpers::EnsureDirectoryExists( if (isGlobalLockAcquired) { - if (mkdir(path, PermissionsMask_AllUsers_ReadWriteExecute) != 0) + int operationResult = mkdir(path, PermissionsMask_AllUsers_ReadWriteExecute); + if (operationResult != 0) { + if (errors != nullptr) + { + int errorCode = errno; + errors->Append( + "mkdir(\"%s\", AllUsers_ReadWriteExecute) == %d; errno == %s;", + path, + operationResult, + GetFriendlyErrorCodeString(errorCode)); + } + throw 
SharedMemoryException(static_cast(SharedMemoryError::IO)); } - if (chmod(path, PermissionsMask_AllUsers_ReadWriteExecute) != 0) + + operationResult = ChangeMode(path, PermissionsMask_AllUsers_ReadWriteExecute); + if (operationResult != 0) { + if (errors != nullptr) + { + int errorCode = errno; + errors->Append( + "chmod(\"%s\", AllUsers_ReadWriteExecute) == %d; errno == %s;", + path, + operationResult, + GetFriendlyErrorCodeString(errorCode)); + } + rmdir(path); throw SharedMemoryException(static_cast(SharedMemoryError::IO)); } + return true; } @@ -140,13 +231,35 @@ bool SharedMemoryHelpers::EnsureDirectoryExists( if (mkdtemp(tempPath.OpenStringBuffer()) == nullptr) { + if (errors != nullptr) + { + int errorCode = errno; + errors->Append( + "mkdtemp(\"%s\") == nullptr; errno == %s;", + (const char *)tempPath, + GetFriendlyErrorCodeString(errorCode)); + } + throw SharedMemoryException(static_cast(SharedMemoryError::IO)); } - if (chmod(tempPath, PermissionsMask_AllUsers_ReadWriteExecute) != 0) + + int operationResult = ChangeMode(tempPath, PermissionsMask_AllUsers_ReadWriteExecute); + if (operationResult != 0) { + if (errors != nullptr) + { + int errorCode = errno; + errors->Append( + "chmod(\"%s\", AllUsers_ReadWriteExecute) == %d; errno == %s;", + (const char *)tempPath, + operationResult, + GetFriendlyErrorCodeString(errorCode)); + } + rmdir(tempPath); throw SharedMemoryException(static_cast(SharedMemoryError::IO)); } + if (rename(tempPath, path) == 0) { return true; @@ -161,6 +274,27 @@ bool SharedMemoryHelpers::EnsureDirectoryExists( // If the path exists, check that it's a directory if (statResult != 0 || !(statInfo.st_mode & S_IFDIR)) { + if (errors != nullptr) + { + if (statResult != 0) + { + int errorCode = errno; + errors->Append( + "stat(\"%s\", ...) 
== %d; errno == %s;", + path, + statResult, + GetFriendlyErrorCodeString(errorCode)); + } + else + { + errors->Append( + "stat(\"%s\", &info) == 0; info.st_mode == 0x%x; (info.st_mode & 0x%x) == 0;", + path, + (int)statInfo.st_mode, + (int)S_IFDIR); + } + } + throw SharedMemoryException(static_cast(SharedMemoryError::IO)); } @@ -176,6 +310,15 @@ bool SharedMemoryHelpers::EnsureDirectoryExists( { return true; } + + if (errors != nullptr) + { + errors->Append( + "stat(\"%s\", &info) == 0; info.st_mode == 0x%x; (info.st_mode & CurrentUser_ReadWriteExecute) != CurrentUser_ReadWriteExecute;", + path, + (int)statInfo.st_mode); + } + throw SharedMemoryException(static_cast(SharedMemoryError::IO)); } @@ -186,19 +329,27 @@ bool SharedMemoryHelpers::EnsureDirectoryExists( { return true; } - if (!createIfNotExist || chmod(path, PermissionsMask_AllUsers_ReadWriteExecute) != 0) + if (!createIfNotExist || ChangeMode(path, PermissionsMask_AllUsers_ReadWriteExecute) != 0) { // We were not asked to create the path or we weren't able to set the new permissions. // As a last resort, check that at least the current user has full access. 
if ((statInfo.st_mode & PermissionsMask_CurrentUser_ReadWriteExecute) != PermissionsMask_CurrentUser_ReadWriteExecute) { + if (errors != nullptr) + { + errors->Append( + "stat(\"%s\", &info) == 0; info.st_mode == 0x%x; (info.st_mode & CurrentUser_ReadWriteExecute) != CurrentUser_ReadWriteExecute;", + path, + (int)statInfo.st_mode); + } + throw SharedMemoryException(static_cast(SharedMemoryError::IO)); } } return true; } -int SharedMemoryHelpers::Open(LPCSTR path, int flags, mode_t mode) +int SharedMemoryHelpers::Open(SharedMemorySystemCallErrors *errors, LPCSTR path, int flags, mode_t mode) { int openErrorCode; @@ -213,6 +364,7 @@ int SharedMemoryHelpers::Open(LPCSTR path, int flags, mode_t mode) openErrorCode = errno; } while (openErrorCode == EINTR); + SharedMemoryError sharedMemoryError; switch (openErrorCode) { case ENOENT: @@ -221,29 +373,48 @@ int SharedMemoryHelpers::Open(LPCSTR path, int flags, mode_t mode) return -1; case ENAMETOOLONG: - throw SharedMemoryException(static_cast(SharedMemoryError::NameTooLong)); + sharedMemoryError = SharedMemoryError::NameTooLong; + break; case EMFILE: case ENFILE: case ENOMEM: - throw SharedMemoryException(static_cast(SharedMemoryError::OutOfMemory)); + sharedMemoryError = SharedMemoryError::OutOfMemory; + break; default: - throw SharedMemoryException(static_cast(SharedMemoryError::IO)); + sharedMemoryError = SharedMemoryError::IO; + break; } + + if (sharedMemoryError != SharedMemoryError::NameTooLong && errors != nullptr) + { + errors->Append( + "open(\"%s\", 0x%x, 0x%x) == -1; errno == %s;", + path, + flags, + (int)mode, + GetFriendlyErrorCodeString(openErrorCode)); + } + + throw SharedMemoryException(static_cast(sharedMemoryError)); } -int SharedMemoryHelpers::OpenDirectory(LPCSTR path) +int SharedMemoryHelpers::OpenDirectory(SharedMemorySystemCallErrors *errors, LPCSTR path) { _ASSERTE(path != nullptr); _ASSERTE(path[0] != '\0'); - int fileDescriptor = Open(path, O_RDONLY); + int fileDescriptor = Open(errors, path, 
O_RDONLY); _ASSERTE(fileDescriptor != -1 || errno == ENOENT); return fileDescriptor; } -int SharedMemoryHelpers::CreateOrOpenFile(LPCSTR path, bool createIfNotExist, bool *createdRef) +int SharedMemoryHelpers::CreateOrOpenFile( + SharedMemorySystemCallErrors *errors, + LPCSTR path, + bool createIfNotExist, + bool *createdRef) { _ASSERTE(path != nullptr); _ASSERTE(path[0] != '\0'); @@ -252,7 +423,7 @@ int SharedMemoryHelpers::CreateOrOpenFile(LPCSTR path, bool createIfNotExist, bo // Try to open the file int openFlags = O_RDWR; - int fileDescriptor = Open(path, openFlags); + int fileDescriptor = Open(errors, path, openFlags); if (fileDescriptor != -1) { if (createdRef != nullptr) @@ -273,13 +444,24 @@ int SharedMemoryHelpers::CreateOrOpenFile(LPCSTR path, bool createIfNotExist, bo // File does not exist, create the file openFlags |= O_CREAT | O_EXCL; - fileDescriptor = Open(path, openFlags, PermissionsMask_AllUsers_ReadWrite); + fileDescriptor = Open(errors, path, openFlags, PermissionsMask_AllUsers_ReadWrite); _ASSERTE(fileDescriptor != -1); // The permissions mask passed to open() is filtered by the process' permissions umask, so open() may not set all of // the requested permissions. Use chmod() to set the proper permissions. 
- if (chmod(path, PermissionsMask_AllUsers_ReadWrite) != 0) + int operationResult = ChangeMode(path, PermissionsMask_AllUsers_ReadWrite); + if (operationResult != 0) { + if (errors != nullptr) + { + int errorCode = errno; + errors->Append( + "chmod(\"%s\", AllUsers_ReadWrite) == %d; errno == %s;", + path, + operationResult, + GetFriendlyErrorCodeString(errorCode)); + } + CloseFile(fileDescriptor); unlink(path); throw SharedMemoryException(static_cast(SharedMemoryError::IO)); @@ -303,21 +485,54 @@ void SharedMemoryHelpers::CloseFile(int fileDescriptor) } while (closeResult != 0 && errno == EINTR); } -SIZE_T SharedMemoryHelpers::GetFileSize(int fileDescriptor) +int SharedMemoryHelpers::ChangeMode(LPCSTR path, mode_t mode) { + _ASSERTE(path != nullptr); + _ASSERTE(path[0] != '\0'); + + int chmodResult; + do + { + chmodResult = chmod(path, mode); + } while (chmodResult != 0 && errno == EINTR); + + return chmodResult; +} + +SIZE_T SharedMemoryHelpers::GetFileSize(SharedMemorySystemCallErrors *errors, LPCSTR filePath, int fileDescriptor) +{ + _ASSERTE(filePath != nullptr); + _ASSERTE(filePath[0] != '\0'); _ASSERTE(fileDescriptor != -1); off_t endOffset = lseek(fileDescriptor, 0, SEEK_END); if (endOffset == static_cast(-1) || lseek(fileDescriptor, 0, SEEK_SET) == static_cast(-1)) { + if (errors != nullptr) + { + int errorCode = errno; + errors->Append( + "lseek(\"%s\", 0, %s) == -1; errno == %s;", + filePath, + endOffset == (off_t)-1 ? 
"SEEK_END" : "SEEK_SET", + GetFriendlyErrorCodeString(errorCode)); + } + throw SharedMemoryException(static_cast(SharedMemoryError::IO)); } + return endOffset; } -void SharedMemoryHelpers::SetFileSize(int fileDescriptor, SIZE_T byteCount) +void SharedMemoryHelpers::SetFileSize( + SharedMemorySystemCallErrors *errors, + LPCSTR filePath, + int fileDescriptor, + SIZE_T byteCount) { + _ASSERTE(filePath != nullptr); + _ASSERTE(filePath[0] != '\0'); _ASSERTE(fileDescriptor != -1); _ASSERTE(static_cast(byteCount) == byteCount); @@ -328,15 +543,33 @@ void SharedMemoryHelpers::SetFileSize(int fileDescriptor, SIZE_T byteCount) { break; } - if (errno != EINTR) + + int errorCode = errno; + if (errorCode != EINTR) { + if (errors != nullptr) + { + errors->Append( + "ftruncate(\"%s\", %zu) == %d; errno == %s;", + filePath, + byteCount, + ftruncateResult, + GetFriendlyErrorCodeString(errorCode)); + } + throw SharedMemoryException(static_cast(SharedMemoryError::IO)); } } } -void *SharedMemoryHelpers::MemoryMapFile(int fileDescriptor, SIZE_T byteCount) +void *SharedMemoryHelpers::MemoryMapFile( + SharedMemorySystemCallErrors *errors, + LPCSTR filePath, + int fileDescriptor, + SIZE_T byteCount) { + _ASSERTE(filePath != nullptr); + _ASSERTE(filePath[0] != '\0'); _ASSERTE(fileDescriptor != -1); _ASSERTE(byteCount > sizeof(SharedMemorySharedDataHeader)); _ASSERTE(AlignDown(byteCount, GetVirtualPageSize()) == byteCount); @@ -346,32 +579,52 @@ void *SharedMemoryHelpers::MemoryMapFile(int fileDescriptor, SIZE_T byteCount) { return sharedMemoryBuffer; } - switch (errno) + + int errorCode = errno; + SharedMemoryError sharedMemoryError; + switch (errorCode) { + case EMFILE: case ENFILE: case ENOMEM: - throw SharedMemoryException(static_cast(SharedMemoryError::OutOfMemory)); + sharedMemoryError = SharedMemoryError::OutOfMemory; + break; default: - throw SharedMemoryException(static_cast(SharedMemoryError::IO)); + sharedMemoryError = SharedMemoryError::IO; + break; } + + if (errors != nullptr) 
+ { + errors->Append( + "mmap(nullptr, %zu, PROT_READ | PROT_WRITE, MAP_SHARED, \"%s\", 0) == MAP_FAILED; errno == %s;", + byteCount, + filePath, + GetFriendlyErrorCodeString(errorCode)); + } + + throw SharedMemoryException(static_cast(sharedMemoryError)); } -bool SharedMemoryHelpers::TryAcquireFileLock(int fileDescriptor, int operation) +bool SharedMemoryHelpers::TryAcquireFileLock(SharedMemorySystemCallErrors *errors, int fileDescriptor, int operation) { // A file lock is acquired once per file descriptor, so the caller will need to synchronize threads of this process _ASSERTE(fileDescriptor != -1); + _ASSERTE((operation & LOCK_EX) ^ (operation & LOCK_SH)); _ASSERTE(!(operation & LOCK_UN)); while (true) { - if (flock(fileDescriptor, operation) == 0) + int flockResult = flock(fileDescriptor, operation); + if (flockResult == 0) { return true; } int flockError = errno; + SharedMemoryError sharedMemoryError = SharedMemoryError::IO; switch (flockError) { case EWOULDBLOCK: @@ -380,9 +633,23 @@ bool SharedMemoryHelpers::TryAcquireFileLock(int fileDescriptor, int operation) case EINTR: continue; - default: - throw SharedMemoryException(static_cast(SharedMemoryError::OutOfMemory)); + case ENOLCK: + sharedMemoryError = SharedMemoryError::OutOfMemory; + break; + } + + if (errors != nullptr) + { + errors->Append( + "flock(%d, %s%s) == %d; errno == %s;", + fileDescriptor, + operation & LOCK_EX ? "LOCK_EX" : "LOCK_SH", + operation & LOCK_NB ? 
" | LOCK_NB" : "", + flockResult, + GetFriendlyErrorCodeString(flockError)); } + + throw SharedMemoryException(static_cast(sharedMemoryError)); } } @@ -558,6 +825,7 @@ void *SharedMemorySharedDataHeader::GetData() // SharedMemoryProcessDataHeader SharedMemoryProcessDataHeader *SharedMemoryProcessDataHeader::CreateOrOpen( + SharedMemorySystemCallErrors *errors, LPCSTR name, SharedMemorySharedDataHeader requiredSharedDataHeader, SIZE_T sharedDataByteCount, @@ -657,14 +925,14 @@ SharedMemoryProcessDataHeader *SharedMemoryProcessDataHeader::CreateOrOpen( return processDataHeader; } - SharedMemoryManager::AcquireCreationDeletionFileLock(); + SharedMemoryManager::AcquireCreationDeletionFileLock(errors); autoCleanup.m_acquiredCreationDeletionFileLock = true; // Create the session directory SharedMemoryHelpers::VerifyStringOperation(SharedMemoryManager::CopySharedMemoryBasePath(filePath)); SharedMemoryHelpers::VerifyStringOperation(filePath.Append('/')); SharedMemoryHelpers::VerifyStringOperation(id.AppendSessionDirectoryName(filePath)); - if (!SharedMemoryHelpers::EnsureDirectoryExists(filePath, true /* isGlobalLockAcquired */, createIfNotExist)) + if (!SharedMemoryHelpers::EnsureDirectoryExists(errors, filePath, true /* isGlobalLockAcquired */, createIfNotExist)) { _ASSERTE(!createIfNotExist); return nullptr; @@ -677,7 +945,7 @@ SharedMemoryProcessDataHeader *SharedMemoryProcessDataHeader::CreateOrOpen( SharedMemoryHelpers::VerifyStringOperation(filePath.Append(id.GetName(), id.GetNameCharCount())); bool createdFile; - int fileDescriptor = SharedMemoryHelpers::CreateOrOpenFile(filePath, createIfNotExist, &createdFile); + int fileDescriptor = SharedMemoryHelpers::CreateOrOpenFile(errors, filePath, createIfNotExist, &createdFile); if (fileDescriptor == -1) { _ASSERTE(!createIfNotExist); @@ -692,7 +960,7 @@ SharedMemoryProcessDataHeader *SharedMemoryProcessDataHeader::CreateOrOpen( // A shared file lock on the shared memory file would be held by any process that has opened 
the same file. Try to take // an exclusive lock on the file. Successfully acquiring an exclusive lock indicates that no process has a reference to // the shared memory file, and this process can reinitialize its contents. - if (SharedMemoryHelpers::TryAcquireFileLock(fileDescriptor, LOCK_EX | LOCK_NB)) + if (SharedMemoryHelpers::TryAcquireFileLock(errors, fileDescriptor, LOCK_EX | LOCK_NB)) { // The shared memory file is not being used, flag it as created so that its contents will be reinitialized SharedMemoryHelpers::ReleaseFileLock(fileDescriptor); @@ -711,18 +979,18 @@ SharedMemoryProcessDataHeader *SharedMemoryProcessDataHeader::CreateOrOpen( SIZE_T sharedDataTotalByteCount = SharedMemorySharedDataHeader::GetTotalByteCount(sharedDataByteCount); if (createdFile) { - SharedMemoryHelpers::SetFileSize(fileDescriptor, sharedDataTotalByteCount); + SharedMemoryHelpers::SetFileSize(errors, filePath, fileDescriptor, sharedDataTotalByteCount); } else { - SIZE_T currentFileSize = SharedMemoryHelpers::GetFileSize(fileDescriptor); + SIZE_T currentFileSize = SharedMemoryHelpers::GetFileSize(errors, filePath, fileDescriptor); if (currentFileSize < sharedDataUsedByteCount) { throw SharedMemoryException(static_cast(SharedMemoryError::HeaderMismatch)); } if (currentFileSize < sharedDataTotalByteCount) { - SharedMemoryHelpers::SetFileSize(fileDescriptor, sharedDataTotalByteCount); + SharedMemoryHelpers::SetFileSize(errors, filePath, fileDescriptor, sharedDataTotalByteCount); } } @@ -730,14 +998,23 @@ SharedMemoryProcessDataHeader *SharedMemoryProcessDataHeader::CreateOrOpen( // using the file. An exclusive file lock is attempted above to detect whether the file contents are valid, for the case // where a process crashes or is killed after the file is created. Since we already hold the creation/deletion locks, a // non-blocking file lock should succeed. 
- if (!SharedMemoryHelpers::TryAcquireFileLock(fileDescriptor, LOCK_SH | LOCK_NB)) + if (!SharedMemoryHelpers::TryAcquireFileLock(errors, fileDescriptor, LOCK_SH | LOCK_NB)) { + if (errors != nullptr) + { + int errorCode = errno; + errors->Append( + "flock(\"%s\", LOCK_SH | LOCK_NB) == -1; errno == %s;", + (const char *)filePath, + GetFriendlyErrorCodeString(errorCode)); + } + throw SharedMemoryException(static_cast(SharedMemoryError::IO)); } autoCleanup.m_acquiredFileLock = true; // Map the file into memory, and initialize or validate the header - void *mappedBuffer = SharedMemoryHelpers::MemoryMapFile(fileDescriptor, sharedDataTotalByteCount); + void *mappedBuffer = SharedMemoryHelpers::MemoryMapFile(errors, filePath, fileDescriptor, sharedDataTotalByteCount); autoCleanup.m_mappedBuffer = mappedBuffer; autoCleanup.m_mappedBufferByteCount = sharedDataTotalByteCount; SharedMemorySharedDataHeader *sharedDataHeader; @@ -926,11 +1203,11 @@ void SharedMemoryProcessDataHeader::Close() bool releaseSharedData = false; try { - SharedMemoryManager::AcquireCreationDeletionFileLock(); + SharedMemoryManager::AcquireCreationDeletionFileLock(nullptr); autoReleaseCreationDeletionFileLock.m_acquired = true; SharedMemoryHelpers::ReleaseFileLock(m_fileDescriptor); - if (SharedMemoryHelpers::TryAcquireFileLock(m_fileDescriptor, LOCK_EX | LOCK_NB)) + if (SharedMemoryHelpers::TryAcquireFileLock(nullptr, m_fileDescriptor, LOCK_EX | LOCK_NB)) { SharedMemoryHelpers::ReleaseFileLock(m_fileDescriptor); releaseSharedData = true; @@ -1142,7 +1419,7 @@ void SharedMemoryManager::ReleaseCreationDeletionProcessLock() LeaveCriticalSection(&s_creationDeletionProcessLock); } -void SharedMemoryManager::AcquireCreationDeletionFileLock() +void SharedMemoryManager::AcquireCreationDeletionFileLock(SharedMemorySystemCallErrors *errors) { _ASSERTE(IsCreationDeletionProcessLockAcquired()); _ASSERTE(!IsCreationDeletionFileLockAcquired()); @@ -1150,27 +1427,48 @@ void 
SharedMemoryManager::AcquireCreationDeletionFileLock() if (s_creationDeletionLockFileDescriptor == -1) { if (!SharedMemoryHelpers::EnsureDirectoryExists( + errors, *gSharedFilesPath, false /* isGlobalLockAcquired */, false /* createIfNotExist */, true /* isSystemDirectory */)) { + _ASSERTE(errno == ENOENT); + if (errors != nullptr) + { + errors->Append("stat(\"%s\", ...) == -1; errno == ENOENT;", (const char *)*gSharedFilesPath); + } + throw SharedMemoryException(static_cast(SharedMemoryError::IO)); } + SharedMemoryHelpers::EnsureDirectoryExists( + errors, *s_runtimeTempDirectoryPath, false /* isGlobalLockAcquired */); + SharedMemoryHelpers::EnsureDirectoryExists( + errors, *s_sharedMemoryDirectoryPath, false /* isGlobalLockAcquired */); - s_creationDeletionLockFileDescriptor = SharedMemoryHelpers::OpenDirectory(*s_sharedMemoryDirectoryPath); + + s_creationDeletionLockFileDescriptor = SharedMemoryHelpers::OpenDirectory(errors, *s_sharedMemoryDirectoryPath); if (s_creationDeletionLockFileDescriptor == -1) { + if (errors != nullptr) + { + int errorCode = errno; + errors->Append( + "open(\"%s\", O_RDONLY | O_CLOEXEC, 0) == -1; errno == %s;", + (const char *)*s_sharedMemoryDirectoryPath, + GetFriendlyErrorCodeString(errorCode)); + } + throw SharedMemoryException(static_cast(SharedMemoryError::IO)); } } - bool acquiredFileLock = SharedMemoryHelpers::TryAcquireFileLock(s_creationDeletionLockFileDescriptor, LOCK_EX); + bool acquiredFileLock = SharedMemoryHelpers::TryAcquireFileLock(errors, s_creationDeletionLockFileDescriptor, LOCK_EX); _ASSERTE(acquiredFileLock); #ifdef _DEBUG s_creationDeletionFileLockOwnerThreadId = THREADSilentGetCurrentThreadId(); diff --git a/src/coreclr/pal/src/synchmgr/wait.cpp b/src/coreclr/pal/src/synchmgr/wait.cpp index d666d5101ba7a..5ae53759fa2be 100644 --- a/src/coreclr/pal/src/synchmgr/wait.cpp +++ b/src/coreclr/pal/src/synchmgr/wait.cpp @@ -439,7 +439,7 @@ DWORD CorUnix::InternalWaitForMultipleObjectsEx( try { MutexTryAcquireLockResult 
tryAcquireLockResult = - static_cast(processDataHeader->GetData())->TryAcquireLock(dwMilliseconds); + static_cast(processDataHeader->GetData())->TryAcquireLock(nullptr, dwMilliseconds); switch (tryAcquireLockResult) { case MutexTryAcquireLockResult::AcquiredLock: diff --git a/src/coreclr/pal/src/synchobj/mutex.cpp b/src/coreclr/pal/src/synchobj/mutex.cpp index 3fff2e7917fad..e3d63fe232b49 100644 --- a/src/coreclr/pal/src/synchobj/mutex.cpp +++ b/src/coreclr/pal/src/synchobj/mutex.cpp @@ -23,6 +23,7 @@ SET_DEFAULT_DEBUG_CHANNEL(SYNC); // some headers have code with asserts, so do t #include "pal/mutex.hpp" #include "pal/file.hpp" #include "pal/thread.hpp" +#include "pal/utils.h" #include "../synchmgr/synchmanager.hpp" @@ -92,33 +93,71 @@ static CAllowedObjectTypes aotAnyMutex(anyMutexTypeIds, ARRAY_SIZE(anyMutexTypeI Function: CreateMutexW + See doc for PAL_CreateMutexW. +--*/ + +HANDLE +PALAPI +CreateMutexW( + IN LPSECURITY_ATTRIBUTES lpMutexAttributes, + IN BOOL bInitialOwner, + IN LPCWSTR lpName) +{ + return PAL_CreateMutexW(bInitialOwner, lpName, nullptr, 0); +} + +/*++ +Function: + PAL_CreateMutexW + Note: lpMutexAttributes currently ignored: -- Win32 object security not supported -- handles to mutex objects are not inheritable Parameters: - See MSDN doc. + lpSystemCallErrors -- An optional buffer into which system call errors are written, for more detailed error information. + dwSystemCallErrorsBufferSize -- Size of the buffer pointed to by lpSystemCallErrors in bytes. + + See MSDN docs on CreateMutexW for all other parameters. 
--*/ HANDLE PALAPI -CreateMutexW( - IN LPSECURITY_ATTRIBUTES lpMutexAttributes, +PAL_CreateMutexW( IN BOOL bInitialOwner, - IN LPCWSTR lpName) + IN LPCWSTR lpName, + IN LPSTR lpSystemCallErrors, + IN DWORD dwSystemCallErrorsBufferSize) { HANDLE hMutex = NULL; PAL_ERROR palError; CPalThread *pthr = NULL; char utf8Name[SHARED_MEMORY_MAX_NAME_CHAR_COUNT + 1]; - PERF_ENTRY(CreateMutexW); - ENTRY("CreateMutexW(lpMutexAttr=%p, bInitialOwner=%d, lpName=%p (%S)\n", - lpMutexAttributes, bInitialOwner, lpName, lpName?lpName:W16_NULLSTRING); + PERF_ENTRY(PAL_CreateMutexW); + ENTRY("PAL_CreateMutexW(bInitialOwner=%d, lpName=%p (%S), lpSystemCallErrors=%p, dwSystemCallErrorsBufferSize=%d\n", + bInitialOwner, + lpName, + lpName?lpName:W16_NULLSTRING, + lpSystemCallErrors, + dwSystemCallErrorsBufferSize); pthr = InternalGetCurrentThread(); + /* validate parameters */ + if ((int)dwSystemCallErrorsBufferSize < 0 || (lpSystemCallErrors == nullptr) != (dwSystemCallErrorsBufferSize == 0)) + { + ERROR("One or more parameters are invalid\n"); + palError = ERROR_INVALID_PARAMETER; + goto CreateMutexWExit; + } + + if (lpSystemCallErrors != nullptr) + { + lpSystemCallErrors[0] = '\0'; + } + if (lpName != nullptr) { int bytesWritten = WideCharToMultiByte(CP_ACP, 0, lpName, -1, utf8Name, ARRAY_SIZE(utf8Name), nullptr, nullptr); @@ -138,13 +177,17 @@ CreateMutexW( } } - palError = InternalCreateMutex( - pthr, - lpMutexAttributes, - bInitialOwner, - lpName == nullptr ? nullptr : utf8Name, - &hMutex - ); + { + SharedMemorySystemCallErrors errors(lpSystemCallErrors, (int)dwSystemCallErrorsBufferSize); + palError = InternalCreateMutex( + &errors, + pthr, + nullptr, + bInitialOwner, + lpName == nullptr ? 
nullptr : utf8Name, + &hMutex + ); + } CreateMutexWExit: // @@ -156,14 +199,14 @@ CreateMutexW( pthr->SetLastError(palError); - LOGEXIT("CreateMutexW returns HANDLE %p\n", hMutex); - PERF_EXIT(CreateMutexW); + LOGEXIT("PAL_CreateMutexW returns HANDLE %p\n", hMutex); + PERF_EXIT(PAL_CreateMutexW); return hMutex; } /*++ Function: -CreateMutexW +CreateMutexExW Note: lpMutexAttributes currently ignored: @@ -195,14 +238,16 @@ CreateMutexExW( -- handles to mutex objects are not inheritable Parameters: + errors -- An optional wrapper for system call errors, for more detailed error information. pthr -- thread data for calling thread phEvent -- on success, receives the allocated mutex handle - See MSDN docs on CreateMutex for all other parameters + See MSDN docs on CreateMutex for all other parameters. --*/ PAL_ERROR CorUnix::InternalCreateMutex( + SharedMemorySystemCallErrors *errors, CPalThread *pthr, LPSECURITY_ATTRIBUTES lpMutexAttributes, BOOL bInitialOwner, @@ -286,7 +331,7 @@ CorUnix::InternalCreateMutex( SharedMemoryProcessDataHeader *processDataHeader; try { - processDataHeader = NamedMutexProcessData::CreateOrOpen(lpName, !!bInitialOwner, &createdNamedMutex); + processDataHeader = NamedMutexProcessData::CreateOrOpen(errors, lpName, !!bInitialOwner, &createdNamedMutex); } catch (SharedMemoryException ex) { @@ -512,7 +557,7 @@ OpenMutexA ( goto OpenMutexAExit; } - palError = InternalOpenMutex(pthr, lpName, &hMutex); + palError = InternalOpenMutex(nullptr, pthr, lpName, &hMutex); OpenMutexAExit: if (NO_ERROR != palError) @@ -529,11 +574,8 @@ OpenMutexA ( Function: OpenMutexW -Note: - dwDesiredAccess is currently ignored (no Win32 object security support) - bInheritHandle is currently ignored (handles to mutexes are not inheritable) - -See MSDN doc. +Parameters: + See doc for PAL_OpenMutexW. 
--*/ HANDLE @@ -542,26 +584,61 @@ OpenMutexW( IN DWORD dwDesiredAccess, IN BOOL bInheritHandle, IN LPCWSTR lpName) +{ + return PAL_OpenMutexW(lpName, nullptr, 0); +} + +/*++ +Function: + PAL_OpenMutexW + +Note: + dwDesiredAccess is currently ignored (no Win32 object security support) + bInheritHandle is currently ignored (handles to mutexes are not inheritable) + +Parameters: + lpSystemCallErrors -- An optional buffer into which system call errors are written, for more detailed error information. + dwSystemCallErrorsBufferSize -- Size of the buffer pointed to by lpSystemCallErrors in bytes. + + See MSDN docs on OpenMutexW for all other parameters. +--*/ + +HANDLE +PALAPI +PAL_OpenMutexW( + IN LPCWSTR lpName, + IN LPSTR lpSystemCallErrors, + IN DWORD dwSystemCallErrorsBufferSize) { HANDLE hMutex = NULL; PAL_ERROR palError = NO_ERROR; CPalThread *pthr = NULL; char utf8Name[SHARED_MEMORY_MAX_NAME_CHAR_COUNT + 1]; - PERF_ENTRY(OpenMutexW); - ENTRY("OpenMutexW(dwDesiredAccess=%#x, bInheritHandle=%d, lpName=%p (%S))\n", - dwDesiredAccess, bInheritHandle, lpName, lpName?lpName:W16_NULLSTRING); + PERF_ENTRY(PAL_OpenMutexW); + ENTRY("PAL_OpenMutexW(lpName=%p (%S), lpSystemCallErrors=%p, dwSystemCallErrorsBufferSize=%d)\n", + lpName, + lpName?lpName:W16_NULLSTRING, + lpSystemCallErrors, + dwSystemCallErrorsBufferSize); pthr = InternalGetCurrentThread(); /* validate parameters */ - if (lpName == nullptr) + if (lpName == nullptr || + (int)dwSystemCallErrorsBufferSize < 0 || + (lpSystemCallErrors == nullptr) != (dwSystemCallErrorsBufferSize == 0)) { - ERROR("name is NULL\n"); + ERROR("name is NULL or other parameters are invalid\n"); palError = ERROR_INVALID_PARAMETER; goto OpenMutexWExit; } + if (lpSystemCallErrors != nullptr) + { + lpSystemCallErrors[0] = '\0'; + } + { int bytesWritten = WideCharToMultiByte(CP_ACP, 0, lpName, -1, utf8Name, ARRAY_SIZE(utf8Name), nullptr, nullptr); if (bytesWritten == 0) @@ -578,9 +655,10 @@ OpenMutexW( } goto OpenMutexWExit; } - } - palError = 
InternalOpenMutex(pthr, lpName == nullptr ? nullptr : utf8Name, &hMutex); + SharedMemorySystemCallErrors errors(lpSystemCallErrors, (int)dwSystemCallErrorsBufferSize); + palError = InternalOpenMutex(&errors, pthr, lpName == nullptr ? nullptr : utf8Name, &hMutex); + } OpenMutexWExit: if (NO_ERROR != palError) @@ -588,8 +666,8 @@ OpenMutexW( pthr->SetLastError(palError); } - LOGEXIT("OpenMutexW returns HANDLE %p\n", hMutex); - PERF_EXIT(OpenMutexW); + LOGEXIT("PAL_OpenMutexW returns HANDLE %p\n", hMutex); + PERF_EXIT(PAL_OpenMutexW); return hMutex; } @@ -599,6 +677,7 @@ OpenMutexW( InternalOpenMutex Parameters: + errors -- An optional wrapper for system call errors, for more detailed error information. pthr -- thread data for calling thread phEvent -- on success, receives the allocated mutex handle @@ -607,6 +686,7 @@ OpenMutexW( PAL_ERROR CorUnix::InternalOpenMutex( + SharedMemorySystemCallErrors *errors, CPalThread *pthr, LPCSTR lpName, HANDLE *phMutex @@ -645,7 +725,7 @@ CorUnix::InternalOpenMutex( SharedMemoryProcessDataHeader *processDataHeader; try { - processDataHeader = NamedMutexProcessData::Open(lpName); + processDataHeader = NamedMutexProcessData::Open(errors, lpName); } catch (SharedMemoryException ex) { @@ -746,7 +826,7 @@ DWORD SPINLOCKTryAcquire (LONG * lock) // MutexHelpers #if NAMED_MUTEX_USE_PTHREAD_MUTEX -void MutexHelpers::InitializeProcessSharedRobustRecursiveMutex(pthread_mutex_t *mutex) +void MutexHelpers::InitializeProcessSharedRobustRecursiveMutex(SharedMemorySystemCallErrors *errors, pthread_mutex_t *mutex) { _ASSERTE(mutex != nullptr); @@ -772,6 +852,11 @@ void MutexHelpers::InitializeProcessSharedRobustRecursiveMutex(pthread_mutex_t * int error = pthread_mutexattr_init(&mutexAttributes); if (error != 0) { + if (errors != nullptr) + { + errors->Append("pthread_mutexattr_init(...) 
== %s;", GetFriendlyErrorCodeString(error)); + } + throw SharedMemoryException(static_cast(SharedMemoryError::OutOfMemory)); } autoCleanup.m_mutexAttributes = &mutexAttributes; @@ -788,6 +873,11 @@ void MutexHelpers::InitializeProcessSharedRobustRecursiveMutex(pthread_mutex_t * error = pthread_mutex_init(mutex, &mutexAttributes); if (error != 0) { + if (errors != nullptr) + { + errors->Append("pthread_mutex_init(...) == %s;", GetFriendlyErrorCodeString(error)); + } + throw SharedMemoryException(static_cast(error == EPERM ? SharedMemoryError::IO : SharedMemoryError::OutOfMemory)); } } @@ -800,7 +890,10 @@ void MutexHelpers::DestroyMutex(pthread_mutex_t *mutex) _ASSERTE(error == 0 || error == EBUSY); // the error will be EBUSY if the mutex is locked } -MutexTryAcquireLockResult MutexHelpers::TryAcquireLock(pthread_mutex_t *mutex, DWORD timeoutMilliseconds) +MutexTryAcquireLockResult MutexHelpers::TryAcquireLock( + SharedMemorySystemCallErrors *errors, + pthread_mutex_t *mutex, + DWORD timeoutMilliseconds) { _ASSERTE(mutex != nullptr); @@ -850,7 +943,19 @@ MutexTryAcquireLockResult MutexHelpers::TryAcquireLock(pthread_mutex_t *mutex, D throw SharedMemoryException(static_cast(NamedMutexError::MaximumRecursiveLocksReached)); default: + { + if (errors != nullptr) + { + errors->Append( + "%s(...) == %s;", + timeoutMilliseconds == (DWORD)-1 ? "pthread_mutex_lock" + : timeoutMilliseconds == 0 ? 
"pthread_mutex_trylock" + : "pthread_mutex_timedlock", + GetFriendlyErrorCodeString(lockResult)); + } + throw SharedMemoryException(static_cast(NamedMutexError::Unknown)); + } } } @@ -866,7 +971,7 @@ void MutexHelpers::ReleaseLock(pthread_mutex_t *mutex) //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // NamedMutexSharedData -NamedMutexSharedData::NamedMutexSharedData() +NamedMutexSharedData::NamedMutexSharedData(SharedMemorySystemCallErrors *errors) : #if !NAMED_MUTEX_USE_PTHREAD_MUTEX m_timedWaiterCount(0), @@ -883,7 +988,7 @@ NamedMutexSharedData::NamedMutexSharedData() _ASSERTE(SharedMemoryManager::IsCreationDeletionFileLockAcquired()); #if NAMED_MUTEX_USE_PTHREAD_MUTEX - MutexHelpers::InitializeProcessSharedRobustRecursiveMutex(&m_lock); + MutexHelpers::InitializeProcessSharedRobustRecursiveMutex(errors, &m_lock); #endif // NAMED_MUTEX_USE_PTHREAD_MUTEX } @@ -917,6 +1022,7 @@ void NamedMutexSharedData::IncTimedWaiterCount() ULONG newValue = InterlockedIncrement(reinterpret_cast(&m_timedWaiterCount)); if (newValue == 0) { + InterlockedDecrement(reinterpret_cast(&m_timedWaiterCount)); throw SharedMemoryException(static_cast(SharedMemoryError::OutOfMemory)); } } @@ -979,17 +1085,22 @@ const UINT8 NamedMutexProcessData::SyncSystemVersion = 1; const DWORD NamedMutexProcessData::PollLoopMaximumSleepMilliseconds = 100; -SharedMemoryProcessDataHeader *NamedMutexProcessData::CreateOrOpen(LPCSTR name, bool acquireLockIfCreated, bool *createdRef) +SharedMemoryProcessDataHeader *NamedMutexProcessData::CreateOrOpen( + SharedMemorySystemCallErrors *errors, + LPCSTR name, + bool acquireLockIfCreated, + bool *createdRef) { - return CreateOrOpen(name, true /* createIfNotExist */, acquireLockIfCreated, createdRef); + return CreateOrOpen(errors, name, true /* createIfNotExist */, acquireLockIfCreated, createdRef); } -SharedMemoryProcessDataHeader *NamedMutexProcessData::Open(LPCSTR name) 
+SharedMemoryProcessDataHeader *NamedMutexProcessData::Open(SharedMemorySystemCallErrors *errors, LPCSTR name) { - return CreateOrOpen(name, false /* createIfNotExist */, false /* acquireLockIfCreated */, nullptr /* createdRef */); + return CreateOrOpen(errors, name, false /* createIfNotExist */, false /* acquireLockIfCreated */, nullptr /* createdRef */); } SharedMemoryProcessDataHeader *NamedMutexProcessData::CreateOrOpen( + SharedMemorySystemCallErrors *errors, LPCSTR name, bool createIfNotExist, bool acquireLockIfCreated, @@ -1079,6 +1190,7 @@ SharedMemoryProcessDataHeader *NamedMutexProcessData::CreateOrOpen( bool created; SharedMemoryProcessDataHeader *processDataHeader = SharedMemoryProcessDataHeader::CreateOrOpen( + errors, name, SharedMemorySharedDataHeader(SharedMemoryType::Mutex, SyncSystemVersion), sizeof(NamedMutexSharedData), @@ -1105,7 +1217,7 @@ SharedMemoryProcessDataHeader *NamedMutexProcessData::CreateOrOpen( if (created) { // Initialize the shared data - new(processDataHeader->GetSharedDataHeader()->GetData()) NamedMutexSharedData; + new(processDataHeader->GetSharedDataHeader()->GetData()) NamedMutexSharedData(errors); } if (processDataHeader->GetData() == nullptr) @@ -1115,7 +1227,7 @@ SharedMemoryProcessDataHeader *NamedMutexProcessData::CreateOrOpen( SharedMemoryHelpers::BuildSharedFilesPath(lockFilePath, SHARED_MEMORY_LOCK_FILES_DIRECTORY_NAME); if (created) { - SharedMemoryHelpers::EnsureDirectoryExists(lockFilePath, true /* isGlobalLockAcquired */); + SharedMemoryHelpers::EnsureDirectoryExists(errors, lockFilePath, true /* isGlobalLockAcquired */); } // Create the session directory @@ -1124,7 +1236,7 @@ SharedMemoryProcessDataHeader *NamedMutexProcessData::CreateOrOpen( SharedMemoryHelpers::VerifyStringOperation(id->AppendSessionDirectoryName(lockFilePath)); if (created) { - SharedMemoryHelpers::EnsureDirectoryExists(lockFilePath, true /* isGlobalLockAcquired */); + SharedMemoryHelpers::EnsureDirectoryExists(errors, lockFilePath, true /* 
isGlobalLockAcquired */); autoCleanup.m_lockFilePath = &lockFilePath; autoCleanup.m_sessionDirectoryPathCharCount = lockFilePath.GetCount(); } @@ -1132,14 +1244,22 @@ SharedMemoryProcessDataHeader *NamedMutexProcessData::CreateOrOpen( // Create or open the lock file SharedMemoryHelpers::VerifyStringOperation(lockFilePath.Append('/')); SharedMemoryHelpers::VerifyStringOperation(lockFilePath.Append(id->GetName(), id->GetNameCharCount())); - int lockFileDescriptor = SharedMemoryHelpers::CreateOrOpenFile(lockFilePath, created); + int lockFileDescriptor = SharedMemoryHelpers::CreateOrOpenFile(errors, lockFilePath, created); if (lockFileDescriptor == -1) { _ASSERTE(!created); if (createIfNotExist) { + if (errors != nullptr) + { + errors->Append( + "open(\"%s\", O_RDWR | O_CREAT | O_EXCL | O_CLOEXEC, 0) == -1; errno == ENOENT;", + (const char *)lockFilePath); + } + throw SharedMemoryException(static_cast(SharedMemoryError::IO)); } + return nullptr; } autoCleanup.m_createdLockFile = created; @@ -1164,7 +1284,7 @@ SharedMemoryProcessDataHeader *NamedMutexProcessData::CreateOrOpen( // If the mutex was created and if requested, acquire the lock initially while holding the creation/deletion locks if (created && acquireLockIfCreated) { - MutexTryAcquireLockResult tryAcquireLockResult = processData->TryAcquireLock(0); + MutexTryAcquireLockResult tryAcquireLockResult = processData->TryAcquireLock(errors, 0); _ASSERTE(tryAcquireLockResult == MutexTryAcquireLockResult::AcquiredLock); } } @@ -1331,12 +1451,12 @@ void NamedMutexProcessData::SetNextInThreadOwnedNamedMutexList(NamedMutexProcess m_nextInThreadOwnedNamedMutexList = next; } -MutexTryAcquireLockResult NamedMutexProcessData::TryAcquireLock(DWORD timeoutMilliseconds) +MutexTryAcquireLockResult NamedMutexProcessData::TryAcquireLock(SharedMemorySystemCallErrors *errors, DWORD timeoutMilliseconds) { NamedMutexSharedData *sharedData = GetSharedData(); #if NAMED_MUTEX_USE_PTHREAD_MUTEX - MutexTryAcquireLockResult result = 
MutexHelpers::TryAcquireLock(sharedData->GetLock(), timeoutMilliseconds); + MutexTryAcquireLockResult result = MutexHelpers::TryAcquireLock(errors, sharedData->GetLock(), timeoutMilliseconds); if (result == MutexTryAcquireLockResult::TimedOut) { return result; @@ -1445,7 +1565,7 @@ MutexTryAcquireLockResult NamedMutexProcessData::TryAcquireLock(DWORD timeoutMil bool acquiredFileLock = false; while (sharedData->HasAnyTimedWaiters()) { - if (SharedMemoryHelpers::TryAcquireFileLock(m_sharedLockFileDescriptor, LOCK_EX | LOCK_NB)) + if (SharedMemoryHelpers::TryAcquireFileLock(errors, m_sharedLockFileDescriptor, LOCK_EX | LOCK_NB)) { acquiredFileLock = true; break; @@ -1457,13 +1577,13 @@ MutexTryAcquireLockResult NamedMutexProcessData::TryAcquireLock(DWORD timeoutMil break; } - acquiredFileLock = SharedMemoryHelpers::TryAcquireFileLock(m_sharedLockFileDescriptor, LOCK_EX); + acquiredFileLock = SharedMemoryHelpers::TryAcquireFileLock(errors, m_sharedLockFileDescriptor, LOCK_EX); _ASSERTE(acquiredFileLock); break; } case 0: - if (!SharedMemoryHelpers::TryAcquireFileLock(m_sharedLockFileDescriptor, LOCK_EX | LOCK_NB)) + if (!SharedMemoryHelpers::TryAcquireFileLock(errors, m_sharedLockFileDescriptor, LOCK_EX | LOCK_NB)) { return MutexTryAcquireLockResult::TimedOut; } @@ -1472,7 +1592,7 @@ MutexTryAcquireLockResult NamedMutexProcessData::TryAcquireLock(DWORD timeoutMil default: { // Try to acquire the file lock without waiting - if (SharedMemoryHelpers::TryAcquireFileLock(m_sharedLockFileDescriptor, LOCK_EX | LOCK_NB)) + if (SharedMemoryHelpers::TryAcquireFileLock(errors, m_sharedLockFileDescriptor, LOCK_EX | LOCK_NB)) { break; } @@ -1511,7 +1631,7 @@ MutexTryAcquireLockResult NamedMutexProcessData::TryAcquireLock(DWORD timeoutMil ? 
remainingMilliseconds : PollLoopMaximumSleepMilliseconds; Sleep(sleepMilliseconds); - } while (!SharedMemoryHelpers::TryAcquireFileLock(m_sharedLockFileDescriptor, LOCK_EX | LOCK_NB)); + } while (!SharedMemoryHelpers::TryAcquireFileLock(errors, m_sharedLockFileDescriptor, LOCK_EX | LOCK_NB)); break; } } diff --git a/src/coreclr/scripts/genEventPipe.py b/src/coreclr/scripts/genEventPipe.py index 527180028a9fb..2f3bd00be0a35 100644 --- a/src/coreclr/scripts/genEventPipe.py +++ b/src/coreclr/scripts/genEventPipe.py @@ -1107,7 +1107,9 @@ def getMonoEventPipeImplFilePrefix(): #if WCHAR_MAX == 0xFFFF provider_name_utf8 = g_utf16_to_utf8 ((const gunichar2 *)provider_name, -1, NULL, NULL, NULL); #else - provider_name_utf8 = g_ucs4_to_utf8 ((const gunichar *)provider_name, -1, NULL, NULL, NULL); + gunichar2 *provider_name_utf16 = g_ucs4_to_utf16 ((const gunichar *)provider_name, -1, NULL, NULL, NULL); + provider_name_utf8 = g_utf16_to_utf8 (provider_name_utf16, -1, NULL, NULL, NULL); + g_free (provider_name_utf16); #endif ep_return_null_if_nok (provider_name_utf8 != NULL); diff --git a/src/coreclr/scripts/genEventing.py b/src/coreclr/scripts/genEventing.py index 1086de5e88163..87f3163660e52 100644 --- a/src/coreclr/scripts/genEventing.py +++ b/src/coreclr/scripts/genEventing.py @@ -437,13 +437,16 @@ def generateClrallEvents(eventNodes, allTemplates, target_cpp, runtimeFlavor, wr clrallEvents.append(" {return ") clrallEvents.append("EventPipeEventEnabled" + eventName + "()") - # @TODO Need to add this to nativeaot after switching to using genEtwProvider.py where this fn will be implemented - if runtimeFlavor.coreclr or write_xplatheader: + if runtimeFlavor.coreclr or write_xplatheader or runtimeFlavor.nativeaot: if os.name == 'posix': - clrallEvents.append(" || (XplatEventLogger" + - ("::" if target_cpp else "_") + - "IsEventLoggingEnabled() && EventXplatEnabled" + - eventName + "());}\n\n") + # native AOT does not support non-windows eventing other than via event pipe + if 
not runtimeFlavor.nativeaot: + clrallEvents.append(" || (XplatEventLogger" + + ("::" if target_cpp else "_") + + "IsEventLoggingEnabled() && EventXplatEnabled" + + eventName + "());}\n\n") + else: + clrallEvents.append(";}\n\n") else: clrallEvents.append(" || EventXplatEnabled" + eventName + "();}\n\n") else: @@ -782,7 +785,10 @@ def updateclreventsfile(write_xplatheader, target_cpp, runtimeFlavor, eventpipe_ Clrallevents.write('#include \n') Clrallevents.write('#include "clretwallmain.h"\n') Clrallevents.write('#include "clreventpipewriteevents.h"\n') - Clrallevents.write('#include "EtwEvents.h"\n\n') + Clrallevents.write('#ifdef FEATURE_ETW\n') + Clrallevents.write('#include "ClrEtwAll.h"\n') + Clrallevents.write('#endif\n') + Clrallevents.write('\n') elif generatedFileType == "source-impl-noop": Clrallevents.write('#include \n') Clrallevents.write('#include \n\n') diff --git a/src/coreclr/scripts/jitformat.py b/src/coreclr/scripts/jitformat.py index 51a096c59cd3c..e697e0e5ee350 100644 --- a/src/coreclr/scripts/jitformat.py +++ b/src/coreclr/scripts/jitformat.py @@ -10,59 +10,12 @@ # Script to install and run jit-format over jit source for all configurations. 
################################################################################ - import argparse import jitutil import logging import os -import shutil import subprocess import sys -import tarfile -import tempfile -import zipfile - -class ChangeDir: - def __init__(self, dir): - self.dir = dir - self.cwd = None - - def __enter__(self): - self.cwd = os.getcwd() - os.chdir(self.dir) - - def __exit__(self, exc_type, exc_val, exc_tb): - os.chdir(self.cwd) - -class TempDir: - def __init__(self, path=None): - self.dir = tempfile.mkdtemp() if path is None else path - self.cwd = None - - def __enter__(self): - self.cwd = os.getcwd() - os.chdir(self.dir) - - return self.dir - - def __exit__(self, exc_type, exc_val, exc_tb): - os.chdir(self.cwd) - -def expandPath(path): - return os.path.abspath(os.path.expanduser(path)) - -def del_rw(action, name, exc): - os.chmod(name, 0o651) - os.remove(name) - -def cleanup(jitUtilsPath, bootstrapPath): - if os.path.isdir(jitUtilsPath): - logging.info("Deleting " + jitUtilsPath) - shutil.rmtree(jitUtilsPath, onerror=del_rw) - - if os.path.isfile(bootstrapPath): - logging.info("Deleting " + bootstrapPath) - os.remove(bootstrapPath) def main(argv): logging.basicConfig(format="[%(asctime)s] %(message)s", datefmt="%H:%M:%S") @@ -70,21 +23,28 @@ def main(argv): logger.setLevel(logging.INFO) parser = argparse.ArgumentParser() + required = parser.add_argument_group('required arguments') required.add_argument('-a', '--arch', type=str, default=None, help='architecture to run jit-format on') required.add_argument('-o', '--os', type=str, default=None, help='operating system') - required.add_argument('-c', '--coreclr', type=str, - default=None, help='full path to coreclr') + required.add_argument('-r', '--runtime', type=str, + default=None, help='full path to runtime repo root') + + optional = parser.add_argument_group('optional arguments') + optional.add_argument('--cross', action="store_true", + default=None, help='do cross builds on Linux') + 
optional.add_argument('-j', '--jitutils', type=str, + default=None, help='full path to built jitutils repo root. Uses this instead of downloading bootstrap.sh/cmd and cloning/building jitutils.') args, unknown = parser.parse_known_args(argv) if unknown: logging.warning('Ignoring argument(s): {}'.format(','.join(unknown))) - if args.coreclr is None: - logging.error('Specify --coreclr') + if args.runtime is None: + logging.error('Specify --runtime') return -1 if args.os is None: logging.error('Specify --os') @@ -92,126 +52,141 @@ def main(argv): if args.arch is None: logging.error('Specify --arch') return -1 + if args.cross: + if args.os != "linux": + logging.error('--cross is only valid with -os linux') + return -1 + if args.jitutils is not None: + jitutilsRoot = os.path.abspath(os.path.expanduser(args.jitutils)) + if not os.path.isdir(jitutilsRoot): + logging.error('Bad path to jitutils') + return -1 - if not os.path.isdir(expandPath(args.coreclr)): - logging.error('Bad path to coreclr') + runtime = os.path.abspath(os.path.expanduser(args.runtime)).replace('/', os.sep) + if not os.path.isdir(runtime): + logging.error('Bad runtime path') return -1 - coreclr = args.coreclr.replace('/', os.sep) - platform = args.os arch = args.arch my_env = os.environ - # Download formatting tools - repoRoot = os.path.dirname(os.path.dirname(coreclr)) - formattingScriptFolder = os.path.join(repoRoot, "eng", "formatting") - formattingDownloadScriptCommand = [] - if platform == 'linux' or platform == 'osx': - formattingDownloadScriptCommand = [os.path.join(formattingScriptFolder, "download-tools.sh")] - elif platform == 'windows': + # Download formatting tools clang-format and clang-tidy and add them to PATH + formattingScriptFolder = os.path.join(runtime, "eng", "formatting") + if not os.path.isdir(formattingScriptFolder): + logging.error('Bad runtime path: eng/formatting directory not found') + return -1 + + if platform == 'windows': formattingDownloadScriptCommand = ["powershell", 
os.path.join(formattingScriptFolder, "download-tools.ps1")] + else: + formattingDownloadScriptCommand = [os.path.join(formattingScriptFolder, "download-tools.sh")] proc = subprocess.Popen(formattingDownloadScriptCommand) - if proc.wait() != 0: logging.error("Formatting tool download failed") return -1 - my_env["PATH"] = os.path.join(repoRoot, "artifacts", "tools") + os.pathsep + my_env["PATH"] + my_env["PATH"] = os.path.join(runtime, "artifacts", "tools") + os.pathsep + my_env["PATH"] - # Download bootstrap + with jitutil.TempDir() as temp_location: + assert len(os.listdir(temp_location)) == 0 - bootstrapFilename = "" + if args.jitutils is not None: + logging.info('--jitutils passed: not downloading bootstrap.cmd/sh and cloning/building jitutils repo') - jitUtilsPath = os.path.join(coreclr, "jitutils") + else: - cleanup(jitUtilsPath, '') + # Download bootstrap + if platform == 'windows': + bootstrapFilename = "bootstrap.cmd" + else: + bootstrapFilename = "bootstrap.sh" - if platform == 'linux' or platform == 'osx': - bootstrapFilename = "bootstrap.sh" - elif platform == 'windows': - bootstrapFilename = "bootstrap.cmd" + bootstrapUrl = "https://raw.githubusercontent.com/dotnet/jitutils/main/" + bootstrapFilename + bootstrapPath = os.path.join(temp_location, bootstrapFilename) + if not jitutil.download_one_url(bootstrapUrl, bootstrapPath) or not os.path.isfile(bootstrapPath): + logging.error("Did not download bootstrap!") + return -1 - bootstrapUrl = "https://raw.githubusercontent.com/dotnet/jitutils/main/" + bootstrapFilename + if platform == 'windows': + # Need to ensure we have Windows line endings on the downloaded script file, + # which is downloaded with Unix line endings. 
+ logging.info('Convert {} to Windows line endings'.format(bootstrapPath)) - with TempDir() as temp_location: - bootstrapPath = os.path.join(temp_location, bootstrapFilename) + content = None + with open(bootstrapPath, 'rb') as open_file: + content = open_file.read() - assert len(os.listdir(os.path.dirname(bootstrapPath))) == 0 + content = content.replace(b'\n', b'\r\n') - if not jitutil.download_one_url(bootstrapUrl, bootstrapPath): - logging.error("Did not download bootstrap!") - return -1 + with open(bootstrapPath, 'wb') as open_file: + open_file.write(content) - if platform == 'windows': - # Need to ensure we have Windows line endings on the downloaded script file, - # which is downloaded with Unix line endings. - logging.info('Convert {} to Windows line endings'.format(bootstrapPath)) + # On *nix platforms, we need to make the bootstrap file executable - content = None - with open(bootstrapPath, 'rb') as open_file: - content = open_file.read() + if platform == 'linux' or platform == 'osx': + logging.info("Making bootstrap executable") + os.chmod(bootstrapPath, 0o751) - content = content.replace(b'\n', b'\r\n') + # Run bootstrap + if platform == 'windows': + command = [bootstrapPath] + else: + command = ['bash', bootstrapPath] - with open(bootstrapPath, 'wb') as open_file: - open_file.write(content) + command_string = " ".join(command) + logging.info('Running: {}'.format(command_string)) + proc = subprocess.Popen(command, env=my_env) + output,error = proc.communicate() + if proc.returncode != 0: + logging.error("Bootstrap failed") + return -1 - # On *nix platforms, we need to make the bootstrap file executable + jitutilsRoot = os.path.join(temp_location, "jitutils") - if platform == 'linux' or platform == 'osx': - logging.info("Making bootstrap executable") - os.chmod(bootstrapPath, 0o751) + # end of 'if args.jitutils is None' - # Run bootstrap - if platform == 'linux' or platform == 'osx': - logging.info('Running: bash {}'.format(bootstrapPath)) - proc = 
subprocess.Popen(['bash', bootstrapPath], env=my_env) - output,error = proc.communicate() - elif platform == 'windows': - logging.info('Running: {}'.format(bootstrapPath)) - proc = subprocess.Popen([bootstrapPath], env=my_env) - output,error = proc.communicate() + # Run jit-format - if proc.returncode != 0: - cleanup('', bootstrapPath) - logging.error("Bootstrap failed") + jitutilsBin = os.path.join(jitutilsRoot, "bin") + if not os.path.isdir(jitutilsBin): + logging.error("jitutils not built!") return -1 - # Run jit-format - - returncode = 0 - jitutilsBin = os.path.join(os.path.dirname(bootstrapPath), "jitutils", "bin") my_env["PATH"] = jitutilsBin + os.pathsep + my_env["PATH"] - if not os.path.isdir(jitutilsBin): - logging.error("Jitutils not built!") - return -1 + if platform == 'windows': + jitformat = os.path.join(jitutilsBin, "jit-format.exe") + else: + jitformat = os.path.join(jitutilsBin, "jit-format") - jitformat = jitutilsBin + if not os.path.isfile(jitformat): + logging.error("jit-format not found") + return -1 - if platform == 'linux' or platform == 'osx': - jitformat = os.path.join(jitformat, "jit-format") - elif platform == 'windows': - jitformat = os.path.join(jitformat,"jit-format.exe") errorMessage = "" builds = ["Checked", "Debug", "Release"] projects = ["dll", "standalone", "crossgen"] + returncode = 0 for build in builds: for project in projects: - command = jitformat + " -a " + arch + " -b " + build + " -o " + platform + " -c " + coreclr + " --verbose --projects " + project - logging.info('Running: {}'.format(command)) - proc = subprocess.Popen([jitformat, "-a", arch, "-b", build, "-o", platform, "-c", coreclr, "--verbose", "--projects", project], env=my_env) + command = [jitformat, "-a", arch, "-b", build, "-o", platform, "-r", runtime, "--verbose", "--projects", project] + if args.cross: + command += ["--cross"] + + command_string = " ".join(command) + logging.info('Running: {}'.format(command_string)) + proc = subprocess.Popen(command, 
env=my_env) output,error = proc.communicate() errorcode = proc.returncode if errorcode != 0: errorMessage += "\tjit-format -a " + arch + " -b " + build + " -o " + platform - errorMessage += " -c --verbose --fix --projects " + project +"\n" + errorMessage += " -r --verbose --fix --projects " + project + "\n" returncode = errorcode # Fix mode doesn't return an error, so we have to run the build, then run with @@ -219,25 +194,23 @@ def main(argv): # of jit-format will return a formatting failure. if errorcode == -2: # If errorcode was -2, no need to run clang-tidy again - proc = subprocess.Popen([jitformat, "--fix", "--untidy", "-a", arch, "-b", build, "-o", platform, "-c", coreclr, "--verbose", "--projects", project], env=my_env) + proc = subprocess.Popen([jitformat, "--fix", "--untidy", "-a", arch, "-b", build, "-o", platform, "-r", runtime, "--verbose", "--projects", project], env=my_env) output,error = proc.communicate() else: # Otherwise, must run both - proc = subprocess.Popen([jitformat, "--fix", "-a", arch, "-b", build, "-o", platform, "-c", coreclr, "--verbose", "--projects", project], env=my_env) + proc = subprocess.Popen([jitformat, "--fix", "-a", arch, "-b", build, "-o", platform, "-r", runtime, "--verbose", "--projects", project], env=my_env) output,error = proc.communicate() - patchFilePath = os.path.join(coreclr, "format.patch") + patchFilePath = os.path.join(runtime, "format.patch") if returncode != 0: # Create a patch file logging.info("Creating patch file {}".format(patchFilePath)) - jitSrcPath = os.path.join(coreclr, "jit") + jitSrcPath = os.path.join(runtime, "src", "coreclr", "jit") patchFile = open(patchFilePath, "w") proc = subprocess.Popen(["git", "diff", "--patch", "-U20", "--", jitSrcPath], env=my_env, stdout=patchFile) output,error = proc.communicate() - cleanup(jitUtilsPath, bootstrapPath) - if returncode != 0: logging.info("There were errors in formatting. 
Please run jit-format locally with: \n") logging.info(errorMessage) diff --git a/src/coreclr/scripts/superpmi.py b/src/coreclr/scripts/superpmi.py index b9bd822524226..4113c97108b86 100644 --- a/src/coreclr/scripts/superpmi.py +++ b/src/coreclr/scripts/superpmi.py @@ -482,29 +482,7 @@ def create_artifacts_base_name(coreclr_args, mch_file): artifacts_base_name = "{}.{}".format(coreclr_args.tag, artifacts_base_name) return artifacts_base_name - -def read_csv_metrics(path): - """ Read a metrics summary file produced by superpmi and return the rows as a dictionary of dictionaries. - - Args: - path (str) : path to .csv file - - Returns: - A dictionary of dictionaries. For example, dict["Overall"]["Successful - compiles"] will access the total number of successful compiles and - dict["MinOpts"]["Successful compiles"] will access the number of - minopts compilations. - """ - - dict = {} - with open(path) as csv_file: - reader = csv.DictReader(csv_file) - for row in reader: - dict[row["Name"]] = row - - return dict - -def read_csv_diffs(path): +def read_csv(path): with open(path) as csv_file: reader = csv.DictReader(csv_file) return list(reader) @@ -1620,19 +1598,19 @@ def replay(self): flags = common_flags.copy() fail_mcl_file = os.path.join(temp_location, os.path.basename(mch_file) + "_fail.mcl") - metrics_summary_file = os.path.join(temp_location, os.path.basename(mch_file) + "_metrics.csv") + details_info_file = os.path.join(temp_location, os.path.basename(mch_file) + "_details.csv") flags += [ "-f", fail_mcl_file, # Failing mc List - "-metricsSummary", metrics_summary_file + "-details", details_info_file ] command = [self.superpmi_path] + flags + [self.jit_path, mch_file] return_code = run_and_log(command) - metrics = read_csv_metrics(metrics_summary_file) + details = read_csv(details_info_file) + print_superpmi_result(return_code, self.coreclr_args, self.aggregate_replay_metrics(details), None) - print_superpmi_result(return_code, self.coreclr_args, metrics, None) if 
return_code != 0: # Don't report as replay failure missing data (return code 3). # Anything else, such as compilation failure (return code 1, typically a JIT assert) will be @@ -1667,6 +1645,29 @@ def replay(self): return result + def aggregate_replay_metrics(self, details): + """ Given the CSV details file output by SPMI for a replay aggregate the + successes, misses and failures + + Returns: + A dictionary of metrics + """ + + num_successes = 0 + num_misses = 0 + num_failures = 0 + for row in details: + result = row["Result"] + if result == "Success": + num_successes += 1 + elif result == "Miss": + num_misses += 1 + else: + assert(result == "Failure") + num_failures += 1 + + return {"Overall": {"Successful compiles": num_successes, "Missing compiles": num_misses, "Failing compiles": num_failures}} + ################################################################################ # SuperPMI Replay/AsmDiffs ################################################################################ @@ -1753,6 +1754,71 @@ def __enter__(self): def __exit__(self, *args): self.write_fh.write("\n\n\n") +def aggregate_diff_metrics(details): + """ Given the CSV details file output by SPMI for a diff aggregate the metrics. 
+ """ + + base_minopts = {"Successful compiles": 0, "Missing compiles": 0, "Failing compiles": 0, + "Contexts with diffs": 0, "Diffed code bytes": 0, "Diff executed instructions": 0} + base_fullopts = base_minopts.copy() + + diff_minopts = base_minopts.copy() + diff_fullopts = base_minopts.copy() + + for row in details: + base_result = row["Base result"] + + if row["MinOpts"] == "True": + base_dict = base_minopts + diff_dict = diff_minopts + else: + base_dict = base_fullopts + diff_dict = diff_fullopts + + if base_result == "Success": + base_dict["Successful compiles"] += 1 + elif base_result == "Miss": + base_dict["Missing compiles"] += 1 + else: + assert(base_result == "Failure") + base_dict["Failing compiles"] += 1 + + diff_result = row["Diff result"] + if diff_result == "Success": + diff_dict["Successful compiles"] += 1 + elif diff_result == "Miss": + diff_dict["Missing compiles"] += 1 + else: + assert(diff_result == "Failure") + diff_dict["Failing compiles"] += 1 + + if base_result == "Success" and diff_result == "Success": + base_size = int(row["Base size"]) + diff_size = int(row["Diff size"]) + base_dict["Diffed code bytes"] += base_size + diff_dict["Diffed code bytes"] += diff_size + + base_insts = int(row["Base instructions"]) + diff_insts = int(row["Diff instructions"]) + base_dict["Diff executed instructions"] += base_insts + diff_dict["Diff executed instructions"] += diff_insts + + if row["Has diff"] == "True": + base_dict["Contexts with diffs"] += 1 + diff_dict["Contexts with diffs"] += 1 + + base_overall = base_minopts.copy() + for k in base_overall.keys(): + base_overall[k] += base_fullopts[k] + + diff_overall = diff_minopts.copy() + for k in diff_overall.keys(): + diff_overall[k] += diff_fullopts[k] + + return ({"Overall": base_overall, "MinOpts": base_minopts, "FullOpts": base_fullopts}, + {"Overall": diff_overall, "MinOpts": diff_minopts, "FullOpts": diff_fullopts}) + + class SuperPMIReplayAsmDiffs: """ SuperPMI Replay AsmDiffs class @@ -1905,19 
+1971,14 @@ def replay_with_asm_diffs(self): logging.info("Running asm diffs of %s", mch_file) fail_mcl_file = os.path.join(temp_location, os.path.basename(mch_file) + "_fail.mcl") - diffs_info = os.path.join(temp_location, os.path.basename(mch_file) + "_diffs.csv") - - base_metrics_summary_file = os.path.join(temp_location, os.path.basename(mch_file) + "_base_metrics.csv") - diff_metrics_summary_file = os.path.join(temp_location, os.path.basename(mch_file) + "_diff_metrics.csv") + detailed_info_file = os.path.join(temp_location, os.path.basename(mch_file) + "_details.csv") flags = [ "-a", # Asm diffs "-v", "ewi", # display errors, warnings, missing, jit info "-f", fail_mcl_file, # Failing mc List - "-diffsInfo", diffs_info, # Information about diffs + "-details", detailed_info_file, # Detailed information about each context "-r", os.path.join(temp_location, "repro"), # Repro name prefix, create .mc repro files - "-baseMetricsSummary", base_metrics_summary_file, # Create summary of metrics we can use to get total code size impact - "-diffMetricsSummary", diff_metrics_summary_file, ] flags += altjit_asm_diffs_flags flags += base_option_flags @@ -1955,8 +2016,8 @@ def replay_with_asm_diffs(self): command = [self.superpmi_path] + flags + [self.base_jit_path, self.diff_jit_path, mch_file] return_code = run_and_log(command) - base_metrics = read_csv_metrics(base_metrics_summary_file) - diff_metrics = read_csv_metrics(diff_metrics_summary_file) + details = read_csv(detailed_info_file) + (base_metrics, diff_metrics) = aggregate_diff_metrics(details) print_superpmi_result(return_code, self.coreclr_args, base_metrics, diff_metrics) artifacts_base_name = create_artifacts_base_name(self.coreclr_args, mch_file) @@ -1976,8 +2037,7 @@ def replay_with_asm_diffs(self): repro_base_command_line = "{} {} {}".format(self.superpmi_path, " ".join(altjit_asm_diffs_flags), self.diff_jit_path) save_repro_mc_files(temp_location, self.coreclr_args, artifacts_base_name, 
repro_base_command_line) - diffs = read_csv_diffs(diffs_info) - + diffs = [r for r in details if r["Has diff"] == "True"] if any(diffs): files_with_asm_diffs.append(mch_file) @@ -2212,13 +2272,9 @@ def create_exception(): os.remove(fail_mcl_file) fail_mcl_file = None - if os.path.isfile(base_metrics_summary_file): - os.remove(base_metrics_summary_file) - base_metrics_summary_file = None - - if os.path.isfile(diff_metrics_summary_file): - os.remove(diff_metrics_summary_file) - diff_metrics_summary_file = None + if os.path.isfile(detailed_info_file): + os.remove(detailed_info_file) + detailed_info_file = None ################################################################################################ end of for mch_file in self.mch_files @@ -2647,8 +2703,7 @@ def replay_with_throughput_diff(self): logging.info("Running throughput diff of %s", mch_file) - base_metrics_summary_file = os.path.join(temp_location, os.path.basename(mch_file) + "_base_metrics.csv") - diff_metrics_summary_file = os.path.join(temp_location, os.path.basename(mch_file) + "_diff_metrics.csv") + detailed_info_file = os.path.join(temp_location, os.path.basename(mch_file) + "_details.csv") pin_options = [ "-follow_execv", # attach to child processes @@ -2656,8 +2711,7 @@ def replay_with_throughput_diff(self): ] flags = [ "-applyDiff", - "-baseMetricsSummary", base_metrics_summary_file, # Instruction counts are stored in these - "-diffMetricsSummary", diff_metrics_summary_file, + "-details", detailed_info_file, ] flags += target_flags flags += base_option_flags @@ -2692,8 +2746,8 @@ def replay_with_throughput_diff(self): command_string = " ".join(command) logging.debug("'%s': Error return code: %s", command_string, return_code) - base_metrics = read_csv_metrics(base_metrics_summary_file) - diff_metrics = read_csv_metrics(diff_metrics_summary_file) + details = read_csv(detailed_info_file) + (base_metrics, diff_metrics) = aggregate_diff_metrics(details) if base_metrics is not None and diff_metrics 
is not None: base_instructions = int(base_metrics["Overall"]["Diff executed instructions"]) @@ -2711,13 +2765,9 @@ def replay_with_throughput_diff(self): logging.warning("No metric files present?") if not self.coreclr_args.skip_cleanup: - if os.path.isfile(base_metrics_summary_file): - os.remove(base_metrics_summary_file) - base_metrics_summary_file = None - - if os.path.isfile(diff_metrics_summary_file): - os.remove(diff_metrics_summary_file) - diff_metrics_summary_file = None + if os.path.isfile(detailed_info_file): + os.remove(detailed_info_file) + detailed_info_file = None ################################################################################################ end of for mch_file in self.mch_files diff --git a/src/coreclr/tools/Common/Internal/Runtime/MethodTable.Constants.cs b/src/coreclr/tools/Common/Internal/Runtime/MethodTable.Constants.cs index b528195d066b6..4e0d70ab32471 100644 --- a/src/coreclr/tools/Common/Internal/Runtime/MethodTable.Constants.cs +++ b/src/coreclr/tools/Common/Internal/Runtime/MethodTable.Constants.cs @@ -32,9 +32,9 @@ internal enum EETypeFlags : uint HasFinalizerFlag = 0x00100000, /// - /// This type contain GC pointers. + /// This type has optional fields present. /// - HasPointersFlag = 0x00200000, + OptionalFieldsFlag = 0x00200000, /// /// This MethodTable has sealed vtable entries @@ -48,9 +48,9 @@ internal enum EETypeFlags : uint GenericVarianceFlag = 0x00800000, /// - /// This type has optional fields present. + /// This type contain GC pointers. /// - OptionalFieldsFlag = 0x01000000, + HasPointersFlag = 0x01000000, /// /// This type is generic. 
diff --git a/src/coreclr/tools/Common/TypeSystem/IL/UnsafeAccessors.cs b/src/coreclr/tools/Common/TypeSystem/IL/UnsafeAccessors.cs index e9c9b52621369..6338f725be223 100644 --- a/src/coreclr/tools/Common/TypeSystem/IL/UnsafeAccessors.cs +++ b/src/coreclr/tools/Common/TypeSystem/IL/UnsafeAccessors.cs @@ -71,13 +71,12 @@ public static MethodIL TryGetIL(EcmaMethod method) return GenerateAccessorBadImageFailure(method); } - const string ctorName = ".ctor"; - context.TargetType = ValidateTargetType(retType); - if (context.TargetType == null) + if (!ValidateTargetType(retType, out context.TargetType)) { return GenerateAccessorBadImageFailure(method); } + const string ctorName = ".ctor"; if (!TrySetTargetMethod(ref context, ctorName, out isAmbiguous)) { return GenerateAccessorSpecificFailure(ref context, ctorName, isAmbiguous); @@ -100,8 +99,7 @@ public static MethodIL TryGetIL(EcmaMethod method) return GenerateAccessorBadImageFailure(method); } - context.TargetType = ValidateTargetType(firstArgType); - if (context.TargetType == null) + if (!ValidateTargetType(firstArgType, out context.TargetType)) { return GenerateAccessorBadImageFailure(method); } @@ -132,8 +130,7 @@ public static MethodIL TryGetIL(EcmaMethod method) return GenerateAccessorBadImageFailure(method); } - context.TargetType = ValidateTargetType(firstArgType); - if (context.TargetType == null) + if (!ValidateTargetType(firstArgType, out context.TargetType)) { return GenerateAccessorBadImageFailure(method); } @@ -221,7 +218,7 @@ private struct GenerationContext public FieldDesc TargetField; } - private static TypeDesc ValidateTargetType(TypeDesc targetTypeMaybe) + private static bool ValidateTargetType(TypeDesc targetTypeMaybe, out TypeDesc validated) { TypeDesc targetType = targetTypeMaybe.IsByRef ? 
((ParameterizedType)targetTypeMaybe).ParameterType @@ -232,10 +229,11 @@ private static TypeDesc ValidateTargetType(TypeDesc targetTypeMaybe) if ((targetType.IsParameterizedType && !targetType.IsArray) || targetType.IsFunctionPointer) { - ThrowHelper.ThrowBadImageFormatException(); + targetType = null; } - return targetType; + validated = targetType; + return validated != null; } private static bool DoesMethodMatchUnsafeAccessorDeclaration(ref GenerationContext context, MethodDesc method, bool ignoreCustomModifiers) diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/HardwareIntrinsicILProvider.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/HardwareIntrinsicILProvider.cs new file mode 100644 index 0000000000000..5e3f9e3f8c0a3 --- /dev/null +++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/HardwareIntrinsicILProvider.cs @@ -0,0 +1,69 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
+ +using System; +using System.Collections.Generic; + +using Internal.IL; +using Internal.IL.Stubs; +using Internal.JitInterface; +using Internal.TypeSystem; + +namespace ILCompiler +{ + public sealed class HardwareIntrinsicILProvider : ILProvider + { + private readonly InstructionSetSupport _isaSupport; + private readonly TypeSystemContext _context; + private readonly FieldDesc _isSupportedField; + private readonly ILProvider _nestedProvider; + private readonly Dictionary _instructionSetMap; + + public HardwareIntrinsicILProvider(InstructionSetSupport isaSupport, FieldDesc isSupportedField, ILProvider nestedProvider) + { + _isaSupport = isaSupport; + _context = isSupportedField.Context; + _isSupportedField = isSupportedField; + _nestedProvider = nestedProvider; + + _instructionSetMap = new Dictionary(); + foreach (var instructionSetInfo in InstructionSetFlags.ArchitectureToValidInstructionSets(_context.Target.Architecture)) + { + if (instructionSetInfo.ManagedName != "") + _instructionSetMap.Add(instructionSetInfo.ManagedName, instructionSetInfo.InstructionSet); + } + } + + public override MethodIL GetMethodIL(MethodDesc method) + { + TypeDesc owningType = method.OwningType; + string intrinsicId = InstructionSetSupport.GetHardwareIntrinsicId(_context.Target.Architecture, owningType); + if (!string.IsNullOrEmpty(intrinsicId) + && HardwareIntrinsicHelpers.IsIsSupportedMethod(method)) + { + InstructionSet instructionSet = _instructionSetMap[intrinsicId]; + + bool isSupported = _isaSupport.IsInstructionSetSupported(instructionSet); + bool isOptimisticallySupported = _isaSupport.OptimisticFlags.HasInstructionSet(instructionSet); + + // If this is an instruction set that is optimistically supported, but is not one of the + // intrinsics that are known to be always available, emit IL that checks the support level + // at runtime. 
+ if (!isSupported && isOptimisticallySupported) + { + return HardwareIntrinsicHelpers.EmitIsSupportedIL(method, _isSupportedField, instructionSet); + } + else + { + ILOpcode flag = isSupported ? ILOpcode.ldc_i4_1 : ILOpcode.ldc_i4_0; + return new ILStubMethodIL(method, + new byte[] { (byte)flag, (byte)ILOpcode.ret }, + Array.Empty(), + null); + } + } + + return _nestedProvider.GetMethodIL(method); + } + } +} diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/TypePreinit.cs b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/TypePreinit.cs index 989d4782402ac..24bbc6c286d45 100644 --- a/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/TypePreinit.cs +++ b/src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/TypePreinit.cs @@ -37,6 +37,7 @@ public class TypePreinit private readonly TypePreinitializationPolicy _policy; private readonly Dictionary _fieldValues = new Dictionary(); private readonly Dictionary _internedStrings = new Dictionary(); + private readonly Dictionary _internedTypes = new Dictionary(); private TypePreinit(MetadataType owningType, CompilationModuleGroup compilationGroup, ILProvider ilProvider, TypePreinitializationPolicy policy) { @@ -167,6 +168,9 @@ private Status TryScanMethod(MethodIL methodIL, Value[] parameters, Stack::.ctor(void*, int32) + StackEntry entry = stack.Pop(); + long size = entry.ValueKind switch + { + StackValueKind.Int32 => entry.Value.AsInt32(), + StackValueKind.NativeInt => (context.Target.PointerSize == 4) + ? 
entry.Value.AsInt32() : entry.Value.AsInt64(), + _ => long.MaxValue + }; + + // Arbitrary limit for allocation size to prevent compiler OOM + if (size < 0 || size > 8192) + return Status.Fail(methodIL.OwningMethod, ILOpcode.localloc); + + opcode = reader.ReadILOpcode(); + if (opcode < ILOpcode.ldc_i4_0 || opcode > ILOpcode.ldc_i4) + return Status.Fail(methodIL.OwningMethod, ILOpcode.localloc); + + int maybeSpanLength = opcode switch + { + ILOpcode.ldc_i4_s => (sbyte)reader.ReadILByte(), + ILOpcode.ldc_i4 => (int)reader.ReadILUInt32(), + _ => opcode - ILOpcode.ldc_i4_0, + }; + + opcode = reader.ReadILOpcode(); + if (opcode != ILOpcode.newobj) + return Status.Fail(methodIL.OwningMethod, ILOpcode.localloc); + + var ctorMethod = (MethodDesc)methodIL.GetObject(reader.ReadILToken()); + if (!TryGetSpanElementType(ctorMethod.OwningType, isReadOnlySpan: false, out MetadataType elementType) + || ctorMethod.Signature.Length != 2 + || !ctorMethod.Signature[0].IsPointer + || !ctorMethod.Signature[1].IsWellKnownType(WellKnownType.Int32) + || maybeSpanLength * elementType.InstanceFieldSize.AsInt != size) + return Status.Fail(methodIL.OwningMethod, ILOpcode.localloc); + + var instance = new ReadOnlySpanValue(elementType, new byte[size], index: 0, (int)size); + stack.PushFromLocation(ctorMethod.OwningType, instance); + } + break; + case ILOpcode.stfld: { FieldDesc field = (FieldDesc)methodIL.GetObject(reader.ReadILToken()); @@ -669,14 +761,17 @@ private Status TryScanMethod(MethodIL methodIL, Value[] parameters, Stack= ILOpcode.ldind_i1 and <= ILOpcode.ldind_ref) or ILOpcode.ldobj)) + { + // In the interpreter memory model, there's no conversion from a byref to an integer. + // Roslyn however sometimes emits a sequence of conv_u followed by ldind and we can + // have a narrow path to handle that one. 
+ // + // For example: + // + // static unsafe U Read(T val) where T : unmanaged where U : unmanaged => *(U*)&val; + stack.Push(popped); + goto again; + } else { return Status.Fail(methodIL.OwningMethod, opcode); @@ -787,16 +920,26 @@ private Status TryScanMethod(MethodIL methodIL, Value[] parameters, Stack reader.ReadILByte(), - ILOpcode.ldloca => reader.ReadILUInt16(), + ILOpcode.ldloca_s or ILOpcode.ldarga_s => reader.ReadILByte(), + ILOpcode.ldloca or ILOpcode.ldarga => reader.ReadILUInt16(), _ => throw new NotImplementedException(), // Unreachable }; - if (index >= locals.Length) + Value[] storage = opcode is ILOpcode.ldloca or ILOpcode.ldloca_s ? locals : parameters; + if (index >= storage.Length) { ThrowHelper.ThrowInvalidProgramException(); } - Value localValue = locals[index]; + Value localValue = storage[index]; if (localValue == null || !localValue.TryCreateByRef(out Value byrefValue)) { return Status.Fail(methodIL.OwningMethod, opcode); @@ -971,7 +1117,7 @@ private Status TryScanMethod(MethodIL methodIL, Value[] parameters, Stack throw new NotImplementedException() // unreachable }; } - else if (value1.ValueKind == StackValueKind.Int64 && value2.ValueKind == StackValueKind.Int64) + else if (value1.ValueKind.WithNormalizedNativeInt(context) == StackValueKind.Int64 && value2.ValueKind.WithNormalizedNativeInt(context) == StackValueKind.Int64) { branchTaken = normalizedOpcode switch { @@ -1040,6 +1186,28 @@ private Status TryScanMethod(MethodIL methodIL, Value[] parameters, Stack count) + { + reader.Seek(nextInstruction); + } + else + { + reader.Seek(reader.Offset + (int)(4 * target)); + reader.Seek(nextInstruction + (int)reader.ReadILUInt32()); + } + } + break; + case ILOpcode.leave: case ILOpcode.leave_s: { @@ -1075,7 +1243,7 @@ private Status TryScanMethod(MethodIL methodIL, Value[] parameters, Stack throw new NotImplementedException(), // unreachable }; - stack.Push(StackValueKind.Int32, ValueTypeValue.FromInt32(result)); + stack.Push(isNint ? 
StackValueKind.NativeInt : StackValueKind.Int32, ValueTypeValue.FromInt32(result)); } - else if (value1.ValueKind == StackValueKind.Int64 && value2.ValueKind == StackValueKind.Int64) + else if (value1.ValueKind.WithNormalizedNativeInt(context) == StackValueKind.Int64 && value2.ValueKind.WithNormalizedNativeInt(context) == StackValueKind.Int64) { if (isDivRem && value2.Value.AsInt64() == 0) return Status.Fail(methodIL.OwningMethod, opcode, "Division by zero"); @@ -1218,7 +1389,7 @@ private Status TryScanMethod(MethodIL methodIL, Value[] parameters, Stack throw new NotImplementedException(), // unreachable }; - stack.Push(StackValueKind.Int64, ValueTypeValue.FromInt64(result)); + stack.Push(isNint ? StackValueKind.NativeInt : StackValueKind.Int64, ValueTypeValue.FromInt64(result)); } else if (value1.ValueKind == StackValueKind.Float && value2.ValueKind == StackValueKind.Float) { @@ -1244,7 +1415,32 @@ private Status TryScanMethod(MethodIL methodIL, Value[] parameters, Stack addend.Value.AsInt32(), + _ => context.Target.PointerSize == 8 ? 
addend.Value.AsInt64() : addend.Value.AsInt32() + }; + + var previousByRef = (ByRefValue)reference.Value; + if (addition > previousByRef.PointedToBytes.Length - previousByRef.PointedToOffset + || addition + previousByRef.PointedToOffset < 0) + return Status.Fail(methodIL.OwningMethod, "Out of range byref access"); + + stack.Push(StackValueKind.ByRef, new ByRefValue(previousByRef.PointedToBytes, (int)(previousByRef.PointedToOffset + addition))); } else { @@ -1417,6 +1613,7 @@ private Status TryScanMethod(MethodIL methodIL, Value[] parameters, Stack ILOpcode.ldind_i1, + TypeFlags.Boolean or TypeFlags.Byte => ILOpcode.ldind_u1, + TypeFlags.Int16 => ILOpcode.ldind_i2, + TypeFlags.Char or TypeFlags.UInt16 => ILOpcode.ldind_u2, + TypeFlags.Int32 => ILOpcode.ldind_i4, + TypeFlags.UInt32 => ILOpcode.ldind_u4, + TypeFlags.Int64 or TypeFlags.UInt64 => ILOpcode.ldind_i8, + TypeFlags.Single => ILOpcode.ldind_r4, + TypeFlags.Double => ILOpcode.ldind_r8, + _ => ILOpcode.ldobj, + }; + + if (opcode == ILOpcode.ldobj) + { + return Status.Fail(methodIL.OwningMethod, opcode); + } + } + StackEntry entry = stack.Pop(); if (entry.Value is ByRefValue byRefVal) { @@ -1464,10 +1684,82 @@ private Status TryScanMethod(MethodIL methodIL, Value[] parameters, Stack ILOpcode.stind_i1, + TypeFlags.Int16 or TypeFlags.Char or TypeFlags.UInt16 => ILOpcode.stind_i2, + TypeFlags.Int32 or TypeFlags.UInt32 => ILOpcode.stind_i4, + TypeFlags.Int64 or TypeFlags.UInt64 => ILOpcode.stind_i8, + _ => ILOpcode.stobj, + }; + + if (opcode == ILOpcode.stobj) + { + return Status.Fail(methodIL.OwningMethod, opcode); + } + } + + Value val = opcode switch + { + ILOpcode.stind_i1 => stack.PopIntoLocation(context.GetWellKnownType(WellKnownType.Byte)), + ILOpcode.stind_i2 => stack.PopIntoLocation(context.GetWellKnownType(WellKnownType.UInt16)), + ILOpcode.stind_i4 => stack.PopIntoLocation(context.GetWellKnownType(WellKnownType.UInt32)), + ILOpcode.stind_i8 => 
stack.PopIntoLocation(context.GetWellKnownType(WellKnownType.UInt64)), + _ => throw new NotImplementedException() + }; + + StackEntry location = stack.Pop(); + if (location.ValueKind != StackValueKind.ByRef) + ThrowHelper.ThrowInvalidProgramException(); + + byte[] dest = ((ByRefValue)location.Value).PointedToBytes; + int destOffset = ((ByRefValue)location.Value).PointedToOffset; + byte[] src = ((ValueTypeValue)val).InstanceBytes; + if (destOffset + src.Length > dest.Length) + return Status.Fail(methodIL.OwningMethod, "Out of bound access"); + Array.Copy(src, 0, dest, destOffset, src.Length); + } + break; + case ILOpcode.constrained: - // Fallthrough. If this is ever implemented, make sure delegates to static virtual methods - // are also handled. We currently assume the frozen delegate will not be to a static - // virtual interface method. + constrainedType = methodIL.GetObject(reader.ReadILToken()) as TypeDesc; + goto again; + + case ILOpcode.unaligned: + reader.ReadILByte(); + break; + + case ILOpcode.initblk: + { + StackEntry size = stack.Pop(); + StackEntry value = stack.Pop(); + StackEntry addr = stack.Pop(); + + if (size.ValueKind != StackValueKind.Int32 + || value.ValueKind != StackValueKind.Int32 + || addr.ValueKind != StackValueKind.ByRef) + return Status.Fail(methodIL.OwningMethod, opcode); + + uint sizeBytes = (uint)size.Value.AsInt32(); + + var addressValue = (ByRefValue)addr.Value; + if (sizeBytes > addressValue.PointedToBytes.Length - addressValue.PointedToOffset + || sizeBytes > int.MaxValue /* paranoid check that cast to int is legit */) + return Status.Fail(methodIL.OwningMethod, opcode); + + Array.Fill(addressValue.PointedToBytes, (byte)value.Value.AsInt32(), addressValue.PointedToOffset, (int)sizeBytes); + } + break; + default: return Status.Fail(methodIL.OwningMethod, opcode); } @@ -1477,19 +1769,35 @@ private Status TryScanMethod(MethodIL methodIL, Value[] parameters, Stack(), 0, 0); + } + else if (TryGetSpanElementType(locationType, 
isReadOnlySpan: false, out MetadataType spanElementType)) { - return new ReadOnlySpanValue(readOnlySpanElementType, Array.Empty()); + return new ReadOnlySpanValue(spanElementType, Array.Empty(), 0, 0); } else { @@ -1498,7 +1806,7 @@ private static BaseValueTypeValue NewUninitializedLocationValue(TypeDesc locatio } } - private static bool TryHandleIntrinsicCall(MethodDesc method, Value[] parameters, out Value retVal) + private bool TryHandleIntrinsicCall(MethodDesc method, Value[] parameters, out Value retVal) { retVal = default; @@ -1531,7 +1839,7 @@ private static bool TryHandleIntrinsicCall(MethodDesc method, Value[] parameters byte[] rvaData = Internal.TypeSystem.Ecma.EcmaFieldExtensions.GetFieldRvaData(createSpanEcmaField); if (rvaData.Length % elementSize != 0) return false; - retVal = new ReadOnlySpanValue(elementType, rvaData); + retVal = new ReadOnlySpanValue(elementType, rvaData, 0, rvaData.Length); return true; } return false; @@ -1544,6 +1852,34 @@ private static bool TryHandleIntrinsicCall(MethodDesc method, Value[] parameters return spanRef.TryAccessElement(spanIndex.AsInt32(), out retVal); } return false; + case "GetTypeFromHandle" when method.OwningType is MetadataType typeType + && typeType.Name == "Type" && typeType.Namespace == "System" + && typeType.Module == typeType.Context.SystemModule + && parameters[0] is RuntimeTypeHandleValue typeHandle: + { + if (!_internedTypes.TryGetValue(typeHandle.Type, out RuntimeTypeValue runtimeType)) + { + _internedTypes.Add(typeHandle.Type, runtimeType = new RuntimeTypeValue(typeHandle.Type)); + } + retVal = runtimeType; + return true; + } + case "get_IsValueType" when method.OwningType is MetadataType typeType + && typeType.Name == "Type" && typeType.Namespace == "System" + && typeType.Module == typeType.Context.SystemModule + && parameters[0] is RuntimeTypeValue typeToCheckForValueType: + { + retVal = ValueTypeValue.FromSByte(typeToCheckForValueType.TypeRepresented.IsValueType ? 
(sbyte)1 : (sbyte)0); + return true; + } + case "op_Equality" when method.OwningType is MetadataType typeType + && typeType.Name == "Type" && typeType.Namespace == "System" + && typeType.Module == typeType.Context.SystemModule + && (parameters[0] is RuntimeTypeValue || parameters[1] is RuntimeTypeValue): + { + retVal = ValueTypeValue.FromSByte(parameters[0] == parameters[1] ? (sbyte)1 : (sbyte)0); + return true; + } } return false; @@ -1785,18 +2121,6 @@ public Value PopIntoLocation(TypeDesc locationType) } } - private enum StackValueKind - { - Unknown, - Int32, - Int64, - NativeInt, - Float, - ByRef, - ObjRef, - ValueType, - } - /// /// Represents a field value that can be serialized into a preinitialized blob. /// @@ -2023,15 +2347,73 @@ public override bool GetRawData(NodeFactory factory, out object data) } } + private sealed class RuntimeTypeHandleValue : BaseValueTypeValue, IInternalModelingOnlyValue + { + public TypeDesc Type { get; } + + public RuntimeTypeHandleValue(TypeDesc type) + { + Type = type; + } + + public override int Size => Type.Context.Target.PointerSize; + + public override bool Equals(Value value) + { + if (!(value is RuntimeTypeHandleValue)) + { + ThrowHelper.ThrowInvalidProgramException(); + } + + return Type == ((RuntimeTypeHandleValue)value).Type; + } + + public override void WriteFieldData(ref ObjectDataBuilder builder, NodeFactory factory) + { + throw new NotSupportedException(); + } + + public override bool GetRawData(NodeFactory factory, out object data) + { + data = null; + return false; + } + } + + private sealed class RuntimeTypeValue : ReferenceTypeValue, IInternalModelingOnlyValue + { + public TypeDesc TypeRepresented { get; } + + public RuntimeTypeValue(TypeDesc type) + : base(type.Context.SystemModule.GetKnownType("System", "RuntimeType")) + { + TypeRepresented = type; + } + + public override bool GetRawData(NodeFactory factory, out object data) + { + data = null; + return false; + } + public override ReferenceTypeValue 
ToForeignInstance(int baseInstructionCounter) => this; + public override void WriteFieldData(ref ObjectDataBuilder builder, NodeFactory factory) => throw new NotImplementedException(); + } + private sealed class ReadOnlySpanValue : BaseValueTypeValue, IInternalModelingOnlyValue { private readonly MetadataType _elementType; private readonly byte[] _bytes; + private readonly int _index; + private readonly int _length; - public ReadOnlySpanValue(MetadataType elementType, byte[] bytes) + public ReadOnlySpanValue(MetadataType elementType, byte[] bytes, int index, int length) { + Debug.Assert(index <= bytes.Length); + Debug.Assert(length <= bytes.Length - index); _elementType = elementType; _bytes = bytes; + _index = index; + _length = length; } public override int Size => 2 * _elementType.Context.Target.PointerSize; @@ -2063,20 +2445,26 @@ public override Value Clone() public override bool TryCreateByRef(out Value value) { - value = new ReadOnlySpanReferenceValue(_elementType, _bytes); + value = new ReadOnlySpanReferenceValue(_elementType, _bytes, _index, _length); return true; } } - private sealed class ReadOnlySpanReferenceValue : Value + private sealed class ReadOnlySpanReferenceValue : Value, IHasInstanceFields { private readonly MetadataType _elementType; private readonly byte[] _bytes; + private readonly int _index; + private readonly int _length; - public ReadOnlySpanReferenceValue(MetadataType elementType, byte[] bytes) + public ReadOnlySpanReferenceValue(MetadataType elementType, byte[] bytes, int index, int length) { + Debug.Assert(index <= bytes.Length); + Debug.Assert(length <= bytes.Length - index); _elementType = elementType; _bytes = bytes; + _index = index; + _length = length; } public override bool Equals(Value value) @@ -2101,13 +2489,38 @@ public override bool GetRawData(NodeFactory factory, out object data) public bool TryAccessElement(int index, out Value value) { value = default; - int limit = _bytes.Length / _elementType.InstanceFieldSize.AsInt; + 
int limit = _length / _elementType.InstanceFieldSize.AsInt; if (index >= limit) return false; - value = new ByRefValue(_bytes, index * _elementType.InstanceFieldSize.AsInt); + value = new ByRefValue(_bytes, _index + index * _elementType.InstanceFieldSize.AsInt); return true; } + + public void SetField(FieldDesc field, Value value) => ThrowHelper.ThrowInvalidProgramException(); + + public Value GetField(FieldDesc field) + { + MetadataType elementType; + if (!TryGetSpanElementType(field.OwningType, isReadOnlySpan: true, out elementType) + && !TryGetSpanElementType(field.OwningType, isReadOnlySpan: false, out elementType)) + ThrowHelper.ThrowInvalidProgramException(); + + if (elementType != _elementType) + ThrowHelper.ThrowInvalidProgramException(); + + if (field.Name == "_length") + return ValueTypeValue.FromInt32(_length / _elementType.InstanceFieldSize.AsInt); + + Debug.Assert(field.Name == "_reference"); + return new ByRefValue(_bytes, _index); + } + + public ByRefValue GetFieldAddress(FieldDesc field) + { + ThrowHelper.ThrowInvalidProgramException(); + return null; // unreached + } } private sealed class MethodPointerValue : BaseValueTypeValue, IInternalModelingOnlyValue @@ -2773,4 +3186,15 @@ public sealed class TypeLoaderAwarePreinitializationPolicy : TypePreinitializati public override bool CanPreinitializeAllConcreteFormsForCanonForm(DefType type) => false; } } + +#pragma warning disable SA1400 // Element 'Extensions' should declare an access modifier + file static class Extensions + { + public static StackValueKind WithNormalizedNativeInt(this StackValueKind kind, TypeSystemContext context) + => kind switch + { + StackValueKind.NativeInt => context.Target.PointerSize == 8 ? 
StackValueKind.Int64 : StackValueKind.Int32, + _ => kind + }; + } } diff --git a/src/coreclr/tools/aot/ILCompiler.Compiler/ILCompiler.Compiler.csproj b/src/coreclr/tools/aot/ILCompiler.Compiler/ILCompiler.Compiler.csproj index b67f3fa966e0d..513b2f95d64c5 100644 --- a/src/coreclr/tools/aot/ILCompiler.Compiler/ILCompiler.Compiler.csproj +++ b/src/coreclr/tools/aot/ILCompiler.Compiler/ILCompiler.Compiler.csproj @@ -451,6 +451,7 @@ + diff --git a/src/coreclr/tools/aot/ILCompiler.RyuJit/Compiler/RyuJitCompilation.cs b/src/coreclr/tools/aot/ILCompiler.RyuJit/Compiler/RyuJitCompilation.cs index a7615fc54fc87..1f3252f3ddc4b 100644 --- a/src/coreclr/tools/aot/ILCompiler.RyuJit/Compiler/RyuJitCompilation.cs +++ b/src/coreclr/tools/aot/ILCompiler.RyuJit/Compiler/RyuJitCompilation.cs @@ -22,8 +22,6 @@ public sealed class RyuJitCompilation : Compilation { private readonly ConditionalWeakTable _corinfos = new ConditionalWeakTable(); internal readonly RyuJitCompilationOptions _compilationOptions; - private readonly ExternSymbolMappedField _hardwareIntrinsicFlags; - private readonly Dictionary _instructionSetMap; private readonly ProfileDataManager _profileDataManager; private readonly MethodImportationErrorProvider _methodImportationErrorProvider; private readonly int _parallelism; @@ -47,16 +45,8 @@ internal RyuJitCompilation( : base(dependencyGraph, nodeFactory, roots, ilProvider, debugInformationProvider, devirtualizationManager, inliningPolicy, logger) { _compilationOptions = options; - _hardwareIntrinsicFlags = new ExternSymbolMappedField(nodeFactory.TypeSystemContext.GetWellKnownType(WellKnownType.Int32), "g_cpuFeatures"); InstructionSetSupport = instructionSetSupport; - _instructionSetMap = new Dictionary(); - foreach (var instructionSetInfo in InstructionSetFlags.ArchitectureToValidInstructionSets(TypeSystemContext.Target.Architecture)) - { - if (instructionSetInfo.ManagedName != "") - _instructionSetMap.Add(instructionSetInfo.ManagedName, 
instructionSetInfo.InstructionSet); - } - _profileDataManager = profileDataManager; _methodImportationErrorProvider = errorProvider; @@ -213,28 +203,6 @@ private void CompileSingleMethod(CorInfoImpl corInfo, MethodCodeNode methodCodeN Logger.LogError($"Method will always throw because: {exception.Message}", 1005, method, MessageSubCategory.AotAnalysis); } } - - public override MethodIL GetMethodIL(MethodDesc method) - { - TypeDesc owningType = method.OwningType; - string intrinsicId = InstructionSetSupport.GetHardwareIntrinsicId(TypeSystemContext.Target.Architecture, owningType); - if (!string.IsNullOrEmpty(intrinsicId) - && HardwareIntrinsicHelpers.IsIsSupportedMethod(method)) - { - InstructionSet instructionSet = _instructionSetMap[intrinsicId]; - - // If this is an instruction set that is optimistically supported, but is not one of the - // intrinsics that are known to be always available, emit IL that checks the support level - // at runtime. - if (!InstructionSetSupport.IsInstructionSetSupported(instructionSet) - && InstructionSetSupport.OptimisticFlags.HasInstructionSet(instructionSet)) - { - return HardwareIntrinsicHelpers.EmitIsSupportedIL(method, _hardwareIntrinsicFlags, instructionSet); - } - } - - return base.GetMethodIL(method); - } } [Flags] diff --git a/src/coreclr/tools/aot/ILCompiler.Trimming.Tests/TestCasesRunner/ResultChecker.cs b/src/coreclr/tools/aot/ILCompiler.Trimming.Tests/TestCasesRunner/ResultChecker.cs index ab4a0b94d76f6..70016d6f85759 100644 --- a/src/coreclr/tools/aot/ILCompiler.Trimming.Tests/TestCasesRunner/ResultChecker.cs +++ b/src/coreclr/tools/aot/ILCompiler.Trimming.Tests/TestCasesRunner/ResultChecker.cs @@ -303,18 +303,19 @@ private void VerifyLoggedMessages (AssemblyDefinition original, TrimmingTestLogg loggedMessages.Remove (loggedMessage); break; } - if (actualName?.StartsWith (expectedTypeName) == true && - actualName?.Contains (".cctor") == true && - (expectedMember is FieldDefinition || expectedMember is 
PropertyDefinition)) { - expectedWarningFound = true; - loggedMessages.Remove (loggedMessage); - break; - } - if (methodDesc.IsConstructor && - new AssemblyQualifiedToken (methodDesc.OwningType).Equals(new AssemblyQualifiedToken (expectedMember))) { - expectedWarningFound = true; - loggedMessages.Remove (loggedMessage); - break; + if (actualName?.StartsWith (expectedTypeName) == true) { + if (actualName?.Contains (".cctor") == true && + (expectedMember is FieldDefinition || expectedMember is PropertyDefinition)) { + expectedWarningFound = true; + loggedMessages.Remove (loggedMessage); + break; + } + if (methodDesc.IsConstructor && + (expectedMember is FieldDefinition || expectedMember is PropertyDefinition || new AssemblyQualifiedToken (methodDesc.OwningType).Equals(new AssemblyQualifiedToken (expectedMember)))) { + expectedWarningFound = true; + loggedMessages.Remove (loggedMessage); + break; + } } } else if (attrProvider is AssemblyDefinition expectedAssembly) { // Allow assembly-level attributes to match warnings from compiler-generated Main diff --git a/src/coreclr/tools/aot/ILCompiler/Program.cs b/src/coreclr/tools/aot/ILCompiler/Program.cs index ed7b013ae3d94..ce3912742aad7 100644 --- a/src/coreclr/tools/aot/ILCompiler/Program.cs +++ b/src/coreclr/tools/aot/ILCompiler/Program.cs @@ -20,6 +20,7 @@ using Internal.TypeSystem.Ecma; using ILCompiler.Dataflow; +using ILCompiler.DependencyAnalysis; using ILLink.Shared; using Debug = System.Diagnostics.Debug; @@ -143,6 +144,11 @@ public int Run() if (typeSystemContext.InputFilePaths.Count == 0) throw new CommandLineException("No input files specified"); + ilProvider = new HardwareIntrinsicILProvider( + instructionSetSupport, + new ExternSymbolMappedField(typeSystemContext.GetWellKnownType(WellKnownType.Int32), "g_cpuFeatures"), + ilProvider); + SecurityMitigationOptions securityMitigationOptions = 0; string guard = Get(_command.Guard); if (StringComparer.OrdinalIgnoreCase.Equals(guard, "cf")) diff --git 
a/src/coreclr/tools/aot/crossgen2/crossgen2.csproj b/src/coreclr/tools/aot/crossgen2/crossgen2.csproj index 9df088cd4c249..6f8578b26b969 100644 --- a/src/coreclr/tools/aot/crossgen2/crossgen2.csproj +++ b/src/coreclr/tools/aot/crossgen2/crossgen2.csproj @@ -1,113 +1,14 @@ - - - - + - $(RuntimeBinDir)crossgen2 - - false - false - true - linux-x64;linux-musl-x64;linux-arm;linux-musl-arm;linux-arm64;linux-musl-arm64;freebsd-x64;freebsd-arm64;osx-x64;osx-arm64;win-x64;win-x86;win-arm64 - $(PackageRID) - false - true + $(RuntimeBinDir)/crossgen2 + false + + $(NetCoreAppToolCurrent) - - - - true - true - - false - - false - true - - - - - - $(CoreCLRILCompilerDir) - $(CoreCLRCrossILCompilerDir) - $(ROOTFS_DIR) - $(CoreCLRILCompilerDir)netstandard/ILCompiler.Build.Tasks.dll - $(CoreCLRAotSdkDir) - $(MicrosoftNetCoreAppRuntimePackRidLibTfmDir) - $(MicrosoftNetCoreAppRuntimePackNativeDir) - false - - .dwarf - --flat - - - - - - - - - - - - - - - - - $(MicrosoftNetCoreAppRuntimePackDir) - - - - - - - - - $(RuntimeIdentifier) - - - x86_64 - aarch64 - arm64 - - - $(CrossCompileArch)-linux-gnu - $(CrossCompileArch)-alpine-linux-musl - $(CrossCompileArch)-unknown-freebsd12 - - - - - - - - - - clang - - - - - - - - $(_CC_LDFLAGS.SubString(0, $(_CC_LDFLAGS.IndexOf(';')))) - <_LDFLAGS>$(_CC_LDFLAGS.SubString($([MSBuild]::Add($(_CC_LDFLAGS.IndexOf(';')), 1)))) - lld - - - diff --git a/src/coreclr/tools/aot/crossgen2/crossgen2.props b/src/coreclr/tools/aot/crossgen2/crossgen2.props index 0f2f954c6a154..56ed8dd4277b3 100644 --- a/src/coreclr/tools/aot/crossgen2/crossgen2.props +++ b/src/coreclr/tools/aot/crossgen2/crossgen2.props @@ -3,7 +3,6 @@ crossgen2 true Exe - $(NetCoreAppToolCurrent) 8002,NU1701 x64;x86;arm64;arm;loongarch64 AnyCPU @@ -46,18 +45,16 @@ arm $(LibPrefix)jitinterface_$(TargetArchitectureForSharedLibraries)$(LibSuffix) - - $(RuntimeBinDir)$(CrossHostArch) - - - - $(BuildArchitecture) - $(RuntimeBinDir)/$(CrossHostArch)/crossgen2 - false - - - diff --git 
a/src/coreclr/tools/aot/crossgen2/crossgen2_inbuild.csproj b/src/coreclr/tools/aot/crossgen2/crossgen2_inbuild.csproj new file mode 100644 index 0000000000000..2ee5778d9e818 --- /dev/null +++ b/src/coreclr/tools/aot/crossgen2/crossgen2_inbuild.csproj @@ -0,0 +1,8 @@ + + + $(BuildArchitecture) + $(RuntimeBinDir)/$(BuildArchitecture)/crossgen2/tools + $(NetCoreAppToolCurrent) + + + diff --git a/src/coreclr/tools/aot/crossgen2/crossgen2_publish.csproj b/src/coreclr/tools/aot/crossgen2/crossgen2_publish.csproj new file mode 100644 index 0000000000000..948d29c14141e --- /dev/null +++ b/src/coreclr/tools/aot/crossgen2/crossgen2_publish.csproj @@ -0,0 +1,111 @@ + + + + <_IsPublishing>true + + + + + + + false + false + + false + true + $(PackageRID) + true + $(NetCoreAppCurrent) + true + + + + + + true + true + + false + + false + true + + + + + + + + + + + + + + $(CoreCLRILCompilerDir) + $(CoreCLRCrossILCompilerDir) + $(ROOTFS_DIR) + $(CoreCLRILCompilerDir)netstandard/ILCompiler.Build.Tasks.dll + $(CoreCLRAotSdkDir) + $(MicrosoftNetCoreAppRuntimePackRidLibTfmDir) + $(MicrosoftNetCoreAppRuntimePackNativeDir) + false + + .dwarf + --flat + + + + + + + + + + + + + + $(RuntimeIdentifier) + + + x86_64 + aarch64 + arm64 + + + $(CrossCompileArch)-linux-gnu + $(CrossCompileArch)-alpine-linux-musl + $(CrossCompileArch)-unknown-freebsd12 + + + + + + + + + + clang + + + + + + + + $(_CC_LDFLAGS.SubString(0, $(_CC_LDFLAGS.IndexOf(';')))) + <_LDFLAGS>$(_CC_LDFLAGS.SubString($([MSBuild]::Add($(_CC_LDFLAGS.IndexOf(';')), 1)))) + lld + + + + diff --git a/src/coreclr/tools/superpmi/superpmi/CMakeLists.txt b/src/coreclr/tools/superpmi/superpmi/CMakeLists.txt index 6f31a51a4e879..5dd42eb5631b2 100644 --- a/src/coreclr/tools/superpmi/superpmi/CMakeLists.txt +++ b/src/coreclr/tools/superpmi/superpmi/CMakeLists.txt @@ -31,7 +31,6 @@ set(SUPERPMI_SOURCES jitdebugger.cpp jitinstance.cpp methodstatsemitter.cpp - metricssummary.cpp neardiffer.cpp parallelsuperpmi.cpp superpmi.cpp diff --git 
a/src/coreclr/tools/superpmi/superpmi/commandline.cpp b/src/coreclr/tools/superpmi/superpmi/commandline.cpp index 9686ee51f0332..8f9529a72812a 100644 --- a/src/coreclr/tools/superpmi/superpmi/commandline.cpp +++ b/src/coreclr/tools/superpmi/superpmi/commandline.cpp @@ -85,11 +85,8 @@ void CommandLine::DumpHelp(const char* program) printf(" t - method throughput time\n"); printf(" * - all available method stats\n"); printf("\n"); - printf(" -metricsSummary , -baseMetricsSummary \n"); - printf(" Emit a summary of metrics to the specified file\n"); - printf("\n"); - printf(" -diffMetricsSummary \n"); - printf(" Same as above, but emit for the diff/second JIT"); + printf(" -details \n"); + printf(" Emit detailed information about the replay/diff of each context into the specified file\n"); printf("\n"); printf(" -a[pplyDiff]\n"); printf(" Compare the compile result generated from the provided JIT with the\n"); @@ -137,12 +134,14 @@ void CommandLine::DumpHelp(const char* program) printf(" -jitoption [force] key=value\n"); printf(" Set the JIT option named \"key\" to \"value\" for JIT 1 if the option was not set.\n"); printf(" With optional force flag overwrites the existing value if it was already set.\n"); - printf(" NOTE: do not use a \"DOTNET_\" prefix, \"key\" and \"value\" are case sensitive!\n"); + printf(" NOTE: do not use a \"DOTNET_\" prefix. \"key\" and \"value\" are case sensitive.\n"); + printf(" \"key#value\" is also accepted.\n"); printf("\n"); printf(" -jit2option [force] key=value\n"); printf(" Set the JIT option named \"key\" to \"value\" for JIT 2 if the option was not set.\n"); printf(" With optional force flag overwrites the existing value if it was already set.\n"); - printf(" NOTE: do not use a \"DOTNET_\" prefix, \"key\" and \"value\" are case sensitive!\n"); + printf(" NOTE: do not use a \"DOTNET_\" prefix. 
\"key\" and \"value\" are case sensitive.\n"); + printf(" \"key#value\" is also accepted.\n"); printf("\n"); printf("Inputs are case sensitive.\n"); printf("\n"); @@ -172,7 +171,7 @@ static bool ParseJitOption(const char* optionString, WCHAR** key, WCHAR** value) char tempKey[1024]; unsigned i; - for (i = 0; optionString[i] != '='; i++) + for (i = 0; (optionString[i] != '=') && (optionString[i] != '#'); i++) { if ((i >= 1023) || (optionString[i] == '\0')) { @@ -329,16 +328,6 @@ bool CommandLine::Parse(int argc, char* argv[], /* OUT */ Options* o) o->mclFilename = argv[i]; } - else if ((_strnicmp(&argv[i][1], "diffsInfo", 9) == 0)) - { - if (++i >= argc) - { - DumpHelp(argv[0]); - return false; - } - - o->diffsInfo = argv[i]; - } else if ((_strnicmp(&argv[i][1], "target", 6) == 0)) { if (++i >= argc) @@ -397,17 +386,7 @@ bool CommandLine::Parse(int argc, char* argv[], /* OUT */ Options* o) o->methodStatsTypes = argv[i]; } - else if ((_strnicmp(&argv[i][1], "metricsSummary", argLen) == 0) || (_strnicmp(&argv[i][1], "baseMetricsSummary", argLen) == 0)) - { - if (++i >= argc) - { - DumpHelp(argv[0]); - return false; - } - - o->baseMetricsSummaryFile = argv[i]; - } - else if ((_strnicmp(&argv[i][1], "diffMetricsSummary", argLen) == 0)) + else if ((_strnicmp(&argv[i][1], "details", argLen) == 0)) { if (++i >= argc) { @@ -415,7 +394,7 @@ bool CommandLine::Parse(int argc, char* argv[], /* OUT */ Options* o) return false; } - o->diffMetricsSummaryFile = argv[i]; + o->details = argv[i]; } else if ((_strnicmp(&argv[i][1], "applyDiff", argLen) == 0)) { @@ -809,13 +788,6 @@ bool CommandLine::Parse(int argc, char* argv[], /* OUT */ Options* o) } } - if (o->diffsInfo != nullptr && !o->applyDiff) - { - LogError("-diffsInfo specified without -applyDiff."); - DumpHelp(argv[0]); - return false; - } - if (o->skipCleanup && !o->parallel) { LogError("-skipCleanup requires -parallel."); diff --git a/src/coreclr/tools/superpmi/superpmi/commandline.h 
b/src/coreclr/tools/superpmi/superpmi/commandline.h index 45ce22e4a4b5a..70f01fbf1018c 100644 --- a/src/coreclr/tools/superpmi/superpmi/commandline.h +++ b/src/coreclr/tools/superpmi/superpmi/commandline.h @@ -13,78 +13,40 @@ class CommandLine class Options { public: - Options() - : nameOfJit(nullptr) - , nameOfJit2(nullptr) - , nameOfInputMethodContextFile(nullptr) - , verbosity(nullptr) - , writeLogFile(nullptr) - , reproName(nullptr) - , breakOnError(false) - , breakOnAssert(false) - , breakOnException(false) - , ignoreStoredConfig(false) - , applyDiff(false) - , parallel(false) + char* nameOfJit = nullptr; + char* nameOfJit2 = nullptr; + char* nameOfInputMethodContextFile = nullptr; + char* verbosity = nullptr; + char* writeLogFile = nullptr; + char* reproName = nullptr; + bool breakOnError = false; + bool breakOnAssert = false; + bool breakOnException = false; + bool ignoreStoredConfig = false; + bool applyDiff = false; + bool parallel = false; // User specified to use /parallel mode. #if !defined(USE_MSVCDIS) && defined(USE_COREDISTOOLS) - , useCoreDisTools(true) // if CoreDisTools is available (but MSVCDIS is not), use it. + bool useCoreDisTools = true; // Use CoreDisTools library instead of Msvcdis #else - , useCoreDisTools(false) // Otherwise, use MSVCDIS if that is available (else no diffs are available). 
+ bool useCoreDisTools = false; // Use CoreDisTools library instead of Msvcdis #endif - , skipCleanup(false) - , workerCount(-1) - , indexCount(-1) - , failureLimit(-1) - , indexes(nullptr) - , hash(nullptr) - , methodStatsTypes(nullptr) - , baseMetricsSummaryFile(nullptr) - , diffMetricsSummaryFile(nullptr) - , mclFilename(nullptr) - , diffsInfo(nullptr) - , targetArchitecture(nullptr) - , compileList(nullptr) - , offset(-1) - , increment(-1) - , forceJitOptions(nullptr) - , forceJit2Options(nullptr) - , jitOptions(nullptr) - , jit2Options(nullptr) - { - } - - char* nameOfJit; - char* nameOfJit2; - char* nameOfInputMethodContextFile; - char* verbosity; - char* writeLogFile; - char* reproName; - bool breakOnError; - bool breakOnAssert; - bool breakOnException; - bool ignoreStoredConfig; - bool applyDiff; - bool parallel; // User specified to use /parallel mode. - bool useCoreDisTools; // Use CoreDisTools library instead of Msvcdis - bool skipCleanup; // In /parallel mode, do we skip cleanup of temporary files? Used for debugging /parallel. - int workerCount; // Number of workers to use for /parallel mode. -1 (or 1) means don't use parallel mode. - int indexCount; // If indexCount is -1 and hash points to nullptr it means compile all. - int failureLimit; // Number of failures after which bail out the replay/asmdiffs. - int* indexes; - char* hash; - char* methodStatsTypes; - char* baseMetricsSummaryFile; - char* diffMetricsSummaryFile; - char* mclFilename; - char* diffsInfo; - char* targetArchitecture; - char* compileList; - int offset; - int increment; - LightWeightMap* forceJitOptions; - LightWeightMap* forceJit2Options; - LightWeightMap* jitOptions; - LightWeightMap* jit2Options; + bool skipCleanup = false; // In /parallel mode, do we skip cleanup of temporary files? Used for debugging /parallel. + int workerCount = -1; // Number of workers to use for /parallel mode. -1 (or 1) means don't use parallel mode. 
+ int indexCount = -1; // If indexCount is -1 and hash points to nullptr it means compile all. + int failureLimit = -1; // Number of failures after which bail out the replay/asmdiffs. + int* indexes = nullptr; + char* hash = nullptr; + char* methodStatsTypes = nullptr; + char* details = nullptr; + char* mclFilename = nullptr; + char* targetArchitecture = nullptr; + char* compileList = nullptr; + int offset = -1; + int increment = -1; + LightWeightMap* forceJitOptions = nullptr; + LightWeightMap* forceJit2Options = nullptr; + LightWeightMap* jitOptions = nullptr; + LightWeightMap* jit2Options = nullptr; }; static bool Parse(int argc, char* argv[], /* OUT */ Options* o); diff --git a/src/coreclr/tools/superpmi/superpmi/jitinstance.cpp b/src/coreclr/tools/superpmi/superpmi/jitinstance.cpp index 37825ebf437cf..d13e0c75800c8 100644 --- a/src/coreclr/tools/superpmi/superpmi/jitinstance.cpp +++ b/src/coreclr/tools/superpmi/superpmi/jitinstance.cpp @@ -8,7 +8,6 @@ #include "jithost.h" #include "errorhandling.h" #include "spmiutil.h" -#include "metricssummary.h" JitInstance* JitInstance::InitJit(char* nameOfJit, bool breakOnAssert, @@ -298,28 +297,23 @@ extern "C" DLLEXPORT NOINLINE void Instrumentor_GetInsCount(UINT64* result) } } -JitInstance::Result JitInstance::CompileMethod(MethodContext* MethodToCompile, int mcIndex, bool collectThroughput, MetricsSummary* metrics, bool* isMinOpts) +ReplayResults JitInstance::CompileMethod(MethodContext* MethodToCompile, int mcIndex, bool collectThroughput) { struct Param : FilterSuperPMIExceptionsParam_CaptureException { JitInstance* pThis; - JitInstance::Result result; CORINFO_METHOD_INFO info; unsigned flags; int mcIndex; bool collectThroughput; - MetricsSummary* metrics; bool* isMinOpts; + ReplayResults results; } param; param.pThis = this; - param.result = RESULT_SUCCESS; // assume success param.flags = 0; param.mcIndex = mcIndex; param.collectThroughput = collectThroughput; - param.metrics = metrics; - param.isMinOpts = 
isMinOpts; - - *isMinOpts = false; + param.results.Result = ReplayResult::Success; // store to instance field our raw values, so we can figure things out a bit later... mc = MethodToCompile; @@ -342,7 +336,7 @@ JitInstance::Result JitInstance::CompileMethod(MethodContext* MethodToCompile, i CORJIT_FLAGS jitFlags; pParam->pThis->mc->repGetJitFlags(&jitFlags, sizeof(jitFlags)); - *pParam->isMinOpts = + pParam->results.IsMinOpts = jitFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_CODE) || jitFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_MIN_OPT) || jitFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_TIER0); @@ -363,11 +357,11 @@ JitInstance::Result JitInstance::CompileMethod(MethodContext* MethodToCompile, i CorInfoMethodRuntimeFlags flags = pParam->pThis->mc->cr->repSetMethodAttribs(pParam->info.ftn); if ((flags & CORINFO_FLG_SWITCHED_TO_MIN_OPT) != 0) { - *pParam->isMinOpts = true; + pParam->results.IsMinOpts = true; } else if ((flags & CORINFO_FLG_SWITCHED_TO_OPTIMIZED) != 0) { - *pParam->isMinOpts = false; + pParam->results.IsMinOpts = false; } if (jitResult == CORJIT_SKIPPED) @@ -416,12 +410,12 @@ JitInstance::Result JitInstance::CompileMethod(MethodContext* MethodToCompile, i pParam->pThis->mc->cr->recMessageLog(jitResult == CORJIT_OK ? 
"Successful Compile" : "Successful Compile (BADCODE)"); - pParam->metrics->NumCodeBytes += NCodeSizeBlock; + pParam->results.NumCodeBytes = NCodeSizeBlock; } else { LogDebug("compileMethod failed with result %d", jitResult); - pParam->result = RESULT_ERROR; + pParam->results.Result = ReplayResult::Error; } } PAL_EXCEPT_FILTER(FilterSuperPMIExceptions_CaptureExceptionAndStop) @@ -433,7 +427,7 @@ JitInstance::Result JitInstance::CompileMethod(MethodContext* MethodToCompile, i char* message = e.GetExceptionMessage(); LogMissing("Method context %d failed to replay: %s", mcIndex, message); e.DeleteMessage(); - param.result = RESULT_MISSING; + param.results.Result = ReplayResult::Miss; } else if (e.GetCode() == EXCEPTIONCODE_RECORDED_EXCEPTION) { @@ -452,7 +446,7 @@ JitInstance::Result JitInstance::CompileMethod(MethodContext* MethodToCompile, i else { e.ShowAndDeleteMessage(); - param.result = RESULT_ERROR; + param.results.Result = ReplayResult::Error; } } PAL_ENDTRY @@ -469,22 +463,8 @@ JitInstance::Result JitInstance::CompileMethod(MethodContext* MethodToCompile, i UINT64 insCountAfter = 0; Instrumentor_GetInsCount(&insCountAfter); - if (param.result == RESULT_SUCCESS) - { - metrics->SuccessfulCompiles++; - metrics->NumExecutedInstructions += static_cast(insCountAfter - insCountBefore); - } - else - { - metrics->FailingCompiles++; - } - - if (param.result == RESULT_MISSING) - { - metrics->MissingCompiles++; - } - - return param.result; + param.results.NumExecutedInstructions = static_cast(insCountAfter - insCountBefore); + return param.results; } void JitInstance::timeResult(CORINFO_METHOD_INFO info, unsigned flags) diff --git a/src/coreclr/tools/superpmi/superpmi/jitinstance.h b/src/coreclr/tools/superpmi/superpmi/jitinstance.h index 35b322d172fc5..42f1f4ade7c46 100644 --- a/src/coreclr/tools/superpmi/superpmi/jitinstance.h +++ b/src/coreclr/tools/superpmi/superpmi/jitinstance.h @@ -9,6 +9,21 @@ #include "methodcontext.h" #include "cycletimer.h" +enum class 
ReplayResult +{ + Success, + Error, + Miss, +}; + +struct ReplayResults +{ + ReplayResult Result = ReplayResult::Success; + bool IsMinOpts = false; + uint32_t NumCodeBytes = 0; + uint64_t NumExecutedInstructions = 0; +}; + class JitInstance { private: @@ -34,12 +49,6 @@ class JitInstance bool forceClearAltJitFlag; bool forceSetAltJitFlag; - enum Result - { - RESULT_ERROR, - RESULT_SUCCESS, - RESULT_MISSING - }; CycleTimer lt; MethodContext* mc; ULONGLONG times[2]; @@ -60,7 +69,7 @@ class JitInstance bool resetConfig(MethodContext* firstContext); - Result CompileMethod(MethodContext* MethodToCompile, int mcIndex, bool collectThroughput, struct MetricsSummary* metrics, bool* isMinOpts); + ReplayResults CompileMethod(MethodContext* MethodToCompile, int mcIndex, bool collectThroughput); const WCHAR* getForceOption(const WCHAR* key); const WCHAR* getOption(const WCHAR* key); diff --git a/src/coreclr/tools/superpmi/superpmi/metricssummary.cpp b/src/coreclr/tools/superpmi/superpmi/metricssummary.cpp deleted file mode 100644 index 7967cf90293fb..0000000000000 --- a/src/coreclr/tools/superpmi/superpmi/metricssummary.cpp +++ /dev/null @@ -1,118 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. 
- -#include "standardpch.h" -#include "metricssummary.h" -#include "logging.h" -#include "fileio.h" - -void MetricsSummary::AggregateFrom(const MetricsSummary& other) -{ - SuccessfulCompiles += other.SuccessfulCompiles; - FailingCompiles += other.FailingCompiles; - MissingCompiles += other.MissingCompiles; - NumContextsWithDiffs += other.NumContextsWithDiffs; - NumCodeBytes += other.NumCodeBytes; - NumDiffedCodeBytes += other.NumDiffedCodeBytes; - NumExecutedInstructions += other.NumExecutedInstructions; - NumDiffExecutedInstructions += other.NumDiffExecutedInstructions; -} - -void MetricsSummaries::AggregateFrom(const MetricsSummaries& other) -{ - Overall.AggregateFrom(other.Overall); - MinOpts.AggregateFrom(other.MinOpts); - FullOpts.AggregateFrom(other.FullOpts); -} - -bool MetricsSummaries::SaveToFile(const char* path) -{ - FileWriter file; - if (!FileWriter::CreateNew(path, &file)) - { - return false; - } - - if (!file.Printf( - "Successful compiles,Failing compiles,Missing compiles,Contexts with diffs," - "Code bytes,Diffed code bytes,Executed instructions,Diff executed instructions,Name\n")) - { - return false; - } - - return - WriteRow(file, "Overall", Overall) && - WriteRow(file, "MinOpts", MinOpts) && - WriteRow(file, "FullOpts", FullOpts); -} - -bool MetricsSummaries::WriteRow(FileWriter& fw, const char* name, const MetricsSummary& summary) -{ - return - fw.Printf( - "%d,%d,%d,%d,%lld,%lld,%lld,%lld,%s\n", - summary.SuccessfulCompiles, - summary.FailingCompiles, - summary.MissingCompiles, - summary.NumContextsWithDiffs, - summary.NumCodeBytes, - summary.NumDiffedCodeBytes, - summary.NumExecutedInstructions, - summary.NumDiffExecutedInstructions, - name); -} - -bool MetricsSummaries::LoadFromFile(const char* path, MetricsSummaries* metrics) -{ - FileLineReader reader; - if (!FileLineReader::Open(path, &reader)) - { - return false; - } - - if (!reader.AdvanceLine()) - { - return false; - } - - *metrics = MetricsSummaries(); - bool result = true; - while 
(reader.AdvanceLine()) - { - MetricsSummary summary; - - char name[32]; - int scanResult = - sscanf_s( - reader.GetCurrentLine(), - "%d,%d,%d,%d,%lld,%lld,%lld,%lld,%s", - &summary.SuccessfulCompiles, - &summary.FailingCompiles, - &summary.MissingCompiles, - &summary.NumContextsWithDiffs, - &summary.NumCodeBytes, - &summary.NumDiffedCodeBytes, - &summary.NumExecutedInstructions, - &summary.NumDiffExecutedInstructions, - name, (unsigned)sizeof(name)); - - if (scanResult == 9) - { - MetricsSummary* tarSummary = nullptr; - if (strcmp(name, "Overall") == 0) - metrics->Overall = summary; - else if (strcmp(name, "MinOpts") == 0) - metrics->MinOpts = summary; - else if (strcmp(name, "FullOpts") == 0) - metrics->FullOpts = summary; - else - result = false; - } - else - { - result = false; - } - } - - return result; -} diff --git a/src/coreclr/tools/superpmi/superpmi/metricssummary.h b/src/coreclr/tools/superpmi/superpmi/metricssummary.h deleted file mode 100644 index 14e364cd9fcf3..0000000000000 --- a/src/coreclr/tools/superpmi/superpmi/metricssummary.h +++ /dev/null @@ -1,45 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -#ifndef _MetricsSummary -#define _MetricsSummary - -struct MetricsSummary -{ - // Number of methods successfully jitted. - int SuccessfulCompiles = 0; - // Number of methods that failed jitting. - int FailingCompiles = 0; - // Number of methods that failed jitting due to missing SPMI data. - int MissingCompiles = 0; - // Number of contexts that had any diff. - int NumContextsWithDiffs = 0; - // Number of code bytes produced by the JIT for the successful compiles. - long long NumCodeBytes = 0; - // Number of code bytes that were diffed with the other compiler in diff mode. - long long NumDiffedCodeBytes = 0; - // Number of executed instructions in successful compiles. - // Requires a dynamic instrumentor to be enabled. 
- long long NumExecutedInstructions = 0; - // Number of executed instructions inside contexts that were successfully diffed. - long long NumDiffExecutedInstructions = 0; - - void AggregateFrom(const MetricsSummary& other); -}; - -class MetricsSummaries -{ -public: - MetricsSummary Overall; - MetricsSummary MinOpts; - MetricsSummary FullOpts; - - void AggregateFrom(const MetricsSummaries& other); - - bool SaveToFile(const char* path); - static bool LoadFromFile(const char* path, MetricsSummaries* metrics); -private: - static bool WriteRow(class FileWriter& fw, const char* name, const MetricsSummary& summary); -}; - -#endif diff --git a/src/coreclr/tools/superpmi/superpmi/parallelsuperpmi.cpp b/src/coreclr/tools/superpmi/superpmi/parallelsuperpmi.cpp index fbf3c41e14d1b..f5b51fe05b753 100644 --- a/src/coreclr/tools/superpmi/superpmi/parallelsuperpmi.cpp +++ b/src/coreclr/tools/superpmi/superpmi/parallelsuperpmi.cpp @@ -8,7 +8,6 @@ #include "lightweightmap.h" #include "commandline.h" #include "errorhandling.h" -#include "metricssummary.h" #include "fileio.h" // Forward declare the conversion method. 
Including spmiutil.h pulls in other headers @@ -284,39 +283,6 @@ void ProcessChildStdOut(const CommandLine::Options& o, } } -static bool ProcessChildMetrics( - const char* baseMetricsSummaryPath, - MetricsSummaries* baseMetrics, - const char* diffMetricsSummaryPath, - MetricsSummaries* diffMetrics) -{ - if (baseMetricsSummaryPath != nullptr) - { - MetricsSummaries childBaseMetrics; - if (!MetricsSummaries::LoadFromFile(baseMetricsSummaryPath, &childBaseMetrics)) - { - LogError("Couldn't load base metrics summary created by child process"); - return false; - } - - baseMetrics->AggregateFrom(childBaseMetrics); - } - - if (diffMetricsSummaryPath != nullptr) - { - MetricsSummaries childDiffMetrics; - if (!MetricsSummaries::LoadFromFile(diffMetricsSummaryPath, &childDiffMetrics)) - { - LogError("Couldn't load diff metrics summary created by child process"); - return false; - } - - diffMetrics->AggregateFrom(childDiffMetrics); - } - - return true; -} - #ifndef TARGET_UNIX // TODO-Porting: handle Ctrl-C signals gracefully on Unix BOOL WINAPI CtrlHandler(DWORD fdwCtrlType) { @@ -335,27 +301,13 @@ int __cdecl compareInt(const void* arg1, const void* arg2) struct PerWorkerData { - HANDLE hStdOutput; - HANDLE hStdError; - - char* failingMCListPath; - char* diffsInfoPath; - char* stdOutputPath; - char* stdErrorPath; - char* baseMetricsSummaryPath; - char* diffMetricsSummaryPath; - - PerWorkerData() - : hStdOutput(INVALID_HANDLE_VALUE) - , hStdError(INVALID_HANDLE_VALUE) - , failingMCListPath(nullptr) - , diffsInfoPath(nullptr) - , stdOutputPath(nullptr) - , stdErrorPath(nullptr) - , baseMetricsSummaryPath(nullptr) - , diffMetricsSummaryPath(nullptr) - { - } + HANDLE hStdOutput = INVALID_HANDLE_VALUE; + HANDLE hStdError = INVALID_HANDLE_VALUE; + + char* failingMCListPath = nullptr; + char* detailsPath = nullptr; + char* stdOutputPath = nullptr; + char* stdErrorPath = nullptr; }; static void MergeWorkerMCLs(char* mclFilename, PerWorkerData* workerData, int workerCount, char* 
PerWorkerData::*mclPath) @@ -573,8 +525,8 @@ int doParallelSuperPMI(CommandLine::Options& o) LogVerbose("Using child (%s) with args (%s)", spmiFilename, spmiArgs); if (o.mclFilename != nullptr) LogVerbose(" failingMCList=%s", o.mclFilename); - if (o.diffsInfo != nullptr) - LogVerbose(" diffsInfo=%s", o.diffsInfo); + if (o.details != nullptr) + LogVerbose(" details=%s", o.details); LogVerbose(" workerCount=%d, skipCleanup=%d.", o.workerCount, o.skipCleanup); PerWorkerData* perWorkerData = new PerWorkerData[o.workerCount]; @@ -596,22 +548,10 @@ int doParallelSuperPMI(CommandLine::Options& o) sprintf_s(wd.failingMCListPath, MAX_PATH, "%sParallelSuperPMI-%u-%d.mcl", tempPath, randNumber, i); } - if (o.diffsInfo != nullptr) + if (o.details != nullptr) { - wd.diffsInfoPath = new char[MAX_PATH]; - sprintf_s(wd.diffsInfoPath, MAX_PATH, "%sParallelSuperPMI-Diff-%u-%d.mcl", tempPath, randNumber, i); - } - - if (o.baseMetricsSummaryFile != nullptr) - { - wd.baseMetricsSummaryPath = new char[MAX_PATH]; - sprintf_s(wd.baseMetricsSummaryPath, MAX_PATH, "%sParallelSuperPMI-BaseMetricsSummary-%u-%d.csv", tempPath, randNumber, i); - } - - if (o.diffMetricsSummaryFile != nullptr) - { - wd.diffMetricsSummaryPath = new char[MAX_PATH]; - sprintf_s(wd.diffMetricsSummaryPath, MAX_PATH, "%sParallelSuperPMI-DiffMetricsSummary-%u-%d.csv", tempPath, randNumber, i); + wd.detailsPath = new char[MAX_PATH]; + sprintf_s(wd.detailsPath, MAX_PATH, "%sParallelSuperPMI-Details-%u-%d.csv", tempPath, randNumber, i); } wd.stdOutputPath = new char[MAX_PATH]; @@ -638,22 +578,10 @@ int doParallelSuperPMI(CommandLine::Options& o) wd.failingMCListPath); } - if (wd.diffsInfoPath != nullptr) - { - bytesWritten += sprintf_s(cmdLine + bytesWritten, MAX_CMDLINE_SIZE - bytesWritten, " -diffsInfo %s", - wd.diffsInfoPath); - } - - if (wd.baseMetricsSummaryPath != nullptr) - { - bytesWritten += sprintf_s(cmdLine + bytesWritten, MAX_CMDLINE_SIZE - bytesWritten, " -baseMetricsSummary %s", - wd.baseMetricsSummaryPath); 
- } - - if (wd.diffMetricsSummaryPath != nullptr) + if (wd.detailsPath != nullptr) { - bytesWritten += sprintf_s(cmdLine + bytesWritten, MAX_CMDLINE_SIZE - bytesWritten, " -diffMetricsSummary %s", - wd.diffMetricsSummaryPath); + bytesWritten += sprintf_s(cmdLine + bytesWritten, MAX_CMDLINE_SIZE - bytesWritten, " -details %s", + wd.detailsPath); } if (o.failureLimit > 0) @@ -742,8 +670,6 @@ int doParallelSuperPMI(CommandLine::Options& o) bool usageError = false; // variable to flag if we hit a usage error in SuperPMI int loaded = 0, jitted = 0, failed = 0, excluded = 0, missing = 0, diffs = 0; - MetricsSummaries baseMetrics; - MetricsSummaries diffMetrics; // Read the stderr files and log them as errors // Read the stdout files and parse them for counts and log any MISSING or ISSUE errors @@ -752,7 +678,6 @@ int doParallelSuperPMI(CommandLine::Options& o) PerWorkerData& wd = perWorkerData[i]; ProcessChildStdErr(wd.stdErrorPath); ProcessChildStdOut(o, wd.stdOutputPath, &loaded, &jitted, &failed, &excluded, &missing, &diffs, &usageError); - ProcessChildMetrics(wd.baseMetricsSummaryPath, &baseMetrics, wd.diffMetricsSummaryPath, &diffMetrics); if (usageError) break; @@ -764,20 +689,10 @@ int doParallelSuperPMI(CommandLine::Options& o) MergeWorkerMCLs(o.mclFilename, perWorkerData, o.workerCount, &PerWorkerData::failingMCListPath); } - if (o.diffsInfo != nullptr && !usageError) + if (o.details != nullptr && !usageError) { // Concat the resulting diff .mcl files - MergeWorkerCsvs(o.diffsInfo, perWorkerData, o.workerCount, &PerWorkerData::diffsInfoPath); - } - - if (o.baseMetricsSummaryFile != nullptr && !usageError) - { - baseMetrics.SaveToFile(o.baseMetricsSummaryFile); - } - - if (o.diffMetricsSummaryFile != nullptr && !usageError) - { - diffMetrics.SaveToFile(o.diffMetricsSummaryFile); + MergeWorkerCsvs(o.details, perWorkerData, o.workerCount, &PerWorkerData::detailsPath); } if (!usageError) @@ -806,17 +721,9 @@ int doParallelSuperPMI(CommandLine::Options& o) { 
remove(wd.failingMCListPath); } - if (wd.diffsInfoPath != nullptr) - { - remove(wd.diffsInfoPath); - } - if (wd.baseMetricsSummaryPath != nullptr) - { - remove(wd.baseMetricsSummaryPath); - } - if (wd.diffMetricsSummaryPath != nullptr) + if (wd.detailsPath != nullptr) { - remove(wd.diffMetricsSummaryPath); + remove(wd.detailsPath); } remove(wd.stdOutputPath); remove(wd.stdErrorPath); diff --git a/src/coreclr/tools/superpmi/superpmi/superpmi.cpp b/src/coreclr/tools/superpmi/superpmi/superpmi.cpp index 5be8d10acbfbd..4de5e8d2f65e2 100644 --- a/src/coreclr/tools/superpmi/superpmi/superpmi.cpp +++ b/src/coreclr/tools/superpmi/superpmi/superpmi.cpp @@ -18,7 +18,6 @@ #include "mclist.h" #include "methodstatsemitter.h" #include "spmiutil.h" -#include "metricssummary.h" #include "fileio.h" extern int doParallelSuperPMI(CommandLine::Options& o); @@ -122,19 +121,57 @@ static NearDifferResult InvokeNearDiffer(NearDiffer* nearDiffer, return param.result; } +static const char* ResultToString(ReplayResult result) +{ + switch (result) + { + case ReplayResult::Success: + return "Success"; + case ReplayResult::Error: + return "Error"; + case ReplayResult::Miss: + return "Miss"; + default: + return "Unknown"; + } +} + static bool PrintDiffsCsvHeader(FileWriter& fw) { - return fw.Printf("Context,Context size,Base size,Diff size,Base instructions,Diff instructions\n"); + return fw.Printf("Context,Context size,Base result,Diff result,MinOpts,Has diff,Base size,Diff size,Base instructions,Diff instructions\n"); } static bool PrintDiffsCsvRow( FileWriter& fw, - int context, - uint32_t contextSize, - long long baseSize, long long diffSize, - long long baseInstructions, long long diffInstructions) + int context, uint32_t contextSize, + const ReplayResults& baseRes, + const ReplayResults& diffRes, + bool hasDiff) { - return fw.Printf("%d,%u,%lld,%lld,%lld,%lld\n", context, contextSize, baseSize, diffSize, baseInstructions, diffInstructions); + return 
fw.Printf("%d,%u,%s,%s,%s,%s,%u,%u,%lld,%lld\n", + context, contextSize, + ResultToString(baseRes.Result), ResultToString(diffRes.Result), + baseRes.IsMinOpts ? "True" : "False", + hasDiff ? "True" : "False", + baseRes.NumCodeBytes, diffRes.NumCodeBytes, + baseRes.NumExecutedInstructions, diffRes.NumExecutedInstructions); +} + +static bool PrintReplayCsvHeader(FileWriter& fw) +{ + return fw.Printf("Context,Context size,Result,MinOpts,Size,Instructions\n"); +} + +static bool PrintReplayCsvRow( + FileWriter& fw, + int context, uint32_t contextSize, + const ReplayResults& res) +{ + return fw.Printf("%d,%u,%s,%s,%u,%lld\n", + context, contextSize, + ResultToString(res.Result), + res.IsMinOpts ? "True" : "False", + res.NumCodeBytes, res.NumExecutedInstructions); } // Run superpmi. The return value is as follows: @@ -161,10 +198,8 @@ int __cdecl main(int argc, char* argv[]) SimpleTimer st3; SimpleTimer st4; st2.Start(); - JitInstance::Result res, res2 = JitInstance::RESULT_ERROR; - HRESULT hr = E_FAIL; - MethodContext* mc = nullptr; - JitInstance * jit = nullptr, *jit2 = nullptr; + JitInstance* jit = nullptr; + JitInstance* jit2 = nullptr; MethodStatsEmitter* methodStatsEmitter = nullptr; #ifdef SuperPMI_ChewMemory @@ -182,7 +217,7 @@ int __cdecl main(int argc, char* argv[]) bool collectThroughput = false; MCList failingToReplayMCL; - FileWriter diffCsv; + FileWriter detailsCsv; CommandLine::Options o; if (!CommandLine::Parse(argc, argv, &o)) @@ -239,11 +274,11 @@ int __cdecl main(int argc, char* argv[]) { failingToReplayMCL.InitializeMCL(o.mclFilename); } - if (o.diffsInfo != nullptr) + if (o.details != nullptr) { - if (!FileWriter::CreateNew(o.diffsInfo, &diffCsv)) + if (!FileWriter::CreateNew(o.details, &detailsCsv)) { - LogError("Could not create file %s", o.diffsInfo); + LogError("Could not create file %s", o.details); return (int)SpmiResult::GeneralFailure; } } @@ -264,6 +299,7 @@ int __cdecl main(int argc, char* argv[]) int failToReplayCount = 0; int errorCount = 
0; int errorCount2 = 0; + int diffsCount = 0; int missingCount = 0; int index = 0; int excludedCount = 0; @@ -279,14 +315,18 @@ int __cdecl main(int argc, char* argv[]) } } - if (o.diffsInfo != nullptr) + if (o.details != nullptr) { - PrintDiffsCsvHeader(diffCsv); + if (o.applyDiff) + { + PrintDiffsCsvHeader(detailsCsv); + } + else + { + PrintReplayCsvHeader(detailsCsv); + } } - MetricsSummaries totalBaseMetrics; - MetricsSummaries totalDiffMetrics; - while (true) { MethodContextBuffer mcb = reader->GetNextMethodContext(); @@ -305,7 +345,7 @@ int __cdecl main(int argc, char* argv[]) if (o.applyDiff) { LogVerbose(" %2.1f%% - Loaded %d Jitted %d Diffs %d FailedCompile %d at %d per second", - reader->PercentComplete(), loadedCount, jittedCount, totalBaseMetrics.Overall.NumContextsWithDiffs, + reader->PercentComplete(), loadedCount, jittedCount, diffsCount, failToReplayCount, (int)((double)500 / st1.GetSeconds())); } else @@ -321,6 +361,7 @@ int __cdecl main(int argc, char* argv[]) loadedCount++; const int mcIndex = reader->GetMethodContextIndex(); + MethodContext* mc = nullptr; if (!MethodContext::Initialize(mcIndex, mcb.buff, mcb.size, &mc)) { return (int)SpmiResult::GeneralFailure; @@ -386,30 +427,21 @@ int __cdecl main(int argc, char* argv[]) } } - MetricsSummary baseMetrics; - bool isMinOpts; jittedCount++; st3.Start(); - res = jit->CompileMethod(mc, reader->GetMethodContextIndex(), collectThroughput, &baseMetrics, &isMinOpts); + ReplayResults res = jit->CompileMethod(mc, reader->GetMethodContextIndex(), collectThroughput); st3.Stop(); LogDebug("Method %d compiled%s in %fms, result %d", reader->GetMethodContextIndex(), (o.nameOfJit2 == nullptr) ? "" : " by JIT1", st3.GetMilliseconds(), res); - MetricsSummary& totalBaseMetricsOpts = isMinOpts ? totalBaseMetrics.MinOpts : totalBaseMetrics.FullOpts; - MetricsSummary& totalDiffMetricsOpts = isMinOpts ? 
totalDiffMetrics.MinOpts : totalDiffMetrics.FullOpts; - - totalBaseMetrics.Overall.AggregateFrom(baseMetrics); - - if (res == JitInstance::RESULT_SUCCESS) + if (res.Result == ReplayResult::Success) { - totalBaseMetricsOpts.AggregateFrom(baseMetrics); - if (Logger::IsLogLevelEnabled(LOGLEVEL_DEBUG)) { mc->cr->dumpToConsole(); // Dump the compile results if doing debug logging } } - else if (res == JitInstance::RESULT_ERROR) + else if (res.Result == ReplayResult::Error) { errorCount++; LogError("Method %d of size %d failed to load and compile correctly%s (%s).", @@ -448,9 +480,8 @@ int __cdecl main(int argc, char* argv[]) } } - MetricsSummary diffMetrics; - - res2 = JitInstance::RESULT_SUCCESS; + ReplayResults res2; + res2.Result = ReplayResult::Success; if (o.nameOfJit2 != nullptr) { // Lets get the results for the 2nd JIT @@ -459,25 +490,20 @@ int __cdecl main(int argc, char* argv[]) crl = mc->cr; mc->cr = new CompileResult(); - bool isMinOptsDiff; st4.Start(); - res2 = jit2->CompileMethod(mc, reader->GetMethodContextIndex(), collectThroughput, &diffMetrics, &isMinOptsDiff); + res2 = jit2->CompileMethod(mc, reader->GetMethodContextIndex(), collectThroughput); st4.Stop(); LogDebug("Method %d compiled by JIT2 in %fms, result %d", reader->GetMethodContextIndex(), st4.GetMilliseconds(), res2); - totalDiffMetrics.Overall.AggregateFrom(diffMetrics); - - if (res2 == JitInstance::RESULT_SUCCESS) + if (res2.Result == ReplayResult::Success) { - totalDiffMetricsOpts.AggregateFrom(diffMetrics); - if (Logger::IsLogLevelEnabled(LOGLEVEL_DEBUG)) { mc->cr->dumpToConsole(); // Dump the compile results if doing debug logging } } - else if (res2 == JitInstance::RESULT_ERROR) + else if (res2.Result == ReplayResult::Error) { errorCount2++; LogError("Method %d of size %d failed to load and compile correctly by JIT2 (%s).", @@ -490,11 +516,11 @@ int __cdecl main(int argc, char* argv[]) } } - if ((res == JitInstance::RESULT_SUCCESS) && (res2 == JitInstance::RESULT_SUCCESS)) + if 
((res.Result == ReplayResult::Success) && (res2.Result == ReplayResult::Success)) { if (collectThroughput) { - if (o.nameOfJit2 != nullptr && res2 == JitInstance::RESULT_SUCCESS) + if ((o.nameOfJit2 != nullptr) && (res2.Result == ReplayResult::Success)) { // TODO-Bug?: bug in getting the lowest cycle time?? ULONGLONG dif1, dif2, dif3, dif4; @@ -589,6 +615,7 @@ int __cdecl main(int argc, char* argv[]) if (o.applyDiff) { + NearDifferResult diffResult = NearDifferResult::Failure; // We need at least two compile results to diff: they can either both come from JIT // invocations, or one can be loaded from the method context file. @@ -600,30 +627,16 @@ int __cdecl main(int argc, char* argv[]) } else { - NearDifferResult result = InvokeNearDiffer(&nearDiffer, &mc, &crl, &reader); + diffResult = InvokeNearDiffer(&nearDiffer, &mc, &crl, &reader); - switch (result) + switch (diffResult) { case NearDifferResult::SuccessWithDiff: - totalBaseMetrics.Overall.NumContextsWithDiffs++; - totalDiffMetrics.Overall.NumContextsWithDiffs++; - - totalBaseMetricsOpts.NumContextsWithDiffs++; - totalDiffMetricsOpts.NumContextsWithDiffs++; - + diffsCount++; // This is a difference in ASM outputs from Jit1 & Jit2 and not a playback failure - // We will add this MC to the diffs info if there is one. - // Otherwise this will end up in failingMCList - if (o.diffsInfo != nullptr) - { - PrintDiffsCsvRow( - diffCsv, - reader->GetMethodContextIndex(), - mcb.size, - baseMetrics.NumCodeBytes, diffMetrics.NumCodeBytes, - baseMetrics.NumExecutedInstructions, diffMetrics.NumExecutedInstructions); - } - else if (o.mclFilename != nullptr) + // We will add this MC to the details if there is one below. + // Otherwise add it in the failingMCList here. 
+ if (o.details == nullptr) { failingToReplayMCL.AddMethodToMCL(reader->GetMethodContextIndex()); } @@ -637,18 +650,27 @@ int __cdecl main(int argc, char* argv[]) break; } + } - totalBaseMetrics.Overall.NumDiffedCodeBytes += baseMetrics.NumCodeBytes; - totalDiffMetrics.Overall.NumDiffedCodeBytes += diffMetrics.NumCodeBytes; - - totalBaseMetricsOpts.NumDiffedCodeBytes += baseMetrics.NumCodeBytes; - totalDiffMetricsOpts.NumDiffedCodeBytes += diffMetrics.NumCodeBytes; - - totalBaseMetrics.Overall.NumDiffExecutedInstructions += baseMetrics.NumExecutedInstructions; - totalDiffMetrics.Overall.NumDiffExecutedInstructions += diffMetrics.NumExecutedInstructions; - - totalBaseMetricsOpts.NumDiffExecutedInstructions += baseMetrics.NumExecutedInstructions; - totalDiffMetricsOpts.NumDiffExecutedInstructions += diffMetrics.NumExecutedInstructions; + if (o.details != nullptr) + { + PrintDiffsCsvRow( + detailsCsv, + reader->GetMethodContextIndex(), + mcb.size, + res, res2, + /* hasDiff */ diffResult != NearDifferResult::SuccessWithoutDiff); + } + } + else + { + if (o.details != nullptr) + { + PrintReplayCsvRow( + detailsCsv, + reader->GetMethodContextIndex(), + mcb.size, + res); } } } @@ -663,10 +685,26 @@ int __cdecl main(int argc, char* argv[]) failingToReplayMCL.AddMethodToMCL(reader->GetMethodContextIndex()); } - if ((res == JitInstance::RESULT_MISSING) || (res2 == JitInstance::RESULT_MISSING)) + if ((res.Result == ReplayResult::Miss) || (res2.Result == ReplayResult::Miss)) { missingCount++; } + + if (o.details != nullptr) + { + if (o.applyDiff) + { + PrintDiffsCsvRow( + detailsCsv, + reader->GetMethodContextIndex(), mcb.size, + res, res2, + /* hasDiff */ false); + } + else + { + PrintReplayCsvRow(detailsCsv, reader->GetMethodContextIndex(), mcb.size, res); + } + } } delete crl; @@ -678,7 +716,7 @@ int __cdecl main(int argc, char* argv[]) if (o.applyDiff) { LogInfo(g_AsmDiffsSummaryFormatString, loadedCount, jittedCount, failToReplayCount, excludedCount, - missingCount, 
totalDiffMetrics.Overall.NumContextsWithDiffs); + missingCount, diffsCount); } else { @@ -688,16 +726,6 @@ int __cdecl main(int argc, char* argv[]) st2.Stop(); LogVerbose("Total time: %fms", st2.GetMilliseconds()); - if (o.baseMetricsSummaryFile != nullptr) - { - totalBaseMetrics.SaveToFile(o.baseMetricsSummaryFile); - } - - if (o.diffMetricsSummaryFile != nullptr) - { - totalDiffMetrics.SaveToFile(o.diffMetricsSummaryFile); - } - if (methodStatsEmitter != nullptr) { delete methodStatsEmitter; @@ -715,7 +743,7 @@ int __cdecl main(int argc, char* argv[]) { result = SpmiResult::Error; } - else if (o.applyDiff && (totalDiffMetrics.Overall.NumContextsWithDiffs > 0)) + else if (o.applyDiff && (diffsCount > 0)) { result = SpmiResult::Diffs; } diff --git a/src/coreclr/unwinder/arm64/unwinder.cpp b/src/coreclr/unwinder/arm64/unwinder.cpp index 6a4c25ef77698..8096204dffa6e 100644 --- a/src/coreclr/unwinder/arm64/unwinder.cpp +++ b/src/coreclr/unwinder/arm64/unwinder.cpp @@ -9,6 +9,61 @@ #include "unwinder.h" +#define NOTHING + +#define ARM64_CONTEXT T_CONTEXT + +#ifndef HOST_ARM64 +#define CONTEXT T_CONTEXT +#define PCONTEXT PT_CONTEXT +#define KNONVOLATILE_CONTEXT_POINTERS T_KNONVOLATILE_CONTEXT_POINTERS +#define PKNONVOLATILE_CONTEXT_POINTERS PT_KNONVOLATILE_CONTEXT_POINTERS +#define RUNTIME_FUNCTION T_RUNTIME_FUNCTION +#define PRUNTIME_FUNCTION PT_RUNTIME_FUNCTION +#endif + +#ifndef __in +#define __in _In_ +#define __out _Out_ +#endif + +#ifndef FIELD_OFFSET +#define FIELD_OFFSET(type, field) ((LONG)__builtin_offsetof(type, field)) +#endif + +#ifdef HOST_UNIX +#define RtlZeroMemory ZeroMemory + +typedef enum ARM64_FNPDATA_FLAGS { + PdataRefToFullXdata = 0, + PdataPackedUnwindFunction = 1, + PdataPackedUnwindFragment = 2, +} ARM64_FNPDATA_FLAGS; + +typedef enum ARM64_FNPDATA_CR { + PdataCrUnchained = 0, + PdataCrUnchainedSavedLr = 1, + PdataCrChainedWithPac = 2, + PdataCrChained = 3, +} ARM64_FNPDATA_CR; + +#endif // HOST_UNIX + +// +// MessageId: 
STATUS_BAD_FUNCTION_TABLE +// +// MessageText: +// +// A malformed function table was encountered during an unwind operation. +// +#define STATUS_BAD_FUNCTION_TABLE ((NTSTATUS)0xC00000FFL) + +// +// Flags for RtlVirtualUnwind2. +// + +#define RTL_VIRTUAL_UNWIND2_VALIDATE_PAC 0x00000001UL + typedef struct _ARM64_KTRAP_FRAME { // @@ -90,6 +145,8 @@ typedef struct _ARM64_VFP_STATE NEON128 V[32]; // All V registers (0-31) } ARM64_VFP_STATE, *PARM64_VFP_STATE, KARM64_VFP_STATE, *PKARM64_VFP_STATE; +#define RTL_VIRTUAL_UNWIND_VALID_FLAGS_ARM64 (RTL_VIRTUAL_UNWIND2_VALIDATE_PAC) + // // Parameters describing the unwind codes. // @@ -101,46 +158,109 @@ typedef struct _ARM64_VFP_STATE // // Macros for accessing memory. These can be overridden if other code // (in particular the debugger) needs to use them. +// + +// +// Macros for accessing memory. These can be overridden if other code +// (in particular the debugger) needs to use them. + +#if !defined(DEBUGGER_UNWIND) #define MEMORY_READ_BYTE(params, addr) (*dac_cast<PTR_BYTE>(addr)) +#define MEMORY_READ_WORD(params, addr) (*dac_cast<PTR_USHORT>(addr)) #define MEMORY_READ_DWORD(params, addr) (*dac_cast<PTR_ULONG32>(addr)) #define MEMORY_READ_QWORD(params, addr) (*dac_cast<PTR_ULONG64>(addr)) +#endif + +// +// ARM64_UNWIND_PARAMS definition. This is the kernel-specific definition, +// and contains information on the original PC, the stack bounds, and +// a pointer to the non-volatile context pointer array. Any usage of +// these fields must be wrapped in a macro so that the debugger can take +// a direct drop of this code and use it. 
+// + +#if !defined(DEBUGGER_UNWIND) + typedef struct _ARM64_UNWIND_PARAMS { - PT_KNONVOLATILE_CONTEXT_POINTERS ContextPointers; + ULONG_PTR ControlPc; + PULONG_PTR LowLimit; + PULONG_PTR HighLimit; + PKNONVOLATILE_CONTEXT_POINTERS ContextPointers; } ARM64_UNWIND_PARAMS, *PARM64_UNWIND_PARAMS; #define UNWIND_PARAMS_SET_TRAP_FRAME(Params, Address, Size) -#define UPDATE_CONTEXT_POINTERS(Params, RegisterNumber, Address) \ -do { \ - if (ARGUMENT_PRESENT(Params)) { \ - PT_KNONVOLATILE_CONTEXT_POINTERS ContextPointers = (Params)->ContextPointers; \ - if (ARGUMENT_PRESENT(ContextPointers)) { \ - if (RegisterNumber >= 19 && RegisterNumber <= 30) { \ - (&ContextPointers->X19)[RegisterNumber - 19] = (PDWORD64)Address; \ - } \ - } \ - } \ +#if !defined(UPDATE_CONTEXT_POINTERS) +#define UPDATE_CONTEXT_POINTERS(Params, RegisterNumber, Address) \ +do { \ + PKNONVOLATILE_CONTEXT_POINTERS ContextPointers = (Params)->ContextPointers; \ + if (ARGUMENT_PRESENT(ContextPointers)) { \ + if (RegisterNumber >= 19 && RegisterNumber <= 28) { \ + (&ContextPointers->X19)[RegisterNumber - 19] = (PULONG64)Address; \ + } else if (RegisterNumber == 29) { \ + ContextPointers->Fp = (PULONG64)Address; \ + } else if (RegisterNumber == 30) { \ + ContextPointers->Lr = (PULONG64)Address; \ + } \ + } \ } while (0) +#endif // !defined(UPDATE_CONTEXT_POINTERS) + +#if !defined(UPDATE_FP_CONTEXT_POINTERS) +#define UPDATE_FP_CONTEXT_POINTERS(Params, RegisterNumber, Address) \ +do { \ + PKNONVOLATILE_CONTEXT_POINTERS ContextPointers = (Params)->ContextPointers; \ + if (ARGUMENT_PRESENT(ContextPointers) && \ + (RegisterNumber >= 8) && \ + (RegisterNumber <= 15)) { \ + \ + (&ContextPointers->D8)[RegisterNumber - 8] = (PULONG64)Address; \ + } \ +} while (0) +#endif // !defined(UPDATE_FP_CONTEXT_POINTERS) + +#if !defined(VALIDATE_STACK_ADDRESS_EX) +#define VALIDATE_STACK_ADDRESS_EX(Params, Context, Address, DataSize, Alignment, OutStatus) +#endif // !defined(VALIDATE_STACK_ADDRESS_EX) +#if 
!defined(VALIDATE_STACK_ADDRESS) +#define VALIDATE_STACK_ADDRESS(Params, Context, DataSize, Alignment, OutStatus) \ + VALIDATE_STACK_ADDRESS_EX(Params, Context, (Context)->Sp, DataSize, Alignment, OutStatus) +#endif // !defined(VALIDATE_STACK_ADDRESS) -#define UPDATE_FP_CONTEXT_POINTERS(Params, RegisterNumber, Address) \ -do { \ - if (ARGUMENT_PRESENT(Params)) { \ - PT_KNONVOLATILE_CONTEXT_POINTERS ContextPointers = (Params)->ContextPointers; \ - if (ARGUMENT_PRESENT(ContextPointers) && \ - (RegisterNumber >= 8) && \ - (RegisterNumber <= 15)) { \ - \ - (&ContextPointers->D8)[RegisterNumber - 8] = (PDWORD64)Address; \ - } \ - } \ -} while (0) +#else // !defined(DEBUGGER_UNWIND) + +#if !defined(UPDATE_CONTEXT_POINTERS) +#define UPDATE_CONTEXT_POINTERS(Params, RegisterNumber, Address) +#endif // !defined(UPDATE_CONTEXT_POINTERS) + +#if !defined(UPDATE_FP_CONTEXT_POINTERS) +#define UPDATE_FP_CONTEXT_POINTERS(Params, RegisterNumber, Address) +#endif // !defined(UPDATE_FP_CONTEXT_POINTERS) +#if !defined(VALIDATE_STACK_ADDRESS_EX) #define VALIDATE_STACK_ADDRESS_EX(Params, Context, Address, DataSize, Alignment, OutStatus) +#endif // !defined(VALIDATE_STACK_ADDRESS_EX) + +#if !defined(VALIDATE_STACK_ADDRESS) #define VALIDATE_STACK_ADDRESS(Params, Context, DataSize, Alignment, OutStatus) +#endif // !defined(VALIDATE_STACK_ADDRESS) + +#endif // !defined(DEBUGGER_UNWIND) + +// +// Macros for stripping pointer authentication (PAC) bits. +// + +#if !defined(DEBUGGER_STRIP_PAC) + +// NOTE: Pointer authentication is not used by .NET, so the implementation does nothing +#define STRIP_PAC(Params, pointer) + +#endif // // Macros to clarify opcode parsing @@ -149,336 +269,1024 @@ do { #define OPCODE_IS_END(Op) (((Op) & 0xfe) == 0xe4) // -// This table describes the size of each unwind code, in bytes +// This table describes the size of each unwind code, in bytes, for unwind codes +// in the range 0xE0-0xFF. 
// -static const BYTE UnwindCodeSizeTable[256] = +static const BYTE UnwindCodeSizeTable[32] = { - 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, - 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, - 4,1,2,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1 + 4,1,2,1,1,1,1,3, 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, 2,3,4,5,1,1,1,1 }; -NTSTATUS -RtlpUnwindCustom( - __inout PT_CONTEXT ContextRecord, - _In_ BYTE Opcode, - _In_ PARM64_UNWIND_PARAMS UnwindParams - ) +// +// This table describes the number of instructions represented by each unwind +// code in the range 0xE0-0xFF. +// -/*++ +static const BYTE UnwindCodeInstructionCountTable[32] = +{ + 1,1,1,1,1,1,1,1, // 0xE0-0xE7 + 0, // 0xE8 - MSFT_OP_TRAP_FRAME + 0, // 0xE9 - MSFT_OP_MACHINE_FRAME + 0, // 0xEA - MSFT_OP_CONTEXT + 0, // 0xEB - MSFT_OP_EC_CONTEXT / MSFT_OP_RET_TO_GUEST (unused) + 0, // 0xEC - MSFT_OP_CLEAR_UNWOUND_TO_CALL + 0, // 0XED - MSFT_OP_RET_TO_GUEST_LEAF (unused) + 0,0, // 0xEE-0xEF + 0,0,0,0,0,0,0,0, // 0xF0-0xF7 + 1,1,1,1,1,1,1,1 // 0xF8-0xFF +}; -Routine Description: +#if !defined(ALIGN_DOWN_BY) - Handles custom unwinding operations involving machine-specific - frames. +#define ALIGN_DOWN_BY(length, alignment) \ + ((ULONG_PTR)(length) & ~((ULONG_PTR)(alignment) - 1)) -Arguments: +#endif - ContextRecord - Supplies the address of a context record. +#if !defined(ALIGN_UP_BY) - Opcode - The opcode to decode. +#define ALIGN_UP_BY(length, alignment) \ + (ALIGN_DOWN_BY(((ULONG_PTR)(length) + (alignment) - 1), alignment)) - UnwindParams - Additional parameters shared with caller. 
+#endif -Return Value: +#define OP_BUFFER_PRE_ADJUST(_sav_slot, _slots) {} +#define OP_BUFFER_POST_ADJUST(_sav_slot, _slots) {(_sav_slot) += (_slots);} - An NTSTATUS indicating either STATUS_SUCCESS if everything went ok, or - another status code if there were problems. +#define DBG_OP(...) ---*/ +#pragma warning(push) +#pragma warning(disable:4214) // bit field types other than int +#pragma warning(disable:4201) // nameless struct/union +#pragma warning(disable:4309) // truncation of constant value -{ - ULONG Fpcr; - ULONG Fpsr; - ULONG RegIndex; - ULONG_PTR SourceAddress; - ULONG_PTR StartingSp; - NTSTATUS Status; - ULONG_PTR VfpStateAddress; +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wbitfield-constant-conversion" +#endif - StartingSp = ContextRecord->Sp; - Status = STATUS_SUCCESS; +void emit_save_fplr(char** buff, LONG offset) { + union uop { + char val; + struct { + char z : 6; // pair at[sp + #Z * 8], offset <= 504 + char fixed : 2; + }; + }; - // - // The opcode describes the special-case stack - // + union uop *op; - switch (Opcode) - { + OP_BUFFER_PRE_ADJUST(*buff, 1); - // - // Trap frame case - // + offset = ((offset)/8); + op = (union uop*)(*buff); + op->fixed = 1; + op->z = (char)offset; - case 0xe8: // MSFT_OP_TRAP_FRAME: + OP_BUFFER_POST_ADJUST(*buff, 1); +} - // - // Ensure there is enough valid space for the trap frame - // +void emit_save_fplr_x(char** buff, LONG offset) { + union uop { + char val; + struct { + char z : 6; // pair at [sp-(#Z+1)*8]!, pre-indexed offset >= -512 + char fixed : 2; + }; + }; - VALIDATE_STACK_ADDRESS(UnwindParams, ContextRecord, sizeof(ARM64_KTRAP_FRAME), 16, &Status); - if (!NT_SUCCESS(Status)) { - return Status; - } + union uop* op; - // - // Restore X0-X17, and D0-D7 - // + OP_BUFFER_PRE_ADJUST(*buff, 1); - SourceAddress = StartingSp + offsetof(ARM64_KTRAP_FRAME, X); - for (RegIndex = 0; RegIndex < 18; RegIndex++) { - UPDATE_CONTEXT_POINTERS(UnwindParams, RegIndex, 
SourceAddress); -#ifdef __GNUC__ - *(&ContextRecord->X0 + RegIndex) = MEMORY_READ_QWORD(UnwindParams, SourceAddress); -#else - ContextRecord->X[RegIndex] = MEMORY_READ_QWORD(UnwindParams, SourceAddress); -#endif - SourceAddress += sizeof(ULONG_PTR); - } + offset = ((-offset)/8)-1; + op = (union uop*)(*buff); + op->fixed = 2; + op->z = (char)offset; - SourceAddress = StartingSp + offsetof(ARM64_KTRAP_FRAME, VfpState); - VfpStateAddress = MEMORY_READ_QWORD(UnwindParams, SourceAddress); - if (VfpStateAddress != 0) { + OP_BUFFER_POST_ADJUST(*buff, 1); +} - SourceAddress = VfpStateAddress + offsetof(KARM64_VFP_STATE, Fpcr); - Fpcr = MEMORY_READ_DWORD(UnwindParams, SourceAddress); - SourceAddress = VfpStateAddress + offsetof(KARM64_VFP_STATE, Fpsr); - Fpsr = MEMORY_READ_DWORD(UnwindParams, SourceAddress); - if (Fpcr != (ULONG)-1 && Fpsr != (ULONG)-1) { +void emit_save_regp(char** buff, LONG reg, LONG offset) { + union uop { + short val; + struct { + short z : 6; + short x : 4; // save r(19 + #X) pair at[sp + #Z * 8], offset <= 504 + short fixed : 6; + }; + }; - ContextRecord->Fpcr = Fpcr; - ContextRecord->Fpsr = Fpsr; + union uop* op; - SourceAddress = VfpStateAddress + offsetof(KARM64_VFP_STATE, V); - for (RegIndex = 0; RegIndex < 32; RegIndex++) { - UPDATE_FP_CONTEXT_POINTERS(UnwindParams, RegIndex, SourceAddress); - ContextRecord->V[RegIndex].Low = MEMORY_READ_QWORD(UnwindParams, SourceAddress); - ContextRecord->V[RegIndex].High = MEMORY_READ_QWORD(UnwindParams, SourceAddress + 8); - SourceAddress += 2 * sizeof(ULONGLONG); - } - } - } + OP_BUFFER_PRE_ADJUST(*buff, 2); - // - // Restore R11, R12, SP, LR, PC, and the status registers - // + offset = ((offset)/8); + op = (union uop*)(*buff); + op->fixed = 0x32; + op->x = (short)reg; + op->z = (short)offset; - SourceAddress = StartingSp + offsetof(ARM64_KTRAP_FRAME, Spsr); - ContextRecord->Cpsr = MEMORY_READ_DWORD(UnwindParams, SourceAddress); + OP_BUFFER_POST_ADJUST(*buff, 2); +} - SourceAddress = StartingSp + 
offsetof(ARM64_KTRAP_FRAME, Sp); - ContextRecord->Sp = MEMORY_READ_QWORD(UnwindParams, SourceAddress); +void emit_save_regp_x(char** buff, LONG reg, LONG offset) { + union uop { + short val; + struct { + short z : 6; + short x : 4; // save pair r(19+#X) at [sp-(#Z+1)*8]!, pre-indexed offset >= -512 + short fixed : 6; + }; + }; - SourceAddress = StartingSp + offsetof(ARM64_KTRAP_FRAME, Lr); - ContextRecord->Lr = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + union uop* op; - SourceAddress = StartingSp + offsetof(ARM64_KTRAP_FRAME, Fp); - ContextRecord->Fp = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + OP_BUFFER_PRE_ADJUST(*buff, 2); - SourceAddress = StartingSp + offsetof(ARM64_KTRAP_FRAME, Pc); - ContextRecord->Pc = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + offset = ((-offset)/8)-1; + op = (union uop*)(*buff); + op->fixed = 0x33; + op->x = (short)reg; + op->z = (short)offset; - // - // Set the trap frame and clear the unwound-to-call flag - // + OP_BUFFER_POST_ADJUST(*buff, 2); +} - UNWIND_PARAMS_SET_TRAP_FRAME(UnwindParams, StartingSp, sizeof(ARM64_KTRAP_FRAME)); - ContextRecord->ContextFlags &= ~CONTEXT_UNWOUND_TO_CALL; - break; +void emit_save_reg(char** buff, LONG reg, LONG offset) { + union uop { + short val; + struct { + short z : 6; + short x : 4; // save reg r(19+#X) at [sp+#Z*8], offset <= 504 + short fixed : 6; + }; + }; - // - // Context case - // + union uop* op; - case 0xea: // MSFT_OP_CONTEXT: + OP_BUFFER_PRE_ADJUST(*buff, 2); - // - // Ensure there is enough valid space for the full CONTEXT structure - // + offset = ((offset)/8); + op = (union uop*)(*buff); + op->fixed = 0x34; + op->x = (short)reg; + op->z = (short)offset; - VALIDATE_STACK_ADDRESS(UnwindParams, ContextRecord, sizeof(CONTEXT), 16, &Status); - if (!NT_SUCCESS(Status)) { - return Status; - } + OP_BUFFER_POST_ADJUST(*buff, 2); +} - // - // Restore X0-X28, and D0-D31 - // +void emit_save_reg_x(char** buff, LONG reg, LONG offset) { + union uop { + short val; + struct { + 
short z : 5; + short x : 4; // save reg r(19+#X) at [sp-(#Z+1)*8]!, pre-indexed offset >= -256 + short fixed : 7; + }; + }; - SourceAddress = StartingSp + offsetof(T_CONTEXT, X0); - for (RegIndex = 0; RegIndex < 29; RegIndex++) { - UPDATE_CONTEXT_POINTERS(UnwindParams, RegIndex, SourceAddress); -#ifdef __GNUC__ - *(&ContextRecord->X0 + RegIndex) = MEMORY_READ_QWORD(UnwindParams, SourceAddress); -#else - ContextRecord->X[RegIndex] = MEMORY_READ_QWORD(UnwindParams, SourceAddress); -#endif - SourceAddress += sizeof(ULONG_PTR); - } + union uop* op; - SourceAddress = StartingSp + offsetof(T_CONTEXT, V); - for (RegIndex = 0; RegIndex < 32; RegIndex++) { - UPDATE_FP_CONTEXT_POINTERS(UnwindParams, RegIndex, SourceAddress); - ContextRecord->V[RegIndex].Low = MEMORY_READ_QWORD(UnwindParams, SourceAddress); - ContextRecord->V[RegIndex].High = MEMORY_READ_QWORD(UnwindParams, SourceAddress + 8); - SourceAddress += 2 * sizeof(ULONGLONG); - } + OP_BUFFER_PRE_ADJUST(*buff, 2); - // - // Restore SP, LR, PC, and the status registers - // + offset = ((-offset)/8)-1; + op = (union uop*)(*buff); + op->fixed = 0x6A; + op->x = (short)reg; + op->z = (short)offset; - SourceAddress = StartingSp + offsetof(T_CONTEXT, Cpsr); - ContextRecord->Cpsr = MEMORY_READ_DWORD(UnwindParams, SourceAddress); + OP_BUFFER_POST_ADJUST(*buff, 2); +} - SourceAddress = StartingSp + offsetof(T_CONTEXT, Fp); - ContextRecord->Fp = MEMORY_READ_QWORD(UnwindParams, SourceAddress); +void emit_save_lrpair(char** buff, LONG reg, LONG offset) { + union uop { + short val; + struct { + short z : 6; + short x : 3; // save pair at [sp+#Z*8], offset <= 504 + short fixed : 7; + }; + }; - SourceAddress = StartingSp + offsetof(T_CONTEXT, Lr); - ContextRecord->Lr = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + union uop* op; - SourceAddress = StartingSp + offsetof(T_CONTEXT, Sp); - ContextRecord->Sp = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + OP_BUFFER_PRE_ADJUST(*buff, 2); - SourceAddress = StartingSp + 
offsetof(T_CONTEXT, Pc); - ContextRecord->Pc = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + offset = ((offset)/8); + op = (union uop*)(*buff); + op->fixed = 0x6B; + op->x = (short)(reg / 2); + op->z = (short)offset; - SourceAddress = StartingSp + offsetof(T_CONTEXT, Fpcr); - ContextRecord->Fpcr = MEMORY_READ_DWORD(UnwindParams, SourceAddress); + OP_BUFFER_POST_ADJUST(*buff, 2); +} - SourceAddress = StartingSp + offsetof(T_CONTEXT, Fpsr); - ContextRecord->Fpsr = MEMORY_READ_DWORD(UnwindParams, SourceAddress); +void emit_save_fregp(char** buff, LONG reg, LONG offset) { + union uop { + short val; + struct { + short z : 6; + short x : 3; // save pair d(8+#X) at [sp+#Z*8], offset <= 504 + short fixed : 7; + }; + }; - // - // Inherit the unwound-to-call flag from this context - // + union uop* op; - SourceAddress = StartingSp + offsetof(T_CONTEXT, ContextFlags); - ContextRecord->ContextFlags &= ~CONTEXT_UNWOUND_TO_CALL; - ContextRecord->ContextFlags |= - MEMORY_READ_DWORD(UnwindParams, SourceAddress) & CONTEXT_UNWOUND_TO_CALL; - break; + OP_BUFFER_PRE_ADJUST(*buff, 2); - default: - return STATUS_UNSUCCESSFUL; - } + offset = ((offset)/8); + op = (union uop*)(*buff); + op->fixed = 0x6C; + op->x = (short)reg; + op->z = (short)offset; - return STATUS_SUCCESS; + OP_BUFFER_POST_ADJUST(*buff, 2); } -ULONG -RtlpComputeScopeSize( - _In_ ULONG_PTR UnwindCodePtr, - _In_ ULONG_PTR UnwindCodesEndPtr, - _In_ BOOLEAN IsEpilog, - _In_ PARM64_UNWIND_PARAMS UnwindParams - ) +void emit_save_fregp_x(char** buff, LONG reg, LONG offset) { + union uop { + short val; + struct { + short z : 6; + short x : 3; // save pair d(8 + #X), at[sp - (#Z + 1) * 8]!, pre - indexed offset >= -512 + short fixed : 7; + }; + }; -/*++ + union uop* op; -Routine Description: + OP_BUFFER_PRE_ADJUST(*buff, 2); - Computes the size of an prolog or epilog, in words. 
+ offset = ((-offset)/8)-1; + op = (union uop*)(*buff); + op->fixed = 0x6D; + op->x = (short)reg; + op->z = (short)offset; -Arguments: + OP_BUFFER_POST_ADJUST(*buff, 2); +} - UnwindCodePtr - Supplies a pointer to the start of the unwind - code sequence. +void emit_save_freg(char** buff, LONG reg, LONG offset) { + union uop { + short val; + struct { + short z : 6; + short x : 3; // save reg d(8+#X) at [sp+#Z*8], offset <= 504 + short fixed : 7; + }; + }; - UnwindCodesEndPtr - Supplies a pointer to the byte immediately - following the unwind code table, as described by the header. + union uop* op; - IsEpilog - Specifies TRUE if the scope describes an epilog, - or FALSE if it describes a prolog. + OP_BUFFER_PRE_ADJUST(*buff, 2); - UnwindParams - Additional parameters shared with caller. + offset = ((offset)/8); + op = (union uop*)(*buff); + op->fixed = 0x6E; + op->x = (short)reg; + op->z = (short)offset; -Return Value: + OP_BUFFER_POST_ADJUST(*buff, 2); +} - The size of the scope described by the unwind codes, in halfword units. +void emit_save_freg_x(char** buff, LONG reg, LONG offset) { + union uop { + short val; + struct { + short z : 5; + short x : 3; // save reg d(8+#X) at [sp-(#Z+1)*8]!, pre-indexed offset >= -256 + short fixed : 8; + }; + }; ---*/ + union uop* op; -{ - ULONG ScopeSize; - BYTE Opcode; + OP_BUFFER_PRE_ADJUST(*buff, 2); - // - // Iterate through the unwind codes until we hit an end marker. - // While iterating, accumulate the total scope size. 
- // + offset = ((-offset)/8)-1; + op = (union uop*)(*buff); + op->fixed = 0xDE; + op->x = (short)reg; + op->z = (short)offset; - ScopeSize = 0; - Opcode = 0; - while (UnwindCodePtr < UnwindCodesEndPtr) { - Opcode = MEMORY_READ_BYTE(UnwindParams, UnwindCodePtr); - if (OPCODE_IS_END(Opcode)) { - break; - } + OP_BUFFER_POST_ADJUST(*buff, 2); +} - UnwindCodePtr += UnwindCodeSizeTable[Opcode]; - ScopeSize++; +void emit_alloc(char** buff, LONG size) { + + union uop_alloc_l { + long val; + struct { + long x : 24; // allocate large stack with size < 256M (2^24 *16) + long fixed : 8; + }; + }; + + union uop_alloc_m { + short val; + struct { + short x : 11; // allocate large stack with size < 32K (2^11 * 16) + short fixed : 5; + }; + }; + + union uop_alloc_s { + char val; + struct { + char x : 5; // allocate small stack with size < 512 (2^5 * 16) + char fixed : 3; + }; + }; + + if (size >= 16384) { + union uop_alloc_l* op; + + OP_BUFFER_PRE_ADJUST(*buff, 4); + + op = (union uop_alloc_l*)(*buff); + op->fixed = 0xE0; + op->x = size / 16; + + OP_BUFFER_POST_ADJUST(*buff, 4); } + else if (size >= 512) { + union uop_alloc_m* op; - // - // Epilogs have one extra instruction at the end that needs to be - // accounted for. 
- // + OP_BUFFER_PRE_ADJUST(*buff, 2); - if (IsEpilog) { - ScopeSize++; + op = (union uop_alloc_m*)(*buff); + op->fixed = 0x18; + op->x = (short)(size / 16); + + OP_BUFFER_POST_ADJUST(*buff, 2); } + else { + union uop_alloc_s* op; - return ScopeSize; + OP_BUFFER_PRE_ADJUST(*buff, 1); + + op = (union uop_alloc_s*)(*buff); + op->fixed = 0x0; + op->x = (char)(size / 16); + + OP_BUFFER_POST_ADJUST(*buff, 1); + } } -NTSTATUS -RtlpUnwindRestoreRegisterRange( - __inout PT_CONTEXT ContextRecord, - _In_ LONG SpOffset, - _In_ ULONG FirstRegister, - _In_ ULONG RegisterCount, - _In_ PARM64_UNWIND_PARAMS UnwindParams - ) +void emit_end(char** buff) { + char* op; -/*++ + OP_BUFFER_PRE_ADJUST(*buff, 1); -Routine Description: + op = (char*)(*buff); + *op = 0xE4; - Restores a series of integer registers from the stack. + OP_BUFFER_POST_ADJUST(*buff, 1); +} -Arguments: +void emit_end_c(char** buff) { + char* op; - ContextRecord - Supplies the address of a context record. + OP_BUFFER_PRE_ADJUST(*buff, 1); - SpOffset - Specifies a stack offset. Positive values are simply used - as a base offset. Negative values assume a predecrement behavior: - a 0 offset is used for restoration, but the absolute value of the - offset is added to the final Sp. + op = (char*)(*buff); + *op = 0xE5; - FirstRegister - Specifies the index of the first register to restore. + OP_BUFFER_POST_ADJUST(*buff, 1); +} - RegisterCount - Specifies the number of registers to restore. +void emit_set_fp(char** buff) { + char* op; - UnwindParams - Additional parameters shared with caller. + OP_BUFFER_PRE_ADJUST(*buff, 1); -Return Value: + op = (char*)(*buff); + *op = 0xE1; - None. 
+ OP_BUFFER_POST_ADJUST(*buff, 1); +} ---*/ +void emit_nop(char** buff) { + char* op; -{ - ULONG_PTR CurAddress; + OP_BUFFER_PRE_ADJUST(*buff, 1); + + op = (char*)(*buff); + *op = 0xE3; + + OP_BUFFER_POST_ADJUST(*buff, 1); +} + +void emit_pac(char** buff) { + char* op; + + OP_BUFFER_PRE_ADJUST(*buff, 1); + + op = (char*)(*buff); + *op = 0xFC; + + OP_BUFFER_POST_ADJUST(*buff, 1); +} + +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + +#pragma warning(pop) + +#define NO_HOME_NOPS ((size_t)-1) + +VOID +RtlpExpandCompactToFull ( + _In_ IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY* fnent_pdata, + _Inout_ IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY_XDATA* fnent_xdata +) +{ + + LONG intsz; + LONG fpsz; + LONG savsz; + LONG locsz; + LONG famsz; + BOOLEAN sav_predec_done = FALSE; + BOOLEAN fp_set = FALSE; + LONG sav_slot = 0; + char* op_buffer; + char* op_buffer_start; + char* op_buffer_end; + size_t op_buffer_used; + size_t ops_before_nops = NO_HOME_NOPS; + + // + // Calculate sizes. + // + + famsz = fnent_pdata->FrameSize * 2; + intsz = fnent_pdata->RegI; + if (fnent_pdata->CR == PdataCrUnchainedSavedLr) { + intsz += 1; // lr + } + + fpsz = fnent_pdata->RegF; + if (fnent_pdata->RegF != 0) { + fpsz += 1; + } + + savsz = intsz + fpsz; + + // + // Usually Homes are saved as part of the savesz area. + // In other words, they are saved in the space allocated + // by the pre-decrement operation performed by a non-volatile + // register save. If there are no non-volatile register saves, + // then Homes are saved in the localsz area. + // + + if (savsz > 0) { + savsz += (fnent_pdata->H * 8); + } + + savsz = ALIGN_UP_BY(savsz, 2); + locsz = famsz - savsz; + + // + // Initialize xdata main header. 
+ // + + fnent_xdata->FunctionLength = fnent_pdata->FunctionLength; + fnent_xdata->Version = 0; + fnent_xdata->ExceptionDataPresent = 0; + op_buffer_start = (char*)(fnent_xdata + 1); + op_buffer_end = op_buffer_start + ((fnent_xdata->CodeWords) * 4); + op_buffer = op_buffer_start; + + DBG_OP("end\n"); + emit_end(&op_buffer); + + if (fnent_pdata->CR == PdataCrChainedWithPac) { + DBG_OP("pac\n"); + emit_pac(&op_buffer); + } + + // + // Save the integer registers. + // + + if (intsz != 0) { + ULONG intreg; + + // + // Special case for only x19 + LR, for which an _x option is not + // available, so do the SP decrement by itself first. + // + + if ((fnent_pdata->RegI == 1) && (fnent_pdata->CR == PdataCrUnchainedSavedLr)) { + DBG_OP("alloc_s (%i)\n", savsz * 8); + emit_alloc(&op_buffer, savsz * 8); + sav_predec_done = TRUE; + } + + // + // Issue save-pair instructions as long as there are even number + // of registers to save left. + // + + for (intreg = 0; intreg < ((fnent_pdata->RegI / 2) * 2); intreg += 2) { + if (!sav_predec_done) { + DBG_OP("save_regp_x\t(%s, %s, %i)\n", int_reg_names[intreg], int_reg_names[intreg + 1], -savsz * 8); + emit_save_regp_x(&op_buffer, intreg, -savsz * 8); + sav_slot += 2; + sav_predec_done = TRUE; + } + else { + DBG_OP("save_regp\t(%s, %s, %i)\n", int_reg_names[intreg], int_reg_names[intreg + 1], sav_slot * 8); + emit_save_regp(&op_buffer, intreg, sav_slot * 8); + sav_slot += 2; + } + } + + // + // Address the remaining possible cases: + // - Last remaining odd register + // - LR, when CR=1 (saving LR needed but no FP chain) + // - Both, as a pair + // + + if ((fnent_pdata->RegI % 2) == 1) { + if (fnent_pdata->CR == PdataCrUnchainedSavedLr) { + + // + // special case at the top of the function makes sure + // !sav_predec_done can't even happen. 
+ // + + _ASSERTE(sav_predec_done); + + DBG_OP("save_lrpair\t(%s, %i)\n", int_reg_names[intreg], sav_slot * 8); + emit_save_lrpair(&op_buffer, intreg, sav_slot * 8); + sav_slot += 2; + } + else { + if (!sav_predec_done) { + DBG_OP("save_reg_x\t(%s, %i)\n", int_reg_names[intreg], -savsz * 8); + emit_save_reg_x(&op_buffer, intreg, -savsz * 8); + sav_slot += 1; + sav_predec_done = TRUE; + } + else { + DBG_OP("save_reg\t(%s, %i)\n", int_reg_names[intreg], sav_slot * 8); + emit_save_reg(&op_buffer, intreg, sav_slot * 8); + sav_slot += 1; + } + } + } + else { + if (fnent_pdata->CR == PdataCrUnchainedSavedLr) { + if (!sav_predec_done) { + DBG_OP("save_reg_x\t(%s, %i)\n", int_reg_names[11], -savsz * 8); + emit_save_reg_x(&op_buffer, 11, -savsz * 8); + sav_slot += 1; + sav_predec_done = TRUE; + } + else { + DBG_OP("save_reg\t(%s, %i)\n", int_reg_names[11], sav_slot * 8); + emit_save_reg(&op_buffer, 11, sav_slot * 8); + sav_slot += 1; + } + } + } + } + + // + // Save the floating point registers. + // + + if (fpsz != 0) { + LONG fpreg; + + for (fpreg = 0; fpreg < ((fpsz / 2) * 2); fpreg += 2) { + if (!sav_predec_done) { + DBG_OP("save_fregp_x\t(%s, %s, %i)\n", fp_reg_names[fpreg], fp_reg_names[fpreg + 1], -savsz * 8); + emit_save_fregp_x(&op_buffer, fpreg, -savsz * 8); + sav_slot += 2; + sav_predec_done = TRUE; + } + else { + DBG_OP("save_fregp\t(%s, %s, %i)\n", fp_reg_names[fpreg], fp_reg_names[fpreg + 1], sav_slot * 8); + emit_save_fregp(&op_buffer, fpreg, sav_slot * 8); + sav_slot += 2; + } + } + + if ((fpsz % 2) == 1) { + if (!sav_predec_done) { + DBG_OP("save_freg_x\t(%s, %i)\n", fp_reg_names[fpreg], -savsz * 8); + emit_save_freg_x(&op_buffer, fpreg, -savsz * 8); + sav_slot += 1; + sav_predec_done = TRUE; + } + else { + DBG_OP("save_freg\t(%s, %i)\n", fp_reg_names[fpreg], sav_slot * 8); + emit_save_freg(&op_buffer, fpreg, sav_slot * 8); + sav_slot += 1; + } + } + } + + // + // Save parameter registers. 
Record the instructions + // that save them, if Homes are being saved into the + // savesz area. If they are being saved into the localsz + // area, then they don't really need to be indicated since + // they are no-ops and there is nothing following them. + // In that case, the Homes save instructions will just + // be considered part of the body. + // + + if ((fnent_pdata->H != 0) && sav_predec_done) { + ops_before_nops = op_buffer - op_buffer_start; + DBG_OP("nop\nnop\nnop\nnop\n"); + emit_nop(&op_buffer); + emit_nop(&op_buffer); + emit_nop(&op_buffer); + emit_nop(&op_buffer); + } + + // + // Reserve space for locals and fp,lr chain. + // + + if (locsz > 0) { + if ((fnent_pdata->CR == PdataCrChained) || + (fnent_pdata->CR == PdataCrChainedWithPac)) { + + if (locsz <= (512 / 8)) { + DBG_OP("save_fplr_x\t(%i)\n", -locsz * 8); + emit_save_fplr_x(&op_buffer, -locsz * 8); + } + else { + DBG_OP("alloc\t\t(%i)\n", locsz * 8); + emit_alloc(&op_buffer, locsz * 8); + DBG_OP("save_fplr\t(%i)\n", 0); + emit_save_fplr(&op_buffer, 0); + } + + DBG_OP("set_fp\n"); + emit_set_fp(&op_buffer); + fp_set = TRUE; + } + else { + DBG_OP("alloc\t\t(%i)\n", locsz * 8); + emit_alloc(&op_buffer, locsz * 8); + } + } + + if (fnent_pdata->Flag == PdataPackedUnwindFragment) { + DBG_OP("end_c\n"); + emit_end_c(&op_buffer); + } + + // + // Adjust epilog information in the header + // + + if (fnent_pdata->Flag == PdataPackedUnwindFragment) { + + // + // Fragment case: no epilog + // + + fnent_xdata->EpilogInHeader = 0; + fnent_xdata->EpilogCount = 0; + } + else { + + // + // With EpilogInHeader true, EpilogCount represents + // the op index to the start of the epilog. If the + // set_fp is present in the prolog, set this field + // to 1 so that this op is skipped for the epilog. + // + + fnent_xdata->EpilogInHeader = 1; + if (fp_set) { + fnent_xdata->EpilogCount = 1; + } + else { + fnent_xdata->EpilogCount = 0; + } + } + + // + // Flip the buffer around. 
This will accomplish two + // needed things: + // - Opcodes closer to the body show first; + // - Opcodes become big-endian, as they should. + // + + op_buffer_used = op_buffer - op_buffer_start; + if (op_buffer_used > 1) { + char* lo = op_buffer_start; + char* hi = op_buffer - 1; + char swap; + while (lo < hi) { + swap = *lo; + *lo++ = *hi; + *hi-- = swap; + } + } + + // + // On functions with homed parameters, generate the + // epilog by copying the prolog minus the param + // saving NOPs. + // + + if ((ops_before_nops != NO_HOME_NOPS) && (fnent_xdata->EpilogInHeader != 0)) { + char* src = op_buffer - 1; + char* dst = src + op_buffer_used -4; + char* skip = src - ops_before_nops; + while (src >= op_buffer_start) { + if (src == skip) { + src -= 4; + continue; + } + + *dst-- = *src--; + } + + fnent_xdata->EpilogCount += (ULONG)op_buffer_used; + op_buffer_used = (op_buffer_used * 2) - 4; + } + + // + // Adjust the CodeWords count. + // + + op_buffer_used = ALIGN_UP_BY(op_buffer_used, 4); + op_buffer_used /= 4; + fnent_xdata->CodeWords = (ULONG)op_buffer_used; + + return; +} + + +static +ULONG_PTR +RtlpGetUnwindCodeSize ( + _In_ ULONG UnwindCode, + _In_opt_ PULONG ScopeSize + ) + +/*++ + +Routine Description: + + This function determines the number of bytes in an unwind code based on the + first byte of that unwind code. + +Arguments: + + UnwindCode - Supplies the first byte of the unwind code. + + ScopeSize - Supplies a pointer to a variable that is incremented by the + number of instructions represented by the specified unwind code. + +Return Value: + + The number of bytes in the specified unwind code is returned as the + function value. 
+ +--*/ + +{ + _ASSERTE(UnwindCode <= 0xFF); + + if (UnwindCode < 0xC0) { + if (ARGUMENT_PRESENT(ScopeSize)) { + *ScopeSize += 1; + } + + return 1; + + } else if (UnwindCode < 0xE0) { + if (ARGUMENT_PRESENT(ScopeSize)) { + *ScopeSize += 1; + } + + return 2; + + } else { + if (ARGUMENT_PRESENT(ScopeSize)) { + *ScopeSize += UnwindCodeInstructionCountTable[UnwindCode - 0xE0]; + } + + return UnwindCodeSizeTable[UnwindCode - 0xE0]; + } +} + +static +ULONG +RtlpComputeScopeSize ( + __in ULONG_PTR UnwindCodePtr, + __in ULONG_PTR UnwindCodesEndPtr, + __in BOOLEAN IsEpilog, + __in PARM64_UNWIND_PARAMS UnwindParams + ) + +/*++ + +Routine Description: + + Computes the size of an prolog or epilog, in words. + +Arguments: + + UnwindCodePtr - Supplies a pointer to the start of the unwind + code sequence. + + UnwindCodesEndPtr - Supplies a pointer to the byte immediately + following the unwind code table, as described by the header. + + IsEpilog - Specifies TRUE if the scope describes an epilog, + or FALSE if it describes a prolog. + + UnwindParams - Additional parameters shared with caller. + +Return Value: + + The size of the scope described by the unwind codes, in halfword units. + +--*/ + +{ + ULONG ScopeSize; + BYTE Opcode; + + UNREFERENCED_PARAMETER(UnwindParams); + + // + // Iterate through the unwind codes until we hit an end marker. + // While iterating, accumulate the total scope size. + // + + ScopeSize = 0; + Opcode = 0; + while (UnwindCodePtr < UnwindCodesEndPtr) { + Opcode = MEMORY_READ_BYTE(UnwindParams, UnwindCodePtr); + if (OPCODE_IS_END(Opcode)) { + break; + } + + UnwindCodePtr += RtlpGetUnwindCodeSize(Opcode, &ScopeSize); + } + + // + // Epilogs have one extra instruction at the end that needs to be + // accounted for. 
+ // + + if (IsEpilog) { + ScopeSize++; + } + + return ScopeSize; +} + +static +NTSTATUS +RtlpUnwindRestoreRegisterRange ( + _Inout_ PCONTEXT ContextRecord, + _In_ LONG SpOffset, + _In_range_(0, 30) ULONG FirstRegister, + _In_range_(1, 31-FirstRegister) ULONG RegisterCount, + _In_ PARM64_UNWIND_PARAMS UnwindParams + ) + +/*++ + +Routine Description: + + Restores a series of integer registers from the stack. + +Arguments: + + ContextRecord - Supplies the address of a context record. + + SpOffset - Specifies a stack offset. Positive values are simply used + as a base offset. Negative values assume a predecrement behavior: + a 0 offset is used for restoration, but the absolute value of the + offset is added to the final Sp. + + FirstRegister - Specifies the index of the first register to restore. + + RegisterCount - Specifies the number of registers to restore. + + UnwindParams - Additional parameters shared with caller. + +Return Value: + + None. + +--*/ + +{ + ULONG_PTR CurAddress; + ULONG RegIndex; + NTSTATUS Status; + + // + // Validate non-overflowing register count. + // + + if ((FirstRegister + RegisterCount) > 31) { + return STATUS_UNWIND_INVALID_SEQUENCE; + } + + // + // Compute the source address and validate it. 
+ // + + CurAddress = ContextRecord->Sp; + if (SpOffset >= 0) { + CurAddress += SpOffset; + } + + Status = STATUS_SUCCESS; + VALIDATE_STACK_ADDRESS(UnwindParams, ContextRecord, 8 * RegisterCount, 8, &Status); + if (Status != STATUS_SUCCESS) { + return Status; + } + + // + // Restore the registers + // + + for (RegIndex = 0; RegIndex < RegisterCount; RegIndex++) { + UPDATE_CONTEXT_POINTERS(UnwindParams, FirstRegister + RegIndex, CurAddress); + ContextRecord->X[FirstRegister + RegIndex] = MEMORY_READ_QWORD(UnwindParams, CurAddress); + CurAddress += 8; + } + if (SpOffset < 0) { + ContextRecord->Sp -= SpOffset; + } + + return STATUS_SUCCESS; +} + +static +NTSTATUS +RtlpUnwindRestoreFpRegisterRange ( + __inout PCONTEXT ContextRecord, + __in LONG SpOffset, + __in ULONG FirstRegister, + __in ULONG RegisterCount, + __in PARM64_UNWIND_PARAMS UnwindParams + ) + +/*++ + +Routine Description: + + Restores a series of floating-point registers from the stack. + +Arguments: + + ContextRecord - Supplies the address of a context record. + + SpOffset - Specifies a stack offset. Positive values are simply used + as a base offset. Negative values assume a predecrement behavior: + a 0 offset is used for restoration, but the absolute value of the + offset is added to the final Sp. + + FirstRegister - Specifies the index of the first register to restore. + + RegisterCount - Specifies the number of registers to restore. + + UnwindParams - Additional parameters shared with caller. + +Return Value: + + None. + +--*/ + +{ + ULONG_PTR CurAddress; ULONG RegIndex; NTSTATUS Status; + // + // Validate non-overflowing register count. + // + + if ((FirstRegister + RegisterCount) > 32) { + return STATUS_UNWIND_INVALID_SEQUENCE; + } + // // Compute the source address and validate it. 
// @@ -499,12 +1307,8 @@ Return Value: // for (RegIndex = 0; RegIndex < RegisterCount; RegIndex++) { - UPDATE_CONTEXT_POINTERS(UnwindParams, FirstRegister + RegIndex, CurAddress); -#ifdef __GNUC__ - *(&ContextRecord->X0 + FirstRegister + RegIndex) = MEMORY_READ_QWORD(UnwindParams, CurAddress); -#else - ContextRecord->X[FirstRegister + RegIndex] = MEMORY_READ_QWORD(UnwindParams, CurAddress); -#endif + UPDATE_FP_CONTEXT_POINTERS(UnwindParams, FirstRegister + RegIndex, CurAddress); + ContextRecord->V[FirstRegister + RegIndex].Low = MEMORY_READ_QWORD(UnwindParams, CurAddress); CurAddress += 8; } if (SpOffset < 0) { @@ -514,20 +1318,21 @@ Return Value: return STATUS_SUCCESS; } +static NTSTATUS -RtlpUnwindRestoreFpRegisterRange( - __inout PT_CONTEXT ContextRecord, - _In_ LONG SpOffset, - _In_ ULONG FirstRegister, - _In_ ULONG RegisterCount, - _In_ PARM64_UNWIND_PARAMS UnwindParams +RtlpUnwindRestoreSimdRegisterRange ( + __inout PCONTEXT ContextRecord, + __in LONG SpOffset, + __in ULONG FirstRegister, + __in ULONG RegisterCount, + __in PARM64_UNWIND_PARAMS UnwindParams ) /*++ Routine Description: - Restores a series of floating-point registers from the stack. + Restores a series of full SIMD (Q) registers from the stack. Arguments: @@ -555,6 +1360,14 @@ Return Value: ULONG RegIndex; NTSTATUS Status; + // + // Validate non-overflowing register count. + // + + if ((FirstRegister + RegisterCount) > 32) { + return STATUS_UNWIND_INVALID_SEQUENCE; + } + // // Compute the source address and validate it. 
// @@ -565,37 +1378,276 @@ Return Value: } Status = STATUS_SUCCESS; - VALIDATE_STACK_ADDRESS(UnwindParams, ContextRecord, 8 * RegisterCount, 8, &Status); + VALIDATE_STACK_ADDRESS(UnwindParams, ContextRecord, 16 * RegisterCount, 16, &Status); if (Status != STATUS_SUCCESS) { return Status; } - // - // Restore the registers - // + // + // Restore the registers + // + + for (RegIndex = 0; RegIndex < RegisterCount; RegIndex++) { + UPDATE_FP_CONTEXT_POINTERS(UnwindParams, FirstRegister + RegIndex, CurAddress); + ContextRecord->V[FirstRegister + RegIndex].Low = MEMORY_READ_QWORD(UnwindParams, CurAddress); + CurAddress += 8; + ContextRecord->V[FirstRegister + RegIndex].High = MEMORY_READ_QWORD(UnwindParams, CurAddress); + CurAddress += 8; + } + if (SpOffset < 0) { + ContextRecord->Sp -= SpOffset; + } + + return STATUS_SUCCESS; +} + +static +NTSTATUS +RtlpUnwindCustom ( + __inout PCONTEXT ContextRecord, + __in BYTE Opcode, + __in PARM64_UNWIND_PARAMS UnwindParams + ) + +/*++ + +Routine Description: + + Handles custom unwinding operations involving machine-specific + frames. + +Arguments: + + ContextRecord - Supplies the address of a context record. + + Opcode - The opcode to decode. + + UnwindParams - Additional parameters shared with caller. + +Return Value: + + An NTSTATUS indicating either STATUS_SUCCESS if everything went ok, or + another status code if there were problems. 
+ +--*/ + +{ + ULONG Fpcr; + ULONG Fpsr; + ULONG RegIndex; + ULONG_PTR SourceAddress; + ULONG_PTR StartingSp; + NTSTATUS Status; + ULONG_PTR VfpStateAddress; + + StartingSp = ContextRecord->Sp; + Status = STATUS_SUCCESS; + + // + // The opcode describes the special-case stack + // + + switch (Opcode) + { + + // + // Trap frame case + // + + case 0xe8: // MSFT_OP_TRAP_FRAME: + + // + // Ensure there is enough valid space for the trap frame + // + + VALIDATE_STACK_ADDRESS(UnwindParams, ContextRecord, sizeof(ARM64_KTRAP_FRAME), 16, &Status); + if (!NT_SUCCESS(Status)) { + return Status; + } + + // + // Restore X0-X18, and D0-D7 + // + + SourceAddress = StartingSp + FIELD_OFFSET(ARM64_KTRAP_FRAME, X); + for (RegIndex = 0; RegIndex < 19; RegIndex++) { + UPDATE_CONTEXT_POINTERS(UnwindParams, RegIndex, SourceAddress); + ContextRecord->X[RegIndex] = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + SourceAddress += sizeof(ULONG_PTR); + } + + SourceAddress = StartingSp + FIELD_OFFSET(ARM64_KTRAP_FRAME, VfpState); + VfpStateAddress = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + if (VfpStateAddress != 0) { + + SourceAddress = VfpStateAddress + FIELD_OFFSET(KARM64_VFP_STATE, Fpcr); + Fpcr = MEMORY_READ_DWORD(UnwindParams, SourceAddress); + SourceAddress = VfpStateAddress + FIELD_OFFSET(KARM64_VFP_STATE, Fpsr); + Fpsr = MEMORY_READ_DWORD(UnwindParams, SourceAddress); + if (Fpcr != -1 && Fpsr != -1) { + + ContextRecord->Fpcr = Fpcr; + ContextRecord->Fpsr = Fpsr; + + SourceAddress = VfpStateAddress + FIELD_OFFSET(KARM64_VFP_STATE, V); + for (RegIndex = 0; RegIndex < 32; RegIndex++) { + UPDATE_FP_CONTEXT_POINTERS(UnwindParams, RegIndex, SourceAddress); + ContextRecord->V[RegIndex].Low = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + ContextRecord->V[RegIndex].High = MEMORY_READ_QWORD(UnwindParams, SourceAddress + 8); + SourceAddress += 2 * sizeof(ULONGLONG); + } + } + } + + // + // Restore R11, R12, SP, LR, PC, and the status registers + // + + SourceAddress = StartingSp + 
FIELD_OFFSET(ARM64_KTRAP_FRAME, Spsr); + ContextRecord->Cpsr = MEMORY_READ_DWORD(UnwindParams, SourceAddress); + + SourceAddress = StartingSp + FIELD_OFFSET(ARM64_KTRAP_FRAME, Sp); + ContextRecord->Sp = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + + SourceAddress = StartingSp + FIELD_OFFSET(ARM64_KTRAP_FRAME, Lr); + ContextRecord->Lr = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + + SourceAddress = StartingSp + FIELD_OFFSET(ARM64_KTRAP_FRAME, Fp); + ContextRecord->Fp = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + + SourceAddress = StartingSp + FIELD_OFFSET(ARM64_KTRAP_FRAME, Pc); + ContextRecord->Pc = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + + // + // Set the trap frame and clear the unwound-to-call flag + // + + UNWIND_PARAMS_SET_TRAP_FRAME(UnwindParams, StartingSp, sizeof(ARM64_KTRAP_FRAME)); + ContextRecord->ContextFlags &= ~CONTEXT_UNWOUND_TO_CALL; + break; + + // + // Machine frame case + // + + case 0xe9: // MSFT_OP_MACHINE_FRAME: + + // + // Ensure there is enough valid space for the machine frame + // + + VALIDATE_STACK_ADDRESS(UnwindParams, ContextRecord, 16, 16, &Status); + if (!NT_SUCCESS(Status)) { + return Status; + } + + // + // Restore the SP and PC, and clear the unwound-to-call flag + // + + ContextRecord->Sp = MEMORY_READ_QWORD(UnwindParams, StartingSp + 0); + ContextRecord->Pc = MEMORY_READ_QWORD(UnwindParams, StartingSp + 8); + ContextRecord->ContextFlags &= ~CONTEXT_UNWOUND_TO_CALL; + break; + + // + // Context case + // + + case 0xea: // MSFT_OP_CONTEXT: + + // + // Ensure there is enough valid space for the full CONTEXT structure + // + + VALIDATE_STACK_ADDRESS(UnwindParams, ContextRecord, sizeof(ARM64_CONTEXT), 16, &Status); + if (!NT_SUCCESS(Status)) { + return Status; + } + + // + // Restore X0-X28, and D0-D31 + // + + SourceAddress = StartingSp + FIELD_OFFSET(ARM64_CONTEXT, X); + for (RegIndex = 0; RegIndex < 29; RegIndex++) { + UPDATE_CONTEXT_POINTERS(UnwindParams, RegIndex, SourceAddress); + 
ContextRecord->X[RegIndex] = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + SourceAddress += sizeof(ULONG_PTR); + } + + SourceAddress = StartingSp + FIELD_OFFSET(ARM64_CONTEXT, V); + for (RegIndex = 0; RegIndex < 32; RegIndex++) { + UPDATE_FP_CONTEXT_POINTERS(UnwindParams, RegIndex, SourceAddress); + ContextRecord->V[RegIndex].Low = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + ContextRecord->V[RegIndex].High = MEMORY_READ_QWORD(UnwindParams, SourceAddress + 8); + SourceAddress += 2 * sizeof(ULONGLONG); + } + + // + // Restore SP, LR, PC, and the status registers + // + + SourceAddress = StartingSp + FIELD_OFFSET(ARM64_CONTEXT, Cpsr); + ContextRecord->Cpsr = MEMORY_READ_DWORD(UnwindParams, SourceAddress); + + SourceAddress = StartingSp + FIELD_OFFSET(ARM64_CONTEXT, Fp); + ContextRecord->Fp = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + + SourceAddress = StartingSp + FIELD_OFFSET(ARM64_CONTEXT, Lr); + ContextRecord->Lr = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + + SourceAddress = StartingSp + FIELD_OFFSET(ARM64_CONTEXT, Sp); + ContextRecord->Sp = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + + SourceAddress = StartingSp + FIELD_OFFSET(ARM64_CONTEXT, Pc); + ContextRecord->Pc = MEMORY_READ_QWORD(UnwindParams, SourceAddress); + + SourceAddress = StartingSp + FIELD_OFFSET(ARM64_CONTEXT, Fpcr); + ContextRecord->Fpcr = MEMORY_READ_DWORD(UnwindParams, SourceAddress); + + SourceAddress = StartingSp + FIELD_OFFSET(ARM64_CONTEXT, Fpsr); + ContextRecord->Fpsr = MEMORY_READ_DWORD(UnwindParams, SourceAddress); + + // + // Inherit the unwound-to-call flag from this context + // + + SourceAddress = StartingSp + FIELD_OFFSET(ARM64_CONTEXT, ContextFlags); + ContextRecord->ContextFlags &= ~CONTEXT_UNWOUND_TO_CALL; + ContextRecord->ContextFlags |= + MEMORY_READ_DWORD(UnwindParams, SourceAddress) & CONTEXT_UNWOUND_TO_CALL; + break; + + case 0xeb: // MSFT_OP_EC_CONTEXT: + // NOTE: for .NET, the arm64ec context restoring is not implemented + _ASSERTE(FALSE); + 
return STATUS_UNSUCCESSFUL; + + case 0xec: // MSFT_OP_CLEAR_UNWOUND_TO_CALL + ContextRecord->ContextFlags &= ~CONTEXT_UNWOUND_TO_CALL; + ContextRecord->Pc = ContextRecord->Lr; + break; - for (RegIndex = 0; RegIndex < RegisterCount; RegIndex++) { - UPDATE_FP_CONTEXT_POINTERS(UnwindParams, FirstRegister + RegIndex, CurAddress); - ContextRecord->V[FirstRegister + RegIndex].Low = MEMORY_READ_QWORD(UnwindParams, CurAddress); - CurAddress += 8; - } - if (SpOffset < 0) { - ContextRecord->Sp -= SpOffset; + default: + return STATUS_UNSUCCESSFUL; } return STATUS_SUCCESS; } NTSTATUS -RtlpUnwindFunctionFull( - _In_ DWORD64 ControlPcRva, - _In_ ULONG_PTR ImageBase, - _In_ PT_RUNTIME_FUNCTION FunctionEntry, - __inout T_CONTEXT *ContextRecord, - _Out_ PDWORD64 EstablisherFrame, - _Outptr_opt_result_maybenull_ PEXCEPTION_ROUTINE *HandlerRoutine, - _Out_ PVOID *HandlerData, - _In_ PARM64_UNWIND_PARAMS UnwindParams +RtlpUnwindFunctionFull ( + __in ULONG ControlPcRva, + __in ULONG_PTR ImageBase, + __in PRUNTIME_FUNCTION FunctionEntry, + __in IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY_XDATA *FunctionEntryExtended, + __inout PCONTEXT ContextRecord, + __out PULONG_PTR EstablisherFrame, + __deref_opt_out_opt PEXCEPTION_ROUTINE *HandlerRoutine, + __out PVOID *HandlerData, + __in PARM64_UNWIND_PARAMS UnwindParams, + __in ULONG UnwindFlags ) /*++ @@ -636,10 +1688,12 @@ Routine Description: returned. HandlerData - Supplies a pointer to a variable that receives a pointer - the language handler data. + to the language handler data. UnwindParams - Additional parameters shared with caller. + UnwindFlags - Supplies additional flags for the unwind operation. + Return Value: STATUS_SUCCESS if the unwind could be completed, a failure status otherwise. 
@@ -657,11 +1711,11 @@ Return Value: ULONG FunctionLength; ULONG HeaderWord; ULONG NextCode; - DWORD64 OffsetInFunction; + ULONG OffsetInFunction; ULONG ScopeNum; ULONG ScopeSize; ULONG ScopeStart; - DWORD64 SkipWords; + ULONG SkipWords; NTSTATUS Status; ULONG_PTR UnwindCodePtr; ULONG_PTR UnwindCodesEndPtr; @@ -669,6 +1723,8 @@ Return Value: ULONG UnwindIndex; ULONG UnwindWords; + UNREFERENCED_PARAMETER(UnwindFlags); + // // Unless a special frame is encountered, assume that any unwinding // will return us to the return address of a call and set the flag @@ -689,7 +1745,10 @@ Return Value: // Fetch the header word from the .xdata blob // - UnwindDataPtr = ImageBase + FunctionEntry->UnwindData; + UnwindDataPtr = (FunctionEntryExtended != NULL) ? + ((ULONG_PTR)FunctionEntryExtended) : + (ImageBase + FunctionEntry->UnwindData); + HeaderWord = MEMORY_READ_DWORD(UnwindParams, UnwindDataPtr); UnwindDataPtr += 4; @@ -717,11 +1776,11 @@ Return Value: UnwindWords = (EpilogScopeCount >> 16) & 0xff; EpilogScopeCount &= 0xffff; } + + UnwindIndex = 0; if ((HeaderWord & (1 << 21)) != 0) { UnwindIndex = EpilogScopeCount; EpilogScopeCount = 0; - } else { - UnwindIndex = 0; } // @@ -791,6 +1850,11 @@ Return Value: ScopeSize = RtlpComputeScopeSize(UnwindCodePtr + UnwindIndex, UnwindCodesEndPtr, TRUE, UnwindParams); ScopeStart = FunctionLength - ScopeSize; + // + // N.B. This code assumes that no handleable exceptions can occur in + // the prolog or in a chained shrink-wrapping prolog region. 
+ // + if (OffsetInFunction >= ScopeStart) { UnwindCodePtr += UnwindIndex; SkipWords = OffsetInFunction - ScopeStart; @@ -850,7 +1914,7 @@ Return Value: if (OPCODE_IS_END(CurCode)) { break; } - UnwindCodePtr += UnwindCodeSizeTable[CurCode]; + UnwindCodePtr += RtlpGetUnwindCodeSize(CurCode, NULL); SkipWords--; } @@ -885,7 +1949,7 @@ Return Value: ContextRecord, -8 * (CurCode & 0x1f), 19, - 2 + 2 * AccumulatedSaveNexts, + 2 + (2 * AccumulatedSaveNexts), UnwindParams); AccumulatedSaveNexts = 0; } @@ -946,7 +2010,7 @@ Return Value: ContextRecord, 8 * (NextCode & 0x3f), 19 + ((CurCode & 3) << 2) + (NextCode >> 6), - 2 + 2 * AccumulatedSaveNexts, + 2 + (2 * AccumulatedSaveNexts), UnwindParams); AccumulatedSaveNexts = 0; } @@ -962,7 +2026,7 @@ Return Value: ContextRecord, -8 * ((NextCode & 0x3f) + 1), 19 + ((CurCode & 3) << 2) + (NextCode >> 6), - 2 + 2 * AccumulatedSaveNexts, + 2 + (2 * AccumulatedSaveNexts), UnwindParams); AccumulatedSaveNexts = 0; } @@ -1040,7 +2104,7 @@ Return Value: ContextRecord, 8 * (NextCode & 0x3f), 8 + ((CurCode & 1) << 2) + (NextCode >> 6), - 2 + 2 * AccumulatedSaveNexts, + 2 + (2 * AccumulatedSaveNexts), UnwindParams); AccumulatedSaveNexts = 0; } @@ -1056,7 +2120,7 @@ Return Value: ContextRecord, -8 * ((NextCode & 0x3f) + 1), 8 + ((CurCode & 1) << 2) + (NextCode >> 6), - 2 + 2 * AccumulatedSaveNexts, + 2 + (2 * AccumulatedSaveNexts), UnwindParams); AccumulatedSaveNexts = 0; } @@ -1158,29 +2222,111 @@ Return Value: } // - // end_c (11100101): end of unwind code in current chained scope + // end_c (11100101): end of unwind code in current chained scope. + // Continue unwinding parent scope. // else if (CurCode == 0xe5) { - if (AccumulatedSaveNexts != 0) { - return STATUS_UNWIND_INVALID_SEQUENCE; - } - goto finished; + NOTHING; } // - // save_next (11100110): save next non-volatile Int or FP register pair. + // save_next_pair (11100110): save next non-volatile Int or FP register pair. 
// else if (CurCode == 0xe6) { - AccumulatedSaveNexts++; + AccumulatedSaveNexts += 1; + } + + // + // 11100111 ' 0pxrrrrr ' ffoooooo + // p: 0/1 - single/pair + // x: 0/1 - positive offset / negative offset with writeback + // r: register number + // f: 00/01/10 - X / D / Q + // o: offset * 16 for x=1 or p=1 or f=Q / else offset * 8 + // + + else if (CurCode == 0xe7) { + LONG SpOffset; + ULONG RegCount; + union uop { + unsigned short val; + struct { + unsigned char val1; + unsigned char val2; + }; + struct { + unsigned short o : 6; + unsigned short f : 2; + unsigned short r : 5; + unsigned short x : 1; + unsigned short p : 1; + unsigned short fixed : 1; + }; + } op; + + op.val2 = MEMORY_READ_BYTE(UnwindParams, UnwindCodePtr); + UnwindCodePtr += 1; + op.val1 = MEMORY_READ_BYTE(UnwindParams, UnwindCodePtr); + UnwindCodePtr += 1; + + // + // save_next_pair only permited for pairs. + // + + if ((op.p == 0) && (AccumulatedSaveNexts != 0)) { + return STATUS_UNWIND_INVALID_SEQUENCE; + } + + if (op.fixed != 0) { + return STATUS_UNWIND_INVALID_SEQUENCE; + } + + SpOffset = op.o + op.x; + SpOffset *= ((op.x == 1) || (op.f == 2) || (op.p == 1)) ? (16) : (8); + SpOffset *= (op.x == 1) ? 
(-1) : (1); + RegCount = 1 + op.p + (2 * AccumulatedSaveNexts); + switch (op.f) { + case 0: + Status = RtlpUnwindRestoreRegisterRange( + ContextRecord, + SpOffset, + op.r, + RegCount, + UnwindParams); + break; + + case 1: + Status = RtlpUnwindRestoreFpRegisterRange( + ContextRecord, + SpOffset, + op.r, + RegCount, + UnwindParams); + break; + + case 2: + Status = RtlpUnwindRestoreSimdRegisterRange( + ContextRecord, + SpOffset, + op.r, + RegCount, + UnwindParams); + break; + + default: + return STATUS_UNWIND_INVALID_SEQUENCE; + } + + AccumulatedSaveNexts = 0; } // // custom_0 (111010xx): restore custom structure // - else if (CurCode >= 0xe8 && CurCode <= 0xeb) { + else if (CurCode >= 0xe8 && CurCode <= 0xec) { if (AccumulatedSaveNexts != 0) { return STATUS_UNWIND_INVALID_SEQUENCE; } @@ -1188,6 +2334,44 @@ Return Value: FinalPcFromLr = FALSE; } + // + // pac (11111100): function has pointer authentication + // + + else if (CurCode == 0xfc) { + if (AccumulatedSaveNexts != 0) { + return STATUS_UNWIND_INVALID_SEQUENCE; + } + + STRIP_PAC(UnwindParams, &ContextRecord->Lr); + + // + // TODO: Implement support for UnwindFlags RTL_VIRTUAL_UNWIND2_VALIDATE_PAC. + // + } + + // + // future/nop: the following ranges represent encodings reserved for + // future extension. They are treated as a nop and, therefore, no + // unwind action is taken. 
+ // + // 11111000|yyyyyyyy + // 11111001|yyyyyyyy|yyyyyyyy + // 11111010|yyyyyyyy|yyyyyyyy|yyyyyyyy + // 11111011|yyyyyyyy|yyyyyyyy|yyyyyyyy|yyyyyyyy + // 111111xx + // + + else if (CurCode >= 0xf8) { + if (AccumulatedSaveNexts != 0) { + return STATUS_UNWIND_INVALID_SEQUENCE; + } + + if (CurCode <= 0xfb) { + UnwindCodePtr += 1 + (CurCode & 0x3); + } + } + // // Anything else is invalid // @@ -1224,33 +2408,126 @@ Return Value: } NTSTATUS -RtlpUnwindFunctionCompact( - _In_ DWORD64 ControlPcRva, - _In_ PT_RUNTIME_FUNCTION FunctionEntry, - __inout T_CONTEXT *ContextRecord, - _Out_ PDWORD64 EstablisherFrame, - _Outptr_opt_result_maybenull_ PEXCEPTION_ROUTINE *HandlerRoutine, +RtlpUnwindFunctionCompact ( + __in ULONG ControlPcRva, + __in ULONG_PTR ImageBase, + __in PRUNTIME_FUNCTION FunctionEntry, + __inout PCONTEXT ContextRecord, + __out PULONG_PTR EstablisherFrame, + __deref_opt_out_opt PEXCEPTION_ROUTINE *HandlerRoutine, + __out PVOID *HandlerData, + __in PARM64_UNWIND_PARAMS UnwindParams, + __in ULONG UnwindFlags + ) +{ + + NTSTATUS Status; + + // + // The longest possible array of unwind opcodes that a compressed format can generate is + // 28 + 24 bytes. Rounding it up to a multiple of 4, that results in an array of 52 bytes. + // Note that the following example isn't even fully legal as any allocation above 4KiB would + // require a call to __chkstk and, thus, rule-out compressed encoding. But since it can be + // encoded, it is considered here. 
+ // + // Compressed: + // + // Flag = PdataPackedUnwindFunction + // RegF = 7 + // RegI = 10 + // H = 1 + // CR = PdataCrChainedWithPac + // FrameSize = 8000/16; + // + // Full Prolog: + // e1 40 c1 e7 e3 e3 e3 e3 d9 90 d9 0e d8 8c d8 0a ca 08 c9 86 c9 04 c8 82 cc 19 fc e4 + // + // Full Epilog (same as prolog minus the 4 x NOP for param home spill): + // e1 40 c1 e7 d9 90 d9 0e d8 8c d8 0a ca 08 c9 86 c9 04 c8 82 cc 19 fc e4 + // + // E4 end + // FC pac + // CC 19 save_regp_x (x19, x20, -208) + // C8 82 save_regp (x21, x22, 16) + // C9 04 save_regp (x23, x24, 32) + // C9 86 save_regp (x25, x26, 48) + // CA 08 save_regp (x27, x28, 64) + // D8 0A save_fregp (d8, d9, 80) + // D8 8C save_fregp (d10, d11, 96) + // D9 0E save_fregp (d12, d13, 112) + // D9 90 save_fregp (d14, d15, 128) + // E3 nop + // E3 nop + // E3 nop + // E3 nop + // C1 E7 alloc (7792) + // 40 save_fplr (0) + // E1 set_fp + // + + struct LOCAL_XDATA { + IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY_XDATA xdata; + char ops[60]; + } fnent_xdata = {}; + + fnent_xdata.xdata.CodeWords = sizeof(fnent_xdata.ops) / 4; + RtlpExpandCompactToFull(FunctionEntry, &fnent_xdata.xdata); + Status = RtlpUnwindFunctionFull(ControlPcRva, + ImageBase, + FunctionEntry, + &fnent_xdata.xdata, + ContextRecord, + EstablisherFrame, + HandlerRoutine, + HandlerData, + UnwindParams, + UnwindFlags); + + return Status; +} + +#if !defined(DEBUGGER_UNWIND) + +NTSTATUS +RtlpxVirtualUnwind ( + _In_ ULONG HandlerType, + _In_ ULONG_PTR ImageBase, + _In_ ULONG_PTR ControlPc, + _In_opt_ PRUNTIME_FUNCTION FunctionEntry, + _Inout_ PCONTEXT ContextRecord, _Out_ PVOID *HandlerData, - _In_ PARM64_UNWIND_PARAMS UnwindParams + _Out_ PULONG_PTR EstablisherFrame, + _Inout_opt_ PKNONVOLATILE_CONTEXT_POINTERS ContextPointers, + _In_opt_ PULONG_PTR LowLimit, + _In_opt_ PULONG_PTR HighLimit, + _Outptr_opt_result_maybenull_ PEXCEPTION_ROUTINE *HandlerRoutine, + _In_ ULONG UnwindFlags ) /*++ Routine Description: - This function virtually unwinds the specified 
function by parsing the - compact .pdata record to determine where in the function the provided - ControlPc is, and then executing a standard, well-defined set of - operations. + This function virtually unwinds the specified function by executing its + prolog code backward or its epilog code forward. - If a context pointers record is specified (in the UnwindParams), then - the address where each nonvolatile register is restored from is recorded - in the appropriate element of the context pointers record. + If a context pointers record is specified, then the address where each + nonvolatile registers is restored from is recorded in the appropriate + element of the context pointers record. Arguments: - ControlPcRva - Supplies the address where control left the specified - function, as an offset relative to the ImageBase. + HandlerType - Supplies the handler type expected for the virtual unwind. + This may be either an exception or an unwind handler. A flag may + optionally be supplied to indicate that the unwind should assume + that the instruction at the PC is the one we are interested in + (versus the PC being a return address). + + ImageBase - Supplies the base address of the image that contains the + function being unwound. + + ControlPc - Supplies the address where control left the specified + function. FunctionEntry - Supplies the address of the function table entry for the specified function. If appropriate, this should have already been @@ -1258,9 +2535,21 @@ Routine Description: ContextRecord - Supplies the address of a context record. + HandlerData - Supplies a pointer to a variable that receives a pointer + the the language handler data. + EstablisherFrame - Supplies a pointer to a variable that receives the the establisher frame pointer value. + ContextPointers - Supplies an optional pointer to a context pointers + record. + + LowLimit - Supplies an optional low limit used to bound the establisher + frame. 
This must be supplied in conjunction with a high limit. + + HighLimit - Supplies an optional high limit used to bound the establisher + frame. This must be supplied in conjunction with a low limit. + HandlerRoutine - Supplies an optional pointer to a variable that receives the handler routine address. If control did not leave the specified function in either the prolog or an epilog and a handler of the @@ -1268,10 +2557,7 @@ Routine Description: language specific exception handler is returned. Otherwise, NULL is returned. - HandlerData - Supplies a pointer to a variable that receives a pointer - the language handler data. - - UnwindParams - Additional parameters shared with caller. + UnwindFlags - Supplies additional flags for the unwind operation. Return Value: @@ -1281,270 +2567,147 @@ Return Value: --*/ { - ULONG Count; - ULONG Cr; - ULONG CurrentOffset; - ULONG EpilogLength; - ULONG Flag; - ULONG FloatSize; - ULONG FrameSize; - ULONG FRegOpcodes; - ULONG FunctionLength; - ULONG HBit; - ULONG HOpcodes; - ULONG IRegOpcodes; - ULONG IntSize; - ULONG LocalSize; - DWORD64 OffsetInFunction; - DWORD64 OffsetInScope; - ULONG PrologLength; - ULONG RegF; - ULONG RegI; - ULONG RegSize; - ULONG ScopeStart; - ULONG StackAdjustOpcodes; + ULONG ControlPcRva; NTSTATUS Status; - ULONG UnwindData; - - UnwindData = FunctionEntry->UnwindData; - Status = STATUS_SUCCESS; - - // - // Compact records always describe an unwind to a call. - // + ARM64_UNWIND_PARAMS UnwindParams; + ULONG UnwindType; - ContextRecord->ContextFlags |= CONTEXT_UNWOUND_TO_CALL; - - // - // Extract the basic information about how to do a full unwind. 
- // - - Flag = UnwindData & 3; - FunctionLength = (UnwindData >> 2) & 0x7ff; - RegF = (UnwindData >> 13) & 7; - RegI = (UnwindData >> 16) & 0xf; - HBit = (UnwindData >> 20) & 1; - Cr = (UnwindData >> 21) & 3; - FrameSize = (UnwindData >> 23) & 0x1ff; - - if (Flag == 3) { - return STATUS_UNWIND_INVALID_SEQUENCE; - } - if (Cr == 2) { - return STATUS_UNWIND_INVALID_SEQUENCE; - } - - // - // Determine the size of the locals - // - - IntSize = RegI * 8; - if (Cr == 1) { - IntSize += 8; - } - FloatSize = (RegF == 0) ? 0 : (RegF + 1) * 8; - RegSize = (IntSize + FloatSize + 8*8 * HBit + 0xf) & ~0xf; - if (RegSize > 16 * FrameSize) { - return STATUS_UNWIND_INVALID_SEQUENCE; - } - LocalSize = 16 * FrameSize - RegSize; + UNREFERENCED_PARAMETER(HandlerType); - // - // If we're near the start of the function (within 17 words), - // see if we are within the prolog. - // - // N.B. If the low 2 bits of the UnwindData are 2, then we have - // no prolog. - // + _ASSERTE((UnwindFlags & ~RTL_VIRTUAL_UNWIND_VALID_FLAGS_ARM64) == 0); - OffsetInFunction = (ControlPcRva - FunctionEntry->BeginAddress) / 4; - OffsetInScope = 0; - if (OffsetInFunction < 17 && Flag != 2) { + if (FunctionEntry == NULL) { // - // Compute sizes for each opcode in the prolog. + // If the function does not have a function entry, then it is + // a pure leaf/trivial function. This means the stack pointer + // does not move, and LR is never overwritten, from the time + // it was called to the time it returns. To unwind such function, + // assign the value in LR to PC, simulating a simple ret instruction. // - IRegOpcodes = (IntSize + 8) / 16; - FRegOpcodes = (FloatSize + 8) / 16; - HOpcodes = 4 * HBit; - StackAdjustOpcodes = (Cr == 3) ? 1 : 0; - if (Cr != 3 || LocalSize > 512) { - StackAdjustOpcodes += (LocalSize > 4088) ? 2 : (LocalSize > 0) ? 1 : 0; - } - - // - // Compute the total prolog length and determine if we are within - // its scope. // - // N.B. 
We must execute prolog operations backwards to unwind, so - // our final scope offset in this case is the distance from the end. + // If the old control PC is the same as the return address, + // then no progress is being made and the stack is most + // likely malformed. // - PrologLength = IRegOpcodes + FRegOpcodes + HOpcodes + StackAdjustOpcodes; - - if (OffsetInFunction < PrologLength) { - OffsetInScope = PrologLength - OffsetInFunction; + if (ControlPc == ContextRecord->Lr) { + return STATUS_BAD_FUNCTION_TABLE; } - } - - // - // If we're near the end of the function (within 15 words), see if - // we are within the epilog. - // - // N.B. If the low 2 bits of the UnwindData are 2, then we have - // no epilog. - // - - if (OffsetInScope == 0 && OffsetInFunction + 15 >= FunctionLength && Flag != 2) { // - // Compute sizes for each opcode in the epilog. + // Set the point where control left the current function by + // obtaining the return address from the current context. + // Also indicate that we unwound from a call so that the + // language-specific handler can differentiate neighboring + // exception scopes. // - IRegOpcodes = (IntSize + 8) / 16; - FRegOpcodes = (FloatSize + 8) / 16; - HOpcodes = HBit; - StackAdjustOpcodes = (Cr == 3) ? 1 : 0; - if (Cr != 3 || LocalSize > 512) { - StackAdjustOpcodes += (LocalSize > 4088) ? 2 : (LocalSize > 0) ? 1 : 0; - } + ContextRecord->Pc = ContextRecord->Lr; + ContextRecord->ContextFlags |= CONTEXT_UNWOUND_TO_CALL; // - // Compute the total epilog length and determine if we are within - // its scope. + // Set remaining output data and return. All work done. 
// - EpilogLength = IRegOpcodes + FRegOpcodes + HOpcodes + StackAdjustOpcodes + 1; - - ScopeStart = FunctionLength - EpilogLength; - if (OffsetInFunction > ScopeStart) { - OffsetInScope = OffsetInFunction - ScopeStart; - } - } - - // - // Process operations backwards, in the order: stack/frame deallocation, - // VFP register popping, integer register popping, parameter home - // area recovery. - // - // First case is simple: we process everything with no regard for - // the current offset within the scope. - // - - Status = STATUS_SUCCESS; - if (OffsetInScope == 0) { - - if (Cr == 3) { - Status = RtlpUnwindRestoreRegisterRange(ContextRecord, 0, 29, 2, UnwindParams); - } - ContextRecord->Sp += LocalSize; - - if (RegF != 0 && Status == STATUS_SUCCESS) { - Status = RtlpUnwindRestoreFpRegisterRange(ContextRecord, IntSize, 8, RegF + 1, UnwindParams); + *EstablisherFrame = ContextRecord->Sp; + *HandlerData = NULL; + if (ARGUMENT_PRESENT(HandlerRoutine)) { + *HandlerRoutine = NULL; } - if (Cr == 1 && Status == STATUS_SUCCESS) { - Status = RtlpUnwindRestoreRegisterRange(ContextRecord, IntSize - 8, 30, 1, UnwindParams); - } - if (RegI > 0 && Status == STATUS_SUCCESS) { - Status = RtlpUnwindRestoreRegisterRange(ContextRecord, 0, 19, RegI, UnwindParams); - } - ContextRecord->Sp += RegSize; + return STATUS_SUCCESS; } // - // Second case is more complex: we must step along each operation - // to ensure it should be executed. + // Make sure out-of-bound stack accesses don't send us into an infinite + // unwinding loop. // +#if 0 + __try { +#endif + // + // Build an UnwindParams structure containing the starting PC, stack + // limits, and context pointers. 
+ // - else { + UnwindParams.ControlPc = ControlPc; + UnwindParams.LowLimit = LowLimit; + UnwindParams.HighLimit = HighLimit; + UnwindParams.ContextPointers = ContextPointers; + UnwindType = (FunctionEntry->UnwindData & 3); - CurrentOffset = 0; - if (Cr == 3) { - if (LocalSize <= 512) { - if (CurrentOffset++ >= OffsetInScope) { - Status = RtlpUnwindRestoreRegisterRange(ContextRecord, -(LONG)LocalSize, 29, 2, UnwindParams); - } - LocalSize = 0; - } - } - while (LocalSize != 0) { - Count = (LocalSize + 4087) % 4088 + 1; - if (CurrentOffset++ >= OffsetInScope) { - ContextRecord->Sp += Count; - } - LocalSize -= Count; - } + // + // Unwind type 3 refers to a chained record. The top 30 bits of the + // unwind data contains the RVA of the parent pdata record. + // - if (HBit != 0) { - CurrentOffset += 4; - } + if (UnwindType == 3) { + if ((FunctionEntry->UnwindData & 4) == 0) { + FunctionEntry = (PRUNTIME_FUNCTION)(ImageBase + FunctionEntry->UnwindData - 3); + UnwindType = (FunctionEntry->UnwindData & 3); - if (RegF != 0 && Status == STATUS_SUCCESS) { - RegF++; - while (RegF != 0) { - Count = 2 - (RegF & 1); - RegF -= Count; - if (CurrentOffset++ >= OffsetInScope) { - Status = RtlpUnwindRestoreFpRegisterRange( - ContextRecord, - (RegF == 0 && RegI == 0) ? 
(-(LONG)RegSize) : (IntSize + 8 * RegF), - 8 + RegF, - Count, - UnwindParams); - } - } - } + _ASSERTE(UnwindType != 3); + + ControlPcRva = FunctionEntry->BeginAddress; - if (Cr == 1 && Status == STATUS_SUCCESS) { - if (RegI % 2 == 0) { - if (CurrentOffset++ >= OffsetInScope) { - Status = RtlpUnwindRestoreRegisterRange(ContextRecord, IntSize - 8, 30, 1, UnwindParams); - } } else { - if (CurrentOffset++ >= OffsetInScope) { - RegI--; - Status = RtlpUnwindRestoreRegisterRange(ContextRecord, IntSize - 8, 30, 1, UnwindParams); - if (Status == STATUS_SUCCESS) { - Status = RtlpUnwindRestoreRegisterRange(ContextRecord, IntSize - 16, 19 + RegI, 1, UnwindParams); - } - } + return STATUS_UNWIND_UNSUPPORTED_VERSION; } + + } else { + ControlPcRva = (ULONG)(ControlPc - ImageBase); } - while (RegI != 0 && Status == STATUS_SUCCESS) { - Count = 2 - (RegI & 1); - RegI -= Count; - if (CurrentOffset++ >= OffsetInScope) { - Status = RtlpUnwindRestoreRegisterRange( - ContextRecord, - (RegI == 0) ? (-(LONG)RegSize) : (8 * RegI), - 19 + RegI, - Count, - UnwindParams); - } + // + // Identify the compact .pdata format versus the full .pdata+.xdata format. + // + + if (UnwindType != 0) { + Status = RtlpUnwindFunctionCompact(ControlPcRva, + ImageBase, + FunctionEntry, + ContextRecord, + EstablisherFrame, + HandlerRoutine, + HandlerData, + &UnwindParams, + UnwindFlags); + + } else { + + Status = RtlpUnwindFunctionFull(ControlPcRva, + ImageBase, + FunctionEntry, + NULL, + ContextRecord, + EstablisherFrame, + HandlerRoutine, + HandlerData, + &UnwindParams, + UnwindFlags); } + #if 0 } // - // If we succeeded, post-process the results a bit + // If we do take an exception here, fetch the exception code as the status + // and do not propagate the exception. Since the exception handler also + // uses this function, propagating it will most likely generate the same + // exception at the same point in the unwind, and continuing will typically + // overflow the kernel stack. 
// - if (Status == STATUS_SUCCESS) { - - ContextRecord->Pc = ContextRecord->Lr; - *EstablisherFrame = ContextRecord->Sp; - - if (ARGUMENT_PRESENT(HandlerRoutine)) { - *HandlerRoutine = NULL; - } - *HandlerData = NULL; + __except (EXCEPTION_EXECUTE_HANDLER) { + Status = GetExceptionCode(); } - +#endif // HOST_WINDOWS return Status; } +#endif // !defined(DEBUGGER_UNWIND) + BOOL OOPStackUnwinderArm64::Unwind(T_CONTEXT * pContext) { DWORD64 ImageBase = 0; @@ -1552,7 +2715,7 @@ BOOL OOPStackUnwinderArm64::Unwind(T_CONTEXT * pContext) if (hr != S_OK) return FALSE; - PEXCEPTION_ROUTINE DummyHandlerRoutine; + PEXCEPTION_ROUTINE DummyHandlerRoutine = NULL; PVOID DummyHandlerData; DWORD64 DummyEstablisherFrame; @@ -1563,49 +2726,29 @@ BOOL OOPStackUnwinderArm64::Unwind(T_CONTEXT * pContext) if (FAILED(GetFunctionEntry(pContext->Pc, &Rfe, sizeof(Rfe)))) return FALSE; - DWORD64 ControlPcRva = pContext->Pc - ImageBase; - - // Long branch pdata - if ((Rfe.UnwindData & 3) == 3) - { - if ((Rfe.UnwindData & 4) == 0) - { - Rfe.BeginAddress = MEMORY_READ_DWORD(NULL, ImageBase + (Rfe.UnwindData - 3)); - Rfe.UnwindData = MEMORY_READ_DWORD(NULL, ImageBase + (Rfe.UnwindData - 3) + sizeof(DWORD)); - - // A long branch should never be described by another long branch - ASSERT_AND_CHECK((Rfe.UnwindData & 3) != 3); - - ControlPcRva = Rfe.BeginAddress; - - } else - { - return FALSE; - } - } + NTSTATUS Status; - if ((Rfe.UnwindData & 3) != 0) - { + Status = RtlpxVirtualUnwind(0 /* HandlerType */, + ImageBase, + pContext->Pc, + &Rfe, + pContext, + &DummyHandlerData, + &DummyEstablisherFrame, + NULL, + NULL, + NULL, + &DummyHandlerRoutine, + 0); - hr = RtlpUnwindFunctionCompact(ControlPcRva, - &Rfe, - pContext, - &DummyEstablisherFrame, - &DummyHandlerRoutine, - &DummyHandlerData, - NULL); + // + // If we fail the unwind, clear the PC to 0. This is recognized by + // many callers as a failure, given that RtlVirtualUnwind does not + // return a status code. 
+ // - } - else - { - hr = RtlpUnwindFunctionFull(ControlPcRva, - ImageBase, - &Rfe, - pContext, - &DummyEstablisherFrame, - &DummyHandlerRoutine, - &DummyHandlerData, - NULL); + if (!NT_SUCCESS(Status)) { + pContext->Pc = 0; } // PC == 0 means unwinding is finished. @@ -1633,78 +2776,48 @@ BOOL DacUnwindStackFrame(T_CONTEXT *pContext, T_KNONVOLATILE_CONTEXT_POINTERS* p } #if defined(HOST_UNIX) + +#undef PRUNTIME_FUNCTION + PEXCEPTION_ROUTINE RtlVirtualUnwind( IN ULONG HandlerType, IN ULONG64 ImageBase, IN ULONG64 ControlPc, - IN PT_RUNTIME_FUNCTION FunctionEntry, + IN PRUNTIME_FUNCTION FunctionEntry, IN OUT PCONTEXT ContextRecord, OUT PVOID *HandlerData, OUT PULONG64 EstablisherFrame, - IN OUT PT_KNONVOLATILE_CONTEXT_POINTERS ContextPointers OPTIONAL + IN OUT PKNONVOLATILE_CONTEXT_POINTERS ContextPointers OPTIONAL ) { - PEXCEPTION_ROUTINE handlerRoutine; - HRESULT hr; - - DWORD64 startingPc = ControlPc; - DWORD64 startingSp = ContextRecord->Sp; - - T_RUNTIME_FUNCTION rfe; - - rfe.BeginAddress = FunctionEntry->BeginAddress; - rfe.UnwindData = FunctionEntry->UnwindData; - - ARM64_UNWIND_PARAMS unwindParams; - unwindParams.ContextPointers = ContextPointers; - - DWORD64 ControlPcRva = ControlPc - ImageBase; - - // Long branch pdata - if ((rfe.UnwindData & 3) == 3) - { - if ((rfe.UnwindData & 4) == 0) - { - rfe.BeginAddress = MEMORY_READ_DWORD(NULL, ImageBase + (rfe.UnwindData - 3)); - rfe.UnwindData = MEMORY_READ_DWORD(NULL, ImageBase + (rfe.UnwindData - 3) + sizeof(DWORD)); - - // A long branch should never be described by another long branch - ASSERT_AND_CHECK((rfe.UnwindData & 3) != 3); - - ControlPcRva = rfe.BeginAddress; + PEXCEPTION_ROUTINE HandlerRoutine; + NTSTATUS Status; - } else - { - return FALSE; - } - } + HandlerRoutine = NULL; + Status = RtlpxVirtualUnwind(HandlerType, + ImageBase, + ControlPc, + (PIMAGE_ARM64_RUNTIME_FUNCTION_ENTRY)FunctionEntry, + ContextRecord, + HandlerData, + EstablisherFrame, + ContextPointers, + NULL, + NULL, + &HandlerRoutine, + 
0); - if ((rfe.UnwindData & 3) != 0) - { - hr = RtlpUnwindFunctionCompact(ControlPcRva, - &rfe, - ContextRecord, - EstablisherFrame, - &handlerRoutine, - HandlerData, - &unwindParams); + // + // If we fail the unwind, clear the PC to 0. This is recognized by + // many callers as a failure, given that RtlVirtualUnwind does not + // return a status code. + // + if (!NT_SUCCESS(Status)) { + ContextRecord->Pc = 0; } - else - { - hr = RtlpUnwindFunctionFull(ControlPcRva, - ImageBase, - &rfe, - ContextRecord, - EstablisherFrame, - &handlerRoutine, - HandlerData, - &unwindParams); - } - - _ASSERTE(SUCCEEDED(hr)); - return handlerRoutine; + return HandlerRoutine; } #endif diff --git a/src/coreclr/vm/CMakeLists.txt b/src/coreclr/vm/CMakeLists.txt index d8054a250af41..5856acd7650da 100644 --- a/src/coreclr/vm/CMakeLists.txt +++ b/src/coreclr/vm/CMakeLists.txt @@ -652,6 +652,7 @@ if(CLR_CMAKE_TARGET_ARCH_AMD64) ${ARCH_SOURCES_DIR}/JitHelpers_InlineGetThread.asm ${ARCH_SOURCES_DIR}/JitHelpers_SingleAppDomain.asm ${ARCH_SOURCES_DIR}/JitHelpers_Slow.asm + ${ARCH_SOURCES_DIR}/patchedcode.asm ${ARCH_SOURCES_DIR}/PInvokeStubs.asm ${ARCH_SOURCES_DIR}/RedirectedHandledJITCase.asm ${ARCH_SOURCES_DIR}/ThePreStubAMD64.asm @@ -683,6 +684,7 @@ elseif(CLR_CMAKE_TARGET_ARCH_ARM64) ${ARCH_SOURCES_DIR}/AsmHelpers.asm ${ARCH_SOURCES_DIR}/CallDescrWorkerARM64.asm ${ARCH_SOURCES_DIR}/CrtHelpers.asm + ${ARCH_SOURCES_DIR}/patchedcode.asm ${ARCH_SOURCES_DIR}/PInvokeStubs.asm ${ARCH_SOURCES_DIR}/thunktemplates.asm ) @@ -705,6 +707,7 @@ else(CLR_CMAKE_TARGET_WIN32) ${ARCH_SOURCES_DIR}/jithelpers_fastwritebarriers.S ${ARCH_SOURCES_DIR}/jithelpers_singleappdomain.S ${ARCH_SOURCES_DIR}/jithelpers_slow.S + ${ARCH_SOURCES_DIR}/patchedcode.S ${ARCH_SOURCES_DIR}/pinvokestubs.S ${ARCH_SOURCES_DIR}/redirectedhandledjitcase.S ${ARCH_SOURCES_DIR}/theprestubamd64.S @@ -738,6 +741,7 @@ else(CLR_CMAKE_TARGET_WIN32) ${ARCH_SOURCES_DIR}/asmhelpers.S ${ARCH_SOURCES_DIR}/calldescrworkerarm64.S 
${ARCH_SOURCES_DIR}/crthelpers.S + ${ARCH_SOURCES_DIR}/patchedcode.S ${ARCH_SOURCES_DIR}/pinvokestubs.S ${ARCH_SOURCES_DIR}/thunktemplates.S ) diff --git a/src/coreclr/vm/amd64/JitHelpers_Fast.asm b/src/coreclr/vm/amd64/JitHelpers_Fast.asm index dd5b891a44134..0f1b71b5ee93b 100644 --- a/src/coreclr/vm/amd64/JitHelpers_Fast.asm +++ b/src/coreclr/vm/amd64/JitHelpers_Fast.asm @@ -50,188 +50,6 @@ endif extern JIT_InternalThrow:proc -; Mark start of the code region that we patch at runtime -LEAF_ENTRY JIT_PatchedCodeStart, _TEXT - ret -LEAF_END JIT_PatchedCodeStart, _TEXT - - -; This is used by the mechanism to hold either the JIT_WriteBarrier_PreGrow -; or JIT_WriteBarrier_PostGrow code (depending on the state of the GC). It _WILL_ -; change at runtime as the GC changes. Initially it should simply be a copy of the -; larger of the two functions (JIT_WriteBarrier_PostGrow) to ensure we have created -; enough space to copy that code in. -LEAF_ENTRY JIT_WriteBarrier, _TEXT - align 16 - -ifdef _DEBUG - ; In debug builds, this just contains jump to the debug version of the write barrier by default - mov rax, JIT_WriteBarrier_Debug - jmp rax -endif - -ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP - ; JIT_WriteBarrier_WriteWatch_PostGrow64 - - ; Regarding patchable constants: - ; - 64-bit constants have to be loaded into a register - ; - The constants have to be aligned to 8 bytes so that they can be patched easily - ; - The constant loads have been located to minimize NOP padding required to align the constants - ; - Using different registers for successive constant loads helps pipeline better. Should we decide to use a special - ; non-volatile calling convention, this should be changed to use just one register. - - ; Do the move into the GC . 
It is correct to take an AV here, the EH code - ; figures out that this came from a WriteBarrier and correctly maps it back - ; to the managed method which called the WriteBarrier (see setup in - ; InitializeExceptionHandling, vm\exceptionhandling.cpp). - mov [rcx], rdx - - ; Update the write watch table if necessary - mov rax, rcx - mov r8, 0F0F0F0F0F0F0F0F0h - shr rax, 0Ch ; SoftwareWriteWatch::AddressToTableByteIndexShift - NOP_2_BYTE ; padding for alignment of constant - mov r9, 0F0F0F0F0F0F0F0F0h - add rax, r8 - cmp byte ptr [rax], 0h - jne CheckCardTable - mov byte ptr [rax], 0FFh - - NOP_3_BYTE ; padding for alignment of constant - - ; Check the lower and upper ephemeral region bounds - CheckCardTable: - cmp rdx, r9 - jb Exit - - NOP_3_BYTE ; padding for alignment of constant - - mov r8, 0F0F0F0F0F0F0F0F0h - - cmp rdx, r8 - jae Exit - - nop ; padding for alignment of constant - - mov rax, 0F0F0F0F0F0F0F0F0h - - ; Touch the card table entry, if not already dirty. - shr rcx, 0Bh - cmp byte ptr [rcx + rax], 0FFh - jne UpdateCardTable - REPRET - - UpdateCardTable: - mov byte ptr [rcx + rax], 0FFh -ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES - mov rax, 0F0F0F0F0F0F0F0F0h - shr rcx, 0Ah - cmp byte ptr [rcx + rax], 0FFh - jne UpdateCardBundleTable - REPRET - - UpdateCardBundleTable: - mov byte ptr [rcx + rax], 0FFh -endif - ret - - align 16 - Exit: - REPRET - - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - -else - ; JIT_WriteBarrier_PostGrow64 - - ; Do the move into the GC . It is correct to take an AV here, the EH code - ; figures out that this came from a WriteBarrier and correctly maps it back - ; to the managed method which called the WriteBarrier (see setup in - ; InitializeExceptionHandling, vm\exceptionhandling.cpp). 
- mov [rcx], rdx - - NOP_3_BYTE ; padding for alignment of constant - - ; Can't compare a 64 bit immediate, so we have to move them into a - ; register. Values of these immediates will be patched at runtime. - ; By using two registers we can pipeline better. Should we decide to use - ; a special non-volatile calling convention, this should be changed to - ; just one. - - mov rax, 0F0F0F0F0F0F0F0F0h - - ; Check the lower and upper ephemeral region bounds - cmp rdx, rax - jb Exit - - nop ; padding for alignment of constant - - mov r8, 0F0F0F0F0F0F0F0F0h - - cmp rdx, r8 - jae Exit - - nop ; padding for alignment of constant - - mov rax, 0F0F0F0F0F0F0F0F0h - - ; Touch the card table entry, if not already dirty. - shr rcx, 0Bh - cmp byte ptr [rcx + rax], 0FFh - jne UpdateCardTable - REPRET - - UpdateCardTable: - mov byte ptr [rcx + rax], 0FFh -ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES - mov rax, 0F0F0F0F0F0F0F0F0h - shr rcx, 0Ah - cmp byte ptr [rcx + rax], 0FFh - jne UpdateCardBundleTable - REPRET - - UpdateCardBundleTable: - mov byte ptr [rcx + rax], 0FFh -endif - ret - - align 16 - Exit: - REPRET -endif - - ; make sure this is bigger than any of the others - align 16 - nop -LEAF_END_MARKED JIT_WriteBarrier, _TEXT - -; Mark start of the code region that we patch at runtime -LEAF_ENTRY JIT_PatchedCodeLast, _TEXT - ret -LEAF_END JIT_PatchedCodeLast, _TEXT - ; JIT_ByRefWriteBarrier has weird semantics, see usage in StubLinkerX86.cpp ; ; Entry: diff --git a/src/coreclr/vm/amd64/jithelpers_fast.S b/src/coreclr/vm/amd64/jithelpers_fast.S index 7578f46ce0c1d..3a2d803a1460f 100644 --- a/src/coreclr/vm/amd64/jithelpers_fast.S +++ b/src/coreclr/vm/amd64/jithelpers_fast.S @@ -5,245 +5,6 @@ #include "unixasmmacros.inc" #include "asmconstants.h" -// Mark start of the code region that we patch at runtime -LEAF_ENTRY JIT_PatchedCodeStart, _TEXT - ret -LEAF_END JIT_PatchedCodeStart, _TEXT - - -// There is an even more optimized version of these helpers possible which takes -// 
advantage of knowledge of which way the ephemeral heap is growing to only do 1/2 -// that check (this is more significant in the JIT_WriteBarrier case). -// -// Additionally we can look into providing helpers which will take the src/dest from -// specific registers (like x86) which _could_ (??) make for easier register allocation -// for the JIT64, however it might lead to having to have some nasty code that treats -// these guys really special like... :(. -// -// Version that does the move, checks whether or not it's in the GC and whether or not -// it needs to have it's card updated -// -// void JIT_CheckedWriteBarrier(Object** dst, Object* src) -LEAF_ENTRY JIT_CheckedWriteBarrier, _TEXT - - // When WRITE_BARRIER_CHECK is defined _NotInHeap will write the reference - // but if it isn't then it will just return. - // - // See if this is in GCHeap - PREPARE_EXTERNAL_VAR g_lowest_address, rax - cmp rdi, [rax] - // jb LOCAL_LABEL(NotInHeap) - .byte 0x72, 0x12 - PREPARE_EXTERNAL_VAR g_highest_address, rax - cmp rdi, [rax] - - // jnb LOCAL_LABEL(NotInHeap) - .byte 0x73, 0x06 - jmp [rip + C_FUNC(JIT_WriteBarrier_Loc)] - - LOCAL_LABEL(NotInHeap): - // See comment above about possible AV - mov [rdi], rsi - ret -LEAF_END_MARKED JIT_CheckedWriteBarrier, _TEXT - - -// This is used by the mechanism to hold either the JIT_WriteBarrier_PreGrow -// or JIT_WriteBarrier_PostGrow code (depending on the state of the GC). It _WILL_ -// change at runtime as the GC changes. Initially it should simply be a copy of the -// larger of the two functions (JIT_WriteBarrier_PostGrow) to ensure we have created -// enough space to copy that code in. 
-.balign 16 -LEAF_ENTRY JIT_WriteBarrier, _TEXT -#ifdef _DEBUG - // In debug builds, this just contains jump to the debug version of the write barrier by default - jmp C_FUNC(JIT_WriteBarrier_Debug) -#endif - -#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP - // JIT_WriteBarrier_WriteWatch_PostGrow64 - - // Regarding patchable constants: - // - 64-bit constants have to be loaded into a register - // - The constants have to be aligned to 8 bytes so that they can be patched easily - // - The constant loads have been located to minimize NOP padding required to align the constants - // - Using different registers for successive constant loads helps pipeline better. Should we decide to use a special - // non-volatile calling convention, this should be changed to use just one register. - - // Do the move into the GC . It is correct to take an AV here, the EH code - // figures out that this came from a WriteBarrier and correctly maps it back - // to the managed method which called the WriteBarrier (see setup in - // InitializeExceptionHandling, vm\exceptionhandling.cpp). - mov [rdi], rsi - - // Update the write watch table if necessary - mov rax, rdi - movabs r10, 0xF0F0F0F0F0F0F0F0 - shr rax, 0xC // SoftwareWriteWatch::AddressToTableByteIndexShift - NOP_2_BYTE // padding for alignment of constant - movabs r11, 0xF0F0F0F0F0F0F0F0 - add rax, r10 - cmp byte ptr [rax], 0x0 - .byte 0x75, 0x06 - // jne LOCAL_LABEL(CheckCardTable) - mov byte ptr [rax], 0xFF - - NOP_3_BYTE // padding for alignment of constant - - // Check the lower and upper ephemeral region bounds - LOCAL_LABEL(CheckCardTable): - cmp rsi, r11 - .byte 0x72,0x3D - // jb LOCAL_LABEL(Exit) - - NOP_3_BYTE // padding for alignment of constant - - movabs r10, 0xF0F0F0F0F0F0F0F0 - - cmp rsi, r10 - .byte 0x73,0x2B - // jae LOCAL_LABEL(Exit) - - nop // padding for alignment of constant - - movabs rax, 0xF0F0F0F0F0F0F0F0 - - // Touch the card table entry, if not already dirty. 
- shr rdi, 0x0B - cmp byte ptr [rdi + rax], 0xFF - .byte 0x75, 0x02 - // jne LOCAL_LABEL(UpdateCardTable) - REPRET - - LOCAL_LABEL(UpdateCardTable): - mov byte ptr [rdi + rax], 0xFF - -#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES - NOP_2_BYTE // padding for alignment of constant - shr rdi, 0x0A - - movabs rax, 0xF0F0F0F0F0F0F0F0 - cmp byte ptr [rdi + rax], 0xFF - - .byte 0x75, 0x02 - // jne LOCAL_LABEL(UpdateCardBundle_WriteWatch_PostGrow64) - REPRET - - LOCAL_LABEL(UpdateCardBundle_WriteWatch_PostGrow64): - mov byte ptr [rdi + rax], 0xFF -#endif - - ret - - .balign 16 - LOCAL_LABEL(Exit): - REPRET - - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - NOP_3_BYTE - -#else - // JIT_WriteBarrier_PostGrow64 - - // Do the move into the GC . It is correct to take an AV here, the EH code - // figures out that this came from a WriteBarrier and correctly maps it back - // to the managed method which called the WriteBarrier (see setup in - // InitializeExceptionHandling, vm\exceptionhandling.cpp). - mov [rdi], rsi - - NOP_3_BYTE // padding for alignment of constant - - // Can't compare a 64 bit immediate, so we have to move them into a - // register. Values of these immediates will be patched at runtime. - // By using two registers we can pipeline better. Should we decide to use - // a special non-volatile calling convention, this should be changed to - // just one. 
- - movabs rax, 0xF0F0F0F0F0F0F0F0 - - // Check the lower and upper ephemeral region bounds - cmp rsi, rax - // jb LOCAL_LABEL(Exit) - .byte 0x72, 0x36 - - nop // padding for alignment of constant - - movabs r8, 0xF0F0F0F0F0F0F0F0 - - cmp rsi, r8 - // jae LOCAL_LABEL(Exit) - .byte 0x73, 0x26 - - nop // padding for alignment of constant - - movabs rax, 0xF0F0F0F0F0F0F0F0 - - // Touch the card table entry, if not already dirty. - shr rdi, 0Bh - cmp byte ptr [rdi + rax], 0FFh - .byte 0x75, 0x02 - // jne LOCAL_LABEL(UpdateCardTable) - REPRET - - LOCAL_LABEL(UpdateCardTable): - mov byte ptr [rdi + rax], 0FFh - -#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES - NOP_6_BYTE // padding for alignment of constant - - movabs rax, 0xF0F0F0F0F0F0F0F0 - - // Touch the card bundle, if not already dirty. - // rdi is already shifted by 0xB, so shift by 0xA more - shr rdi, 0x0A - cmp byte ptr [rdi + rax], 0FFh - - .byte 0x75, 0x02 - // jne LOCAL_LABEL(UpdateCardBundle) - REPRET - - LOCAL_LABEL(UpdateCardBundle): - mov byte ptr [rdi + rax], 0FFh -#endif - - ret - - .balign 16 - LOCAL_LABEL(Exit): - REPRET -#endif - - // make sure this is bigger than any of the others - .balign 16 - nop -LEAF_END_MARKED JIT_WriteBarrier, _TEXT - -// Mark start of the code region that we patch at runtime -LEAF_ENTRY JIT_PatchedCodeLast, _TEXT - ret -LEAF_END JIT_PatchedCodeLast, _TEXT - // JIT_ByRefWriteBarrier has weird semantics, see usage in StubLinkerX86.cpp // // Entry: diff --git a/src/coreclr/vm/amd64/patchedcode.S b/src/coreclr/vm/amd64/patchedcode.S new file mode 100644 index 0000000000000..9af4e3ce855b7 --- /dev/null +++ b/src/coreclr/vm/amd64/patchedcode.S @@ -0,0 +1,245 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
+ +.intel_syntax noprefix +#include "unixasmmacros.inc" +#include "asmconstants.h" + +// Mark start of the code region that we patch at runtime +LEAF_ENTRY JIT_PatchedCodeStart, _TEXT + ret +LEAF_END JIT_PatchedCodeStart, _TEXT + + +// There is an even more optimized version of these helpers possible which takes +// advantage of knowledge of which way the ephemeral heap is growing to only do 1/2 +// that check (this is more significant in the JIT_WriteBarrier case). +// +// Additionally we can look into providing helpers which will take the src/dest from +// specific registers (like x86) which _could_ (??) make for easier register allocation +// for the JIT64, however it might lead to having to have some nasty code that treats +// these guys really special like... :(. +// +// Version that does the move, checks whether or not it's in the GC and whether or not +// it needs to have it's card updated +// +// void JIT_CheckedWriteBarrier(Object** dst, Object* src) +LEAF_ENTRY JIT_CheckedWriteBarrier, _TEXT + + // When WRITE_BARRIER_CHECK is defined _NotInHeap will write the reference + // but if it isn't then it will just return. + // + // See if this is in GCHeap + PREPARE_EXTERNAL_VAR g_lowest_address, rax + cmp rdi, [rax] + // jb LOCAL_LABEL(NotInHeap) + .byte 0x72, 0x12 + PREPARE_EXTERNAL_VAR g_highest_address, rax + cmp rdi, [rax] + + // jnb LOCAL_LABEL(NotInHeap) + .byte 0x73, 0x06 + jmp [rip + C_FUNC(JIT_WriteBarrier_Loc)] + + LOCAL_LABEL(NotInHeap): + // See comment above about possible AV + mov [rdi], rsi + ret +LEAF_END_MARKED JIT_CheckedWriteBarrier, _TEXT + + +// This is used by the mechanism to hold either the JIT_WriteBarrier_PreGrow +// or JIT_WriteBarrier_PostGrow code (depending on the state of the GC). It _WILL_ +// change at runtime as the GC changes. Initially it should simply be a copy of the +// larger of the two functions (JIT_WriteBarrier_PostGrow) to ensure we have created +// enough space to copy that code in. 
+.balign 16 +LEAF_ENTRY JIT_WriteBarrier, _TEXT +#ifdef _DEBUG + // In debug builds, this just contains jump to the debug version of the write barrier by default + jmp C_FUNC(JIT_WriteBarrier_Debug) +#endif + +#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP + // JIT_WriteBarrier_WriteWatch_PostGrow64 + + // Regarding patchable constants: + // - 64-bit constants have to be loaded into a register + // - The constants have to be aligned to 8 bytes so that they can be patched easily + // - The constant loads have been located to minimize NOP padding required to align the constants + // - Using different registers for successive constant loads helps pipeline better. Should we decide to use a special + // non-volatile calling convention, this should be changed to use just one register. + + // Do the move into the GC . It is correct to take an AV here, the EH code + // figures out that this came from a WriteBarrier and correctly maps it back + // to the managed method which called the WriteBarrier (see setup in + // InitializeExceptionHandling, vm\exceptionhandling.cpp). + mov [rdi], rsi + + // Update the write watch table if necessary + mov rax, rdi + movabs r10, 0xF0F0F0F0F0F0F0F0 + shr rax, 0xC // SoftwareWriteWatch::AddressToTableByteIndexShift + NOP_2_BYTE // padding for alignment of constant + movabs r11, 0xF0F0F0F0F0F0F0F0 + add rax, r10 + cmp byte ptr [rax], 0x0 + .byte 0x75, 0x06 + // jne LOCAL_LABEL(CheckCardTable) + mov byte ptr [rax], 0xFF + + NOP_3_BYTE // padding for alignment of constant + + // Check the lower and upper ephemeral region bounds + LOCAL_LABEL(CheckCardTable): + cmp rsi, r11 + .byte 0x72,0x3D + // jb LOCAL_LABEL(Exit) + + NOP_3_BYTE // padding for alignment of constant + + movabs r10, 0xF0F0F0F0F0F0F0F0 + + cmp rsi, r10 + .byte 0x73,0x2B + // jae LOCAL_LABEL(Exit) + + nop // padding for alignment of constant + + movabs rax, 0xF0F0F0F0F0F0F0F0 + + // Touch the card table entry, if not already dirty. 
+ shr rdi, 0x0B + cmp byte ptr [rdi + rax], 0xFF + .byte 0x75, 0x02 + // jne LOCAL_LABEL(UpdateCardTable) + REPRET + + LOCAL_LABEL(UpdateCardTable): + mov byte ptr [rdi + rax], 0xFF + +#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES + NOP_2_BYTE // padding for alignment of constant + shr rdi, 0x0A + + movabs rax, 0xF0F0F0F0F0F0F0F0 + cmp byte ptr [rdi + rax], 0xFF + + .byte 0x75, 0x02 + // jne LOCAL_LABEL(UpdateCardBundle_WriteWatch_PostGrow64) + REPRET + + LOCAL_LABEL(UpdateCardBundle_WriteWatch_PostGrow64): + mov byte ptr [rdi + rax], 0xFF +#endif + + ret + + .balign 16 + LOCAL_LABEL(Exit): + REPRET + + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + +#else + // JIT_WriteBarrier_PostGrow64 + + // Do the move into the GC . It is correct to take an AV here, the EH code + // figures out that this came from a WriteBarrier and correctly maps it back + // to the managed method which called the WriteBarrier (see setup in + // InitializeExceptionHandling, vm\exceptionhandling.cpp). + mov [rdi], rsi + + NOP_3_BYTE // padding for alignment of constant + + // Can't compare a 64 bit immediate, so we have to move them into a + // register. Values of these immediates will be patched at runtime. + // By using two registers we can pipeline better. Should we decide to use + // a special non-volatile calling convention, this should be changed to + // just one. 
+ + movabs rax, 0xF0F0F0F0F0F0F0F0 + + // Check the lower and upper ephemeral region bounds + cmp rsi, rax + // jb LOCAL_LABEL(Exit) + .byte 0x72, 0x36 + + nop // padding for alignment of constant + + movabs r8, 0xF0F0F0F0F0F0F0F0 + + cmp rsi, r8 + // jae LOCAL_LABEL(Exit) + .byte 0x73, 0x26 + + nop // padding for alignment of constant + + movabs rax, 0xF0F0F0F0F0F0F0F0 + + // Touch the card table entry, if not already dirty. + shr rdi, 0Bh + cmp byte ptr [rdi + rax], 0FFh + .byte 0x75, 0x02 + // jne LOCAL_LABEL(UpdateCardTable) + REPRET + + LOCAL_LABEL(UpdateCardTable): + mov byte ptr [rdi + rax], 0FFh + +#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES + NOP_6_BYTE // padding for alignment of constant + + movabs rax, 0xF0F0F0F0F0F0F0F0 + + // Touch the card bundle, if not already dirty. + // rdi is already shifted by 0xB, so shift by 0xA more + shr rdi, 0x0A + cmp byte ptr [rdi + rax], 0FFh + + .byte 0x75, 0x02 + // jne LOCAL_LABEL(UpdateCardBundle) + REPRET + + LOCAL_LABEL(UpdateCardBundle): + mov byte ptr [rdi + rax], 0FFh +#endif + + ret + + .balign 16 + LOCAL_LABEL(Exit): + REPRET +#endif + + // make sure this is bigger than any of the others + .balign 16 + nop +LEAF_END_MARKED JIT_WriteBarrier, _TEXT + +// Mark start of the code region that we patch at runtime +LEAF_ENTRY JIT_PatchedCodeLast, _TEXT + ret +LEAF_END JIT_PatchedCodeLast, _TEXT diff --git a/src/coreclr/vm/amd64/patchedcode.asm b/src/coreclr/vm/amd64/patchedcode.asm new file mode 100644 index 0000000000000..56d325979f69c --- /dev/null +++ b/src/coreclr/vm/amd64/patchedcode.asm @@ -0,0 +1,202 @@ +; Licensed to the .NET Foundation under one or more agreements. +; The .NET Foundation licenses this file to you under the MIT license. + +; *********************************************************************** +; File: patchedcode.asm +; +; Notes: routinues which are patched at runtime and need to be linked in +; their declared order. 
+; *********************************************************************** + + +include AsmMacros.inc +include asmconstants.inc + +ifdef _DEBUG +extern JIT_WriteBarrier_Debug:proc +endif + + +; Mark start of the code region that we patch at runtime +LEAF_ENTRY JIT_PatchedCodeStart, _TEXT + ret +LEAF_END JIT_PatchedCodeStart, _TEXT + + +; This is used by the mechanism to hold either the JIT_WriteBarrier_PreGrow +; or JIT_WriteBarrier_PostGrow code (depending on the state of the GC). It _WILL_ +; change at runtime as the GC changes. Initially it should simply be a copy of the +; larger of the two functions (JIT_WriteBarrier_PostGrow) to ensure we have created +; enough space to copy that code in. +LEAF_ENTRY JIT_WriteBarrier, _TEXT + align 16 + +ifdef _DEBUG + ; In debug builds, this just contains jump to the debug version of the write barrier by default + mov rax, JIT_WriteBarrier_Debug + jmp rax +endif + +ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP + ; JIT_WriteBarrier_WriteWatch_PostGrow64 + + ; Regarding patchable constants: + ; - 64-bit constants have to be loaded into a register + ; - The constants have to be aligned to 8 bytes so that they can be patched easily + ; - The constant loads have been located to minimize NOP padding required to align the constants + ; - Using different registers for successive constant loads helps pipeline better. Should we decide to use a special + ; non-volatile calling convention, this should be changed to use just one register. + + ; Do the move into the GC . It is correct to take an AV here, the EH code + ; figures out that this came from a WriteBarrier and correctly maps it back + ; to the managed method which called the WriteBarrier (see setup in + ; InitializeExceptionHandling, vm\exceptionhandling.cpp). 
+ mov [rcx], rdx + + ; Update the write watch table if necessary + mov rax, rcx + mov r8, 0F0F0F0F0F0F0F0F0h + shr rax, 0Ch ; SoftwareWriteWatch::AddressToTableByteIndexShift + NOP_2_BYTE ; padding for alignment of constant + mov r9, 0F0F0F0F0F0F0F0F0h + add rax, r8 + cmp byte ptr [rax], 0h + jne CheckCardTable + mov byte ptr [rax], 0FFh + + NOP_3_BYTE ; padding for alignment of constant + + ; Check the lower and upper ephemeral region bounds + CheckCardTable: + cmp rdx, r9 + jb Exit + + NOP_3_BYTE ; padding for alignment of constant + + mov r8, 0F0F0F0F0F0F0F0F0h + + cmp rdx, r8 + jae Exit + + nop ; padding for alignment of constant + + mov rax, 0F0F0F0F0F0F0F0F0h + + ; Touch the card table entry, if not already dirty. + shr rcx, 0Bh + cmp byte ptr [rcx + rax], 0FFh + jne UpdateCardTable + REPRET + + UpdateCardTable: + mov byte ptr [rcx + rax], 0FFh +ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES + mov rax, 0F0F0F0F0F0F0F0F0h + shr rcx, 0Ah + cmp byte ptr [rcx + rax], 0FFh + jne UpdateCardBundleTable + REPRET + + UpdateCardBundleTable: + mov byte ptr [rcx + rax], 0FFh +endif + ret + + align 16 + Exit: + REPRET + + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + NOP_3_BYTE + +else + ; JIT_WriteBarrier_PostGrow64 + + ; Do the move into the GC . It is correct to take an AV here, the EH code + ; figures out that this came from a WriteBarrier and correctly maps it back + ; to the managed method which called the WriteBarrier (see setup in + ; InitializeExceptionHandling, vm\exceptionhandling.cpp). + mov [rcx], rdx + + NOP_3_BYTE ; padding for alignment of constant + + ; Can't compare a 64 bit immediate, so we have to move them into a + ; register. Values of these immediates will be patched at runtime. + ; By using two registers we can pipeline better. 
Should we decide to use + ; a special non-volatile calling convention, this should be changed to + ; just one. + + mov rax, 0F0F0F0F0F0F0F0F0h + + ; Check the lower and upper ephemeral region bounds + cmp rdx, rax + jb Exit + + nop ; padding for alignment of constant + + mov r8, 0F0F0F0F0F0F0F0F0h + + cmp rdx, r8 + jae Exit + + nop ; padding for alignment of constant + + mov rax, 0F0F0F0F0F0F0F0F0h + + ; Touch the card table entry, if not already dirty. + shr rcx, 0Bh + cmp byte ptr [rcx + rax], 0FFh + jne UpdateCardTable + REPRET + + UpdateCardTable: + mov byte ptr [rcx + rax], 0FFh +ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES + mov rax, 0F0F0F0F0F0F0F0F0h + shr rcx, 0Ah + cmp byte ptr [rcx + rax], 0FFh + jne UpdateCardBundleTable + REPRET + + UpdateCardBundleTable: + mov byte ptr [rcx + rax], 0FFh +endif + ret + + align 16 + Exit: + REPRET +endif + + ; make sure this is bigger than any of the others + align 16 + nop +LEAF_END_MARKED JIT_WriteBarrier, _TEXT + +; Mark start of the code region that we patch at runtime +LEAF_ENTRY JIT_PatchedCodeLast, _TEXT + ret +LEAF_END JIT_PatchedCodeLast, _TEXT + + end diff --git a/src/coreclr/vm/arm64/asmhelpers.S b/src/coreclr/vm/arm64/asmhelpers.S index a7c65bb713c5c..0edbb3fdf92fc 100644 --- a/src/coreclr/vm/arm64/asmhelpers.S +++ b/src/coreclr/vm/arm64/asmhelpers.S @@ -174,26 +174,6 @@ C_FUNC(ThePreStubPatchLabel): ret lr LEAF_END ThePreStubPatch, _TEXT - -//----------------------------------------------------------------------------- -// The following Macros help in WRITE_BARRIER Implementations -// WRITE_BARRIER_ENTRY -// -// Declare the start of a write barrier function. Use similarly to NESTED_ENTRY. This is the only legal way -// to declare a write barrier function. -// -.macro WRITE_BARRIER_ENTRY name - LEAF_ENTRY \name, _TEXT -.endm - -// WRITE_BARRIER_END -// -// The partner to WRITE_BARRIER_ENTRY, used like NESTED_END. 
-// -.macro WRITE_BARRIER_END name - LEAF_END_MARKED \name, _TEXT -.endm - // void JIT_UpdateWriteBarrierState(bool skipEphemeralCheck, size_t writeableOffset) // // Update shadow copies of the various state info required for barrier @@ -205,7 +185,7 @@ LEAF_END ThePreStubPatch, _TEXT // Align and group state info together so it fits in a single cache line // and each entry can be written atomically // -WRITE_BARRIER_ENTRY JIT_UpdateWriteBarrierState +LEAF_ENTRY JIT_UpdateWriteBarrierState, _TEXT PROLOG_SAVE_REG_PAIR_INDEXED fp, lr, -16 // x0-x7, x10 will contain intended new state @@ -269,7 +249,7 @@ LOCAL_LABEL(EphemeralCheckEnabled): EPILOG_RESTORE_REG_PAIR_INDEXED fp, lr, 16 EPILOG_RETURN -WRITE_BARRIER_END JIT_UpdateWriteBarrierState +LEAF_END JIT_UpdateWriteBarrierState // ------------------------// ------------------------------------------------------------------ // __declspec(naked) void F_CALL_CONV JIT_WriteBarrier_Callable(Object **dst, Object* val) @@ -285,214 +265,6 @@ LEAF_ENTRY JIT_WriteBarrier_Callable, _TEXT br x17 LEAF_END JIT_WriteBarrier_Callable, _TEXT -.balign 64 // Align to power of two at least as big as patchable literal pool so that it fits optimally in cache line -//------------------------------------------ -// Start of the writeable code region -LEAF_ENTRY JIT_PatchedCodeStart, _TEXT - ret lr -LEAF_END JIT_PatchedCodeStart, _TEXT - -// void JIT_ByRefWriteBarrier -// On entry: -// x13 : the source address (points to object reference to write) -// x14 : the destination address (object reference written here) -// -// On exit: -// x12 : trashed -// x13 : incremented by 8 -// x14 : incremented by 8 -// x15 : trashed -// x17 : trashed (ip1) if FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP -// -WRITE_BARRIER_ENTRY JIT_ByRefWriteBarrier - - ldr x15, [x13], 8 - b C_FUNC(JIT_CheckedWriteBarrier) - -WRITE_BARRIER_END JIT_ByRefWriteBarrier - -//----------------------------------------------------------------------------- -// Simple WriteBarriers -// 
void JIT_CheckedWriteBarrier(Object** dst, Object* src) -// On entry: -// x14 : the destination address (LHS of the assignment) -// x15 : the object reference (RHS of the assignment) -// -// On exit: -// x12 : trashed -// x14 : trashed (incremented by 8 to implement JIT_ByRefWriteBarrier contract) -// x15 : trashed -// x17 : trashed (ip1) if FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP -// -WRITE_BARRIER_ENTRY JIT_CheckedWriteBarrier - ldr x12, LOCAL_LABEL(wbs_lowest_address) - cmp x14, x12 - - ldr x12, LOCAL_LABEL(wbs_highest_address) - - // Compare against the upper bound if the previous comparison indicated - // that the destination address is greater than or equal to the lower - // bound. Otherwise, set the C flag (specified by the 0x2) so that the - // branch below is not taken. - ccmp x14, x12, #0x2, hs - - bhs LOCAL_LABEL(NotInHeap) - - b C_FUNC(JIT_WriteBarrier) - -LOCAL_LABEL(NotInHeap): - str x15, [x14], 8 - ret lr -WRITE_BARRIER_END JIT_CheckedWriteBarrier - -// void JIT_WriteBarrier(Object** dst, Object* src) -// On entry: -// x14 : the destination address (LHS of the assignment) -// x15 : the object reference (RHS of the assignment) -// -// On exit: -// x12 : trashed -// x14 : trashed (incremented by 8 to implement JIT_ByRefWriteBarrier contract) -// x15 : trashed -// x17 : trashed (ip1) if FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP -// -WRITE_BARRIER_ENTRY JIT_WriteBarrier - stlr x15, [x14] - -#ifdef WRITE_BARRIER_CHECK - // Update GC Shadow Heap - - // Do not perform the work if g_GCShadow is 0 - ldr x12, LOCAL_LABEL(wbs_GCShadow) - cbz x12, LOCAL_LABEL(ShadowUpdateDisabled) - - // need temporary register. Save before using. - str x13, [sp, #-16]! 
- - // Compute address of shadow heap location: - // pShadow = g_GCShadow + (x14 - g_lowest_address) - ldr x13, LOCAL_LABEL(wbs_lowest_address) - sub x13, x14, x13 - add x12, x13, x12 - - // if (pShadow >= g_GCShadowEnd) goto end - ldr x13, LOCAL_LABEL(wbs_GCShadowEnd) - cmp x12, x13 - bhs LOCAL_LABEL(ShadowUpdateEnd) - - // *pShadow = x15 - str x15, [x12] - - // Ensure that the write to the shadow heap occurs before the read from the GC heap so that race - // conditions are caught by INVALIDGCVALUE. - dmb ish - - // if ([x14] == x15) goto end - ldr x13, [x14] - cmp x13, x15 - beq LOCAL_LABEL(ShadowUpdateEnd) - - // *pShadow = INVALIDGCVALUE (0xcccccccd) - movz x13, #0xcccd - movk x13, #0xcccc, LSL #16 - str x13, [x12] - -LOCAL_LABEL(ShadowUpdateEnd): - ldr x13, [sp], #16 -LOCAL_LABEL(ShadowUpdateDisabled): -#endif - -#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP - // Update the write watch table if necessary - ldr x12, LOCAL_LABEL(wbs_sw_ww_table) - cbz x12, LOCAL_LABEL(CheckCardTable) - add x12, x12, x14, lsr #0xc // SoftwareWriteWatch::AddressToTableByteIndexShift - ldrb w17, [x12] - cbnz x17, LOCAL_LABEL(CheckCardTable) - mov w17, #0xFF - strb w17, [x12] -#endif - -LOCAL_LABEL(CheckCardTable): - // Branch to Exit if the reference is not in the Gen0 heap - // - ldr x12, LOCAL_LABEL(wbs_ephemeral_low) - cbz x12, LOCAL_LABEL(SkipEphemeralCheck) - cmp x15, x12 - - ldr x12, LOCAL_LABEL(wbs_ephemeral_high) - - // Compare against the upper bound if the previous comparison indicated - // that the destination address is greater than or equal to the lower - // bound. Otherwise, set the C flag (specified by the 0x2) so that the - // branch to exit is taken. 
- ccmp x15, x12, #0x2, hs - - bhs LOCAL_LABEL(Exit) - -LOCAL_LABEL(SkipEphemeralCheck): - // Check if we need to update the card table - ldr x12, LOCAL_LABEL(wbs_card_table) - add x15, x12, x14, lsr #11 - ldrb w12, [x15] - cmp x12, 0xFF - beq LOCAL_LABEL(Exit) - -LOCAL_LABEL(UpdateCardTable): - mov x12, 0xFF - strb w12, [x15] - -#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES - // Check if we need to update the card bundle table - ldr x12, LOCAL_LABEL(wbs_card_bundle_table) - add x15, x12, x14, lsr #21 - ldrb w12, [x15] - cmp x12, 0xFF - beq LOCAL_LABEL(Exit) - -LOCAL_LABEL(UpdateCardBundle): - mov x12, 0xFF - strb w12, [x15] -#endif - -LOCAL_LABEL(Exit): - add x14, x14, 8 - ret lr -WRITE_BARRIER_END JIT_WriteBarrier - - // Begin patchable literal pool - .balign 64 // Align to power of two at least as big as patchable literal pool so that it fits optimally in cache line -WRITE_BARRIER_ENTRY JIT_WriteBarrier_Table -LOCAL_LABEL(wbs_begin): -LOCAL_LABEL(wbs_card_table): - .quad 0 -LOCAL_LABEL(wbs_card_bundle_table): - .quad 0 -LOCAL_LABEL(wbs_sw_ww_table): - .quad 0 -LOCAL_LABEL(wbs_ephemeral_low): - .quad 0 -LOCAL_LABEL(wbs_ephemeral_high): - .quad 0 -LOCAL_LABEL(wbs_lowest_address): - .quad 0 -LOCAL_LABEL(wbs_highest_address): - .quad 0 -#ifdef WRITE_BARRIER_CHECK -LOCAL_LABEL(wbs_GCShadow): - .quad 0 -LOCAL_LABEL(wbs_GCShadowEnd): - .quad 0 -#endif -WRITE_BARRIER_END JIT_WriteBarrier_Table - - -// ------------------------------------------------------------------ -// End of the writeable code region -LEAF_ENTRY JIT_PatchedCodeLast, _TEXT - ret lr -LEAF_END JIT_PatchedCodeLast, _TEXT - // void SinglecastDelegateInvokeStub(Delegate *pThis) LEAF_ENTRY SinglecastDelegateInvokeStub, _TEXT cmp x0, #0 diff --git a/src/coreclr/vm/arm64/asmhelpers.asm b/src/coreclr/vm/arm64/asmhelpers.asm index d72d3ad7863e1..bc88d15ee330f 100644 --- a/src/coreclr/vm/arm64/asmhelpers.asm +++ b/src/coreclr/vm/arm64/asmhelpers.asm @@ -233,36 +233,6 @@ ThePreStubPatchLabel ret lr LEAF_END 
-;----------------------------------------------------------------------------- -; The following Macros help in WRITE_BARRIER Implementations - ; WRITE_BARRIER_ENTRY - ; - ; Declare the start of a write barrier function. Use similarly to NESTED_ENTRY. This is the only legal way - ; to declare a write barrier function. - ; - MACRO - WRITE_BARRIER_ENTRY $name - - LEAF_ENTRY $name - MEND - - ; WRITE_BARRIER_END - ; - ; The partner to WRITE_BARRIER_ENTRY, used like NESTED_END. - ; - MACRO - WRITE_BARRIER_END $__write_barrier_name - - LEAF_END_MARKED $__write_barrier_name - - MEND - -; ------------------------------------------------------------------ -; Start of the writeable code region - LEAF_ENTRY JIT_PatchedCodeStart - ret lr - LEAF_END - ;----------------------------------------------------------------------------- ; void JIT_UpdateWriteBarrierState(bool skipEphemeralCheck, size_t writeableOffset) ; @@ -275,7 +245,7 @@ ThePreStubPatchLabel ; Align and group state info together so it fits in a single cache line ; and each entry can be written atomically ; - WRITE_BARRIER_ENTRY JIT_UpdateWriteBarrierState + LEAF_ENTRY JIT_UpdateWriteBarrierState PROLOG_SAVE_REG_PAIR fp, lr, #-16! ; x0-x7, x10 will contain intended new state @@ -339,204 +309,7 @@ EphemeralCheckEnabled EPILOG_RESTORE_REG_PAIR fp, lr, #16! 
EPILOG_RETURN - WRITE_BARRIER_END JIT_UpdateWriteBarrierState - - ; Begin patchable literal pool - ALIGN 64 ; Align to power of two at least as big as patchable literal pool so that it fits optimally in cache line - WRITE_BARRIER_ENTRY JIT_WriteBarrier_Table -wbs_begin -wbs_card_table - DCQ 0 -wbs_card_bundle_table - DCQ 0 -wbs_sw_ww_table - DCQ 0 -wbs_ephemeral_low - DCQ 0 -wbs_ephemeral_high - DCQ 0 -wbs_lowest_address - DCQ 0 -wbs_highest_address - DCQ 0 -#ifdef WRITE_BARRIER_CHECK -wbs_GCShadow - DCQ 0 -wbs_GCShadowEnd - DCQ 0 -#endif - WRITE_BARRIER_END JIT_WriteBarrier_Table - -; void JIT_ByRefWriteBarrier -; On entry: -; x13 : the source address (points to object reference to write) -; x14 : the destination address (object reference written here) -; -; On exit: -; x12 : trashed -; x13 : incremented by 8 -; x14 : incremented by 8 -; x15 : trashed -; x17 : trashed (ip1) if FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP -; - WRITE_BARRIER_ENTRY JIT_ByRefWriteBarrier - - ldr x15, [x13], 8 - b JIT_CheckedWriteBarrier - - WRITE_BARRIER_END JIT_ByRefWriteBarrier - -;----------------------------------------------------------------------------- -; Simple WriteBarriers -; void JIT_CheckedWriteBarrier(Object** dst, Object* src) -; On entry: -; x14 : the destination address (LHS of the assignment) -; x15 : the object reference (RHS of the assignment) -; -; On exit: -; x12 : trashed -; x14 : incremented by 8 -; x15 : trashed -; x17 : trashed (ip1) if FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP -; - WRITE_BARRIER_ENTRY JIT_CheckedWriteBarrier - ldr x12, wbs_lowest_address - cmp x14, x12 - - ldr x12, wbs_highest_address - ccmphs x14, x12, #0x2 - blo JIT_WriteBarrier - -NotInHeap - str x15, [x14], 8 - ret lr - WRITE_BARRIER_END JIT_CheckedWriteBarrier - -; void JIT_WriteBarrier(Object** dst, Object* src) -; On entry: -; x14 : the destination address (LHS of the assignment) -; x15 : the object reference (RHS of the assignment) -; -; On exit: -; x12 : trashed -; x14 : 
incremented by 8 -; x15 : trashed -; x17 : trashed (ip1) if FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP -; - WRITE_BARRIER_ENTRY JIT_WriteBarrier - stlr x15, [x14] - -#ifdef WRITE_BARRIER_CHECK - ; Update GC Shadow Heap - - ; Do not perform the work if g_GCShadow is 0 - ldr x12, wbs_GCShadow - cbz x12, ShadowUpdateDisabled - - ; need temporary register. Save before using. - str x13, [sp, #-16]! - - ; Compute address of shadow heap location: - ; pShadow = $g_GCShadow + (x14 - g_lowest_address) - ldr x13, wbs_lowest_address - sub x13, x14, x13 - add x12, x13, x12 - - ; if (pShadow >= $g_GCShadowEnd) goto end - ldr x13, wbs_GCShadowEnd - cmp x12, x13 - bhs ShadowUpdateEnd - - ; *pShadow = x15 - str x15, [x12] - - ; Ensure that the write to the shadow heap occurs before the read from the GC heap so that race - ; conditions are caught by INVALIDGCVALUE. - dmb ish - - ; if ([x14] == x15) goto end - ldr x13, [x14] - cmp x13, x15 - beq ShadowUpdateEnd - - ; *pShadow = INVALIDGCVALUE (0xcccccccd) - movz x13, #0xcccd - movk x13, #0xcccc, LSL #16 - str x13, [x12] - -ShadowUpdateEnd - ldr x13, [sp], #16 -ShadowUpdateDisabled -#endif - -#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP - ; Update the write watch table if necessary - ldr x12, wbs_sw_ww_table - cbz x12, CheckCardTable - add x12, x12, x14, LSR #0xC // SoftwareWriteWatch::AddressToTableByteIndexShift - ldrb w17, [x12] - cbnz x17, CheckCardTable - mov w17, 0xFF - strb w17, [x12] -#endif - -CheckCardTable - ; Branch to Exit if the reference is not in the Gen0 heap - ; - ldr x12, wbs_ephemeral_low - cbz x12, SkipEphemeralCheck - cmp x15, x12 - - ldr x12, wbs_ephemeral_high - - ; Compare against the upper bound if the previous comparison indicated - ; that the destination address is greater than or equal to the lower - ; bound. Otherwise, set the C flag (specified by the 0x2) so that the - ; branch to exit is taken. 
- ccmp x15, x12, #0x2, hs - - bhs Exit - -SkipEphemeralCheck - ; Check if we need to update the card table - ldr x12, wbs_card_table - - ; x15 := pointer into card table - add x15, x12, x14, lsr #11 - - ldrb w12, [x15] - cmp x12, 0xFF - beq Exit - -UpdateCardTable - mov x12, 0xFF - strb w12, [x15] - -#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES - ; Check if we need to update the card bundle table - ldr x12, wbs_card_bundle_table - - ; x15 := pointer into card bundle table - add x15, x12, x14, lsr #21 - - ldrb w12, [x15] - cmp x12, 0xFF - beq Exit - - mov x12, 0xFF - strb w12, [x15] -#endif - -Exit - add x14, x14, 8 - ret lr - WRITE_BARRIER_END JIT_WriteBarrier - -; ------------------------------------------------------------------ -; End of the writeable code region - LEAF_ENTRY JIT_PatchedCodeLast - ret lr - LEAF_END + LEAF_END JIT_UpdateWriteBarrierState ; void SinglecastDelegateInvokeStub(Delegate *pThis) LEAF_ENTRY SinglecastDelegateInvokeStub diff --git a/src/coreclr/vm/arm64/patchedcode.S b/src/coreclr/vm/arm64/patchedcode.S new file mode 100644 index 0000000000000..2c1199be69a78 --- /dev/null +++ b/src/coreclr/vm/arm64/patchedcode.S @@ -0,0 +1,232 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +#include "asmconstants.h" +#include "unixasmmacros.inc" + +//----------------------------------------------------------------------------- +// The following Macros help in WRITE_BARRIER Implementations +// WRITE_BARRIER_ENTRY +// +// Declare the start of a write barrier function. Use similarly to NESTED_ENTRY. This is the only legal way +// to declare a write barrier function. +// +.macro WRITE_BARRIER_ENTRY name + LEAF_ENTRY \name, _TEXT +.endm + +// WRITE_BARRIER_END +// +// The partner to WRITE_BARRIER_ENTRY, used like NESTED_END. 
+// +.macro WRITE_BARRIER_END name + LEAF_END_MARKED \name, _TEXT +.endm + +.balign 64 // Align to power of two at least as big as patchable literal pool so that it fits optimally in cache line +//------------------------------------------ +// Start of the writeable code region +LEAF_ENTRY JIT_PatchedCodeStart, _TEXT + ret lr +LEAF_END JIT_PatchedCodeStart, _TEXT + +// void JIT_ByRefWriteBarrier +// On entry: +// x13 : the source address (points to object reference to write) +// x14 : the destination address (object reference written here) +// +// On exit: +// x12 : trashed +// x13 : incremented by 8 +// x14 : incremented by 8 +// x15 : trashed +// x17 : trashed (ip1) if FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP +// +WRITE_BARRIER_ENTRY JIT_ByRefWriteBarrier + + ldr x15, [x13], 8 + b C_FUNC(JIT_CheckedWriteBarrier) + +WRITE_BARRIER_END JIT_ByRefWriteBarrier + +//----------------------------------------------------------------------------- +// Simple WriteBarriers +// void JIT_CheckedWriteBarrier(Object** dst, Object* src) +// On entry: +// x14 : the destination address (LHS of the assignment) +// x15 : the object reference (RHS of the assignment) +// +// On exit: +// x12 : trashed +// x14 : trashed (incremented by 8 to implement JIT_ByRefWriteBarrier contract) +// x15 : trashed +// x17 : trashed (ip1) if FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP +// +WRITE_BARRIER_ENTRY JIT_CheckedWriteBarrier + ldr x12, LOCAL_LABEL(wbs_lowest_address) + cmp x14, x12 + + ldr x12, LOCAL_LABEL(wbs_highest_address) + + // Compare against the upper bound if the previous comparison indicated + // that the destination address is greater than or equal to the lower + // bound. Otherwise, set the C flag (specified by the 0x2) so that the + // branch below is not taken. 
+ ccmp x14, x12, #0x2, hs + + bhs LOCAL_LABEL(NotInHeap) + + b C_FUNC(JIT_WriteBarrier) + +LOCAL_LABEL(NotInHeap): + str x15, [x14], 8 + ret lr +WRITE_BARRIER_END JIT_CheckedWriteBarrier + +// void JIT_WriteBarrier(Object** dst, Object* src) +// On entry: +// x14 : the destination address (LHS of the assignment) +// x15 : the object reference (RHS of the assignment) +// +// On exit: +// x12 : trashed +// x14 : trashed (incremented by 8 to implement JIT_ByRefWriteBarrier contract) +// x15 : trashed +// x17 : trashed (ip1) if FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP +// +WRITE_BARRIER_ENTRY JIT_WriteBarrier + stlr x15, [x14] + +#ifdef WRITE_BARRIER_CHECK + // Update GC Shadow Heap + + // Do not perform the work if g_GCShadow is 0 + ldr x12, LOCAL_LABEL(wbs_GCShadow) + cbz x12, LOCAL_LABEL(ShadowUpdateDisabled) + + // need temporary register. Save before using. + str x13, [sp, #-16]! + + // Compute address of shadow heap location: + // pShadow = g_GCShadow + (x14 - g_lowest_address) + ldr x13, LOCAL_LABEL(wbs_lowest_address) + sub x13, x14, x13 + add x12, x13, x12 + + // if (pShadow >= g_GCShadowEnd) goto end + ldr x13, LOCAL_LABEL(wbs_GCShadowEnd) + cmp x12, x13 + bhs LOCAL_LABEL(ShadowUpdateEnd) + + // *pShadow = x15 + str x15, [x12] + + // Ensure that the write to the shadow heap occurs before the read from the GC heap so that race + // conditions are caught by INVALIDGCVALUE. 
+ dmb ish + + // if ([x14] == x15) goto end + ldr x13, [x14] + cmp x13, x15 + beq LOCAL_LABEL(ShadowUpdateEnd) + + // *pShadow = INVALIDGCVALUE (0xcccccccd) + movz x13, #0xcccd + movk x13, #0xcccc, LSL #16 + str x13, [x12] + +LOCAL_LABEL(ShadowUpdateEnd): + ldr x13, [sp], #16 +LOCAL_LABEL(ShadowUpdateDisabled): +#endif + +#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP + // Update the write watch table if necessary + ldr x12, LOCAL_LABEL(wbs_sw_ww_table) + cbz x12, LOCAL_LABEL(CheckCardTable) + add x12, x12, x14, lsr #0xc // SoftwareWriteWatch::AddressToTableByteIndexShift + ldrb w17, [x12] + cbnz x17, LOCAL_LABEL(CheckCardTable) + mov w17, #0xFF + strb w17, [x12] +#endif + +LOCAL_LABEL(CheckCardTable): + // Branch to Exit if the reference is not in the Gen0 heap + // + ldr x12, LOCAL_LABEL(wbs_ephemeral_low) + cbz x12, LOCAL_LABEL(SkipEphemeralCheck) + cmp x15, x12 + + ldr x12, LOCAL_LABEL(wbs_ephemeral_high) + + // Compare against the upper bound if the previous comparison indicated + // that the destination address is greater than or equal to the lower + // bound. Otherwise, set the C flag (specified by the 0x2) so that the + // branch to exit is taken. 
+ ccmp x15, x12, #0x2, hs + + bhs LOCAL_LABEL(Exit) + +LOCAL_LABEL(SkipEphemeralCheck): + // Check if we need to update the card table + ldr x12, LOCAL_LABEL(wbs_card_table) + add x15, x12, x14, lsr #11 + ldrb w12, [x15] + cmp x12, 0xFF + beq LOCAL_LABEL(Exit) + +LOCAL_LABEL(UpdateCardTable): + mov x12, 0xFF + strb w12, [x15] + +#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES + // Check if we need to update the card bundle table + ldr x12, LOCAL_LABEL(wbs_card_bundle_table) + add x15, x12, x14, lsr #21 + ldrb w12, [x15] + cmp x12, 0xFF + beq LOCAL_LABEL(Exit) + +LOCAL_LABEL(UpdateCardBundle): + mov x12, 0xFF + strb w12, [x15] +#endif + +LOCAL_LABEL(Exit): + add x14, x14, 8 + ret lr +WRITE_BARRIER_END JIT_WriteBarrier + + // Begin patchable literal pool + .balign 64 // Align to power of two at least as big as patchable literal pool so that it fits optimally in cache line +WRITE_BARRIER_ENTRY JIT_WriteBarrier_Table +LOCAL_LABEL(wbs_begin): +LOCAL_LABEL(wbs_card_table): + .quad 0 +LOCAL_LABEL(wbs_card_bundle_table): + .quad 0 +LOCAL_LABEL(wbs_sw_ww_table): + .quad 0 +LOCAL_LABEL(wbs_ephemeral_low): + .quad 0 +LOCAL_LABEL(wbs_ephemeral_high): + .quad 0 +LOCAL_LABEL(wbs_lowest_address): + .quad 0 +LOCAL_LABEL(wbs_highest_address): + .quad 0 +#ifdef WRITE_BARRIER_CHECK +LOCAL_LABEL(wbs_GCShadow): + .quad 0 +LOCAL_LABEL(wbs_GCShadowEnd): + .quad 0 +#endif +WRITE_BARRIER_END JIT_WriteBarrier_Table + + +// ------------------------------------------------------------------ +// End of the writeable code region +LEAF_ENTRY JIT_PatchedCodeLast, _TEXT + ret lr +LEAF_END JIT_PatchedCodeLast, _TEXT diff --git a/src/coreclr/vm/arm64/patchedcode.asm b/src/coreclr/vm/arm64/patchedcode.asm new file mode 100644 index 0000000000000..bd4f57cc6810c --- /dev/null +++ b/src/coreclr/vm/arm64/patchedcode.asm @@ -0,0 +1,239 @@ +; Licensed to the .NET Foundation under one or more agreements. +; The .NET Foundation licenses this file to you under the MIT license. 
+ +#include "ksarm64.h" +#include "asmconstants.h" +#include "asmmacros.h" + + ;;like TEXTAREA, but with 64 byte alignment so that we can align the patchable pool below to 64 without warning + AREA |.text|,ALIGN=6,CODE,READONLY + +;----------------------------------------------------------------------------- +; The following Macros help in WRITE_BARRIER Implementations + ; WRITE_BARRIER_ENTRY + ; + ; Declare the start of a write barrier function. Use similarly to NESTED_ENTRY. This is the only legal way + ; to declare a write barrier function. + ; + MACRO + WRITE_BARRIER_ENTRY $name + + LEAF_ENTRY $name + MEND + + ; WRITE_BARRIER_END + ; + ; The partner to WRITE_BARRIER_ENTRY, used like NESTED_END. + ; + MACRO + WRITE_BARRIER_END $__write_barrier_name + + LEAF_END_MARKED $__write_barrier_name + + MEND + +; ------------------------------------------------------------------ +; Start of the writeable code region + LEAF_ENTRY JIT_PatchedCodeStart + ret lr + LEAF_END + + ; Begin patchable literal pool + ALIGN 64 ; Align to power of two at least as big as patchable literal pool so that it fits optimally in cache line + WRITE_BARRIER_ENTRY JIT_WriteBarrier_Table +wbs_begin +wbs_card_table + DCQ 0 +wbs_card_bundle_table + DCQ 0 +wbs_sw_ww_table + DCQ 0 +wbs_ephemeral_low + DCQ 0 +wbs_ephemeral_high + DCQ 0 +wbs_lowest_address + DCQ 0 +wbs_highest_address + DCQ 0 +#ifdef WRITE_BARRIER_CHECK +wbs_GCShadow + DCQ 0 +wbs_GCShadowEnd + DCQ 0 +#endif + WRITE_BARRIER_END JIT_WriteBarrier_Table + +; void JIT_ByRefWriteBarrier +; On entry: +; x13 : the source address (points to object reference to write) +; x14 : the destination address (object reference written here) +; +; On exit: +; x12 : trashed +; x13 : incremented by 8 +; x14 : incremented by 8 +; x15 : trashed +; x17 : trashed (ip1) if FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP +; + WRITE_BARRIER_ENTRY JIT_ByRefWriteBarrier + + ldr x15, [x13], 8 + b JIT_CheckedWriteBarrier + + WRITE_BARRIER_END JIT_ByRefWriteBarrier + 
+;----------------------------------------------------------------------------- +; Simple WriteBarriers +; void JIT_CheckedWriteBarrier(Object** dst, Object* src) +; On entry: +; x14 : the destination address (LHS of the assignment) +; x15 : the object reference (RHS of the assignment) +; +; On exit: +; x12 : trashed +; x14 : incremented by 8 +; x15 : trashed +; x17 : trashed (ip1) if FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP +; + WRITE_BARRIER_ENTRY JIT_CheckedWriteBarrier + ldr x12, wbs_lowest_address + cmp x14, x12 + + ldr x12, wbs_highest_address + ccmphs x14, x12, #0x2 + blo JIT_WriteBarrier + +NotInHeap + str x15, [x14], 8 + ret lr + WRITE_BARRIER_END JIT_CheckedWriteBarrier + +; void JIT_WriteBarrier(Object** dst, Object* src) +; On entry: +; x14 : the destination address (LHS of the assignment) +; x15 : the object reference (RHS of the assignment) +; +; On exit: +; x12 : trashed +; x14 : incremented by 8 +; x15 : trashed +; x17 : trashed (ip1) if FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP +; + WRITE_BARRIER_ENTRY JIT_WriteBarrier + stlr x15, [x14] + +#ifdef WRITE_BARRIER_CHECK + ; Update GC Shadow Heap + + ; Do not perform the work if g_GCShadow is 0 + ldr x12, wbs_GCShadow + cbz x12, ShadowUpdateDisabled + + ; need temporary register. Save before using. + str x13, [sp, #-16]! + + ; Compute address of shadow heap location: + ; pShadow = $g_GCShadow + (x14 - g_lowest_address) + ldr x13, wbs_lowest_address + sub x13, x14, x13 + add x12, x13, x12 + + ; if (pShadow >= $g_GCShadowEnd) goto end + ldr x13, wbs_GCShadowEnd + cmp x12, x13 + bhs ShadowUpdateEnd + + ; *pShadow = x15 + str x15, [x12] + + ; Ensure that the write to the shadow heap occurs before the read from the GC heap so that race + ; conditions are caught by INVALIDGCVALUE. 
+ dmb ish + + ; if ([x14] == x15) goto end + ldr x13, [x14] + cmp x13, x15 + beq ShadowUpdateEnd + + ; *pShadow = INVALIDGCVALUE (0xcccccccd) + movz x13, #0xcccd + movk x13, #0xcccc, LSL #16 + str x13, [x12] + +ShadowUpdateEnd + ldr x13, [sp], #16 +ShadowUpdateDisabled +#endif + +#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP + ; Update the write watch table if necessary + ldr x12, wbs_sw_ww_table + cbz x12, CheckCardTable + add x12, x12, x14, LSR #0xC // SoftwareWriteWatch::AddressToTableByteIndexShift + ldrb w17, [x12] + cbnz x17, CheckCardTable + mov w17, 0xFF + strb w17, [x12] +#endif + +CheckCardTable + ; Branch to Exit if the reference is not in the Gen0 heap + ; + ldr x12, wbs_ephemeral_low + cbz x12, SkipEphemeralCheck + cmp x15, x12 + + ldr x12, wbs_ephemeral_high + + ; Compare against the upper bound if the previous comparison indicated + ; that the destination address is greater than or equal to the lower + ; bound. Otherwise, set the C flag (specified by the 0x2) so that the + ; branch to exit is taken. 
+ ccmp x15, x12, #0x2, hs + + bhs Exit + +SkipEphemeralCheck + ; Check if we need to update the card table + ldr x12, wbs_card_table + + ; x15 := pointer into card table + add x15, x12, x14, lsr #11 + + ldrb w12, [x15] + cmp x12, 0xFF + beq Exit + +UpdateCardTable + mov x12, 0xFF + strb w12, [x15] + +#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES + ; Check if we need to update the card bundle table + ldr x12, wbs_card_bundle_table + + ; x15 := pointer into card bundle table + add x15, x12, x14, lsr #21 + + ldrb w12, [x15] + cmp x12, 0xFF + beq Exit + + mov x12, 0xFF + strb w12, [x15] +#endif + +Exit + add x14, x14, 8 + ret lr + WRITE_BARRIER_END JIT_WriteBarrier + +; ------------------------------------------------------------------ +; End of the writeable code region + LEAF_ENTRY JIT_PatchedCodeLast + ret lr + LEAF_END + +; Must be at very end of file + END diff --git a/src/coreclr/vm/ceeload.cpp b/src/coreclr/vm/ceeload.cpp index f3e3a882d3878..cbabf92302b79 100644 --- a/src/coreclr/vm/ceeload.cpp +++ b/src/coreclr/vm/ceeload.cpp @@ -2188,6 +2188,16 @@ ISymUnmanagedReader *Module::GetISymUnmanagedReaderNoThrow(void) RETURN (ret); } +#if defined(HOST_AMD64) +#define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.amd64.dll") +#elif defined(HOST_X86) +#define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.x86.dll") +#elif defined(HOST_ARM) +#define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.arm.dll") +#elif defined(HOST_ARM64) +#define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.arm64.dll") +#endif + ISymUnmanagedReader *Module::GetISymUnmanagedReader(void) { CONTRACT(ISymUnmanagedReader *) diff --git a/src/coreclr/vm/ceeload.h b/src/coreclr/vm/ceeload.h index 4d714d2024196..540d386fa6e45 100644 --- a/src/coreclr/vm/ceeload.h +++ b/src/coreclr/vm/ceeload.h @@ -79,21 +79,6 @@ class EnCEEClassData; #define METHOD_STUBS_HASH_BUCKETS 11 #define GUID_TO_TYPE_HASH_BUCKETS 16 -// The native symbol reader dll name -#if 
defined(HOST_AMD64) -#define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.amd64.dll") -#elif defined(HOST_X86) -#define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.x86.dll") -#elif defined(HOST_ARM) -#define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.arm.dll") -#elif defined(HOST_ARM64) -#define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.arm64.dll") -#elif defined(HOST_LOONGARCH64) -#define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.loongarch64.dll") -#elif defined(HOST_RISCV64) -#define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.riscv64.dll") -#endif - typedef DPTR(JITInlineTrackingMap) PTR_JITInlineTrackingMap; // diff --git a/src/coreclr/vm/ceemain.cpp b/src/coreclr/vm/ceemain.cpp index 91494083dc911..c5eaacd0c168e 100644 --- a/src/coreclr/vm/ceemain.cpp +++ b/src/coreclr/vm/ceemain.cpp @@ -224,13 +224,12 @@ extern "C" HRESULT __cdecl CorDBGetInterface(DebugInterface** rcInterface); // g_coreclr_embedded indicates that coreclr is linked directly into the program // g_hostpolicy_embedded indicates that the hostpolicy library is linked directly into the executable -// Note: that it can happen that the hostpolicy is embedded but coreclr isn't (on Windows singlefilehost is built that way) #ifdef CORECLR_EMBEDDED bool g_coreclr_embedded = true; bool g_hostpolicy_embedded = true; // We always embed hostpolicy if coreclr is also embedded #else bool g_coreclr_embedded = false; -bool g_hostpolicy_embedded = false; // In this case the value may come from a runtime property and may change +bool g_hostpolicy_embedded = false; #endif // Remember how the last startup of EE went. 
diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp index 8f8fa45d853dc..4dddb61967556 100644 --- a/src/coreclr/vm/codeman.cpp +++ b/src/coreclr/vm/codeman.cpp @@ -920,35 +920,6 @@ BOOL IsFunctionFragment(TADDR baseAddress, PTR_RUNTIME_FUNCTION pFunctionEntry) #endif } -// When we have fragmented unwind we usually want to refer to the -// unwind record that includes the prolog. We can find it by searching -// back in the sequence of unwind records. -PTR_RUNTIME_FUNCTION FindRootEntry(PTR_RUNTIME_FUNCTION pFunctionEntry, TADDR baseAddress) -{ - LIMITED_METHOD_DAC_CONTRACT; - - PTR_RUNTIME_FUNCTION pRootEntry = pFunctionEntry; - - if (pRootEntry != NULL) - { - // Walk backwards in the RUNTIME_FUNCTION array until we find a non-fragment. - // We're guaranteed to find one, because we require that a fragment live in a function or funclet - // that has a prolog, which will have non-fragment .xdata. - while (true) - { - if (!IsFunctionFragment(baseAddress, pRootEntry)) - { - // This is not a fragment; we're done - break; - } - - --pRootEntry; - } - } - - return pRootEntry; -} - #endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS @@ -1134,12 +1105,30 @@ TADDR IJitManager::GetFuncletStartAddress(EECodeInfo * pCodeInfo) #endif TADDR baseAddress = pCodeInfo->GetModuleBase(); + TADDR funcletStartAddress = baseAddress + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry); #if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS) - pFunctionEntry = FindRootEntry(pFunctionEntry, baseAddress); -#endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS + // Is the RUNTIME_FUNCTION a fragment? If so, we need to walk backwards until we find the first + // non-fragment RUNTIME_FUNCTION, and use that one. This happens when we have very large functions + // and multiple RUNTIME_FUNCTION entries per function or funclet. However, all but the first will + // have the "F" bit set in the unwind data, indicating a fragment (with phantom prolog unwind codes). 
- TADDR funcletStartAddress = baseAddress + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry); + for (;;) + { + if (!IsFunctionFragment(baseAddress, pFunctionEntry)) + { + // This is not a fragment; we're done + break; + } + + // We found a fragment. Walk backwards in the RUNTIME_FUNCTION array until we find a non-fragment. + // We're guaranteed to find one, because we require that a fragment live in a function or funclet + // that has a prolog, which will have non-fragment .xdata. + --pFunctionEntry; + + funcletStartAddress = baseAddress + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry); + } +#endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS return funcletStartAddress; } @@ -4133,8 +4122,6 @@ void EEJitManager::NibbleMapSetUnlocked(HeapList * pHp, TADDR pCode, BOOL bSet) #endif // !DACCESS_COMPILE #if defined(FEATURE_EH_FUNCLETS) -// Note: This returns the root unwind record (the one that describes the prolog) -// in cases where there is fragmented unwind. PTR_RUNTIME_FUNCTION EEJitManager::LazyGetFunctionEntry(EECodeInfo * pCodeInfo) { CONTRACTL { @@ -4163,14 +4150,6 @@ PTR_RUNTIME_FUNCTION EEJitManager::LazyGetFunctionEntry(EECodeInfo * pCodeInfo) if (RUNTIME_FUNCTION__BeginAddress(pFunctionEntry) <= address && address < RUNTIME_FUNCTION__EndAddress(pFunctionEntry, baseAddress)) { - -#if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS) && (defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)) - // If we might have fragmented unwind, and we're on ARM64/LoongArch64, - // make sure to returning the root record, - // as the trailing records don't have prolog unwind codes. 
- pFunctionEntry = FindRootEntry(pFunctionEntry, baseAddress); -#endif - return pFunctionEntry; } } diff --git a/src/coreclr/vm/codeversion.cpp b/src/coreclr/vm/codeversion.cpp index 9c1fd769081b3..bc7937ad37263 100644 --- a/src/coreclr/vm/codeversion.cpp +++ b/src/coreclr/vm/codeversion.cpp @@ -1771,8 +1771,9 @@ PCODE CodeVersionManager::PublishVersionableCodeIfNecessary( } else { + #ifdef FEATURE_TIERED_COMPILATION _ASSERTE(!config->ShouldCountCalls()); - + #endif // The thread that generated or loaded the new code will publish the code and backpatch if necessary doPublish = false; } diff --git a/src/coreclr/vm/comsynchronizable.cpp b/src/coreclr/vm/comsynchronizable.cpp index 7df5589fea22d..35ddf6d04c1b8 100644 --- a/src/coreclr/vm/comsynchronizable.cpp +++ b/src/coreclr/vm/comsynchronizable.cpp @@ -1035,7 +1035,7 @@ FCIMPL0(INT32, ThreadNative::GetOptimalMaxSpinWaitsPerSpinIteration) } FCIMPLEND -FCIMPL1(void, ThreadNative::SpinWait, int iterations) +extern "C" void QCALLTYPE ThreadNative_SpinWait(INT32 iterations) { FCALL_CONTRACT; @@ -1044,29 +1044,8 @@ FCIMPL1(void, ThreadNative::SpinWait, int iterations) return; } - // - // If we're not going to spin for long, it's ok to remain in cooperative mode. - // The threshold is determined by the cost of entering preemptive mode; if we're - // spinning for less than that number of cycles, then switching to preemptive - // mode won't help a GC start any faster. - // - if (iterations <= 100000) - { - YieldProcessorNormalized(iterations); - return; - } - - // - // Too many iterations; better switch to preemptive mode to avoid stalling a GC. 
- // - HELPER_METHOD_FRAME_BEGIN_NOPOLL(); - GCX_PREEMP(); - YieldProcessorNormalized(iterations); - - HELPER_METHOD_FRAME_END(); } -FCIMPLEND extern "C" BOOL QCALLTYPE ThreadNative_YieldThread() { diff --git a/src/coreclr/vm/comsynchronizable.h b/src/coreclr/vm/comsynchronizable.h index a069d109a79d1..a174e2cedf13d 100644 --- a/src/coreclr/vm/comsynchronizable.h +++ b/src/coreclr/vm/comsynchronizable.h @@ -101,6 +101,7 @@ extern "C" BOOL QCALLTYPE ThreadNative_YieldThread(); extern "C" UINT64 QCALLTYPE ThreadNative_GetCurrentOSThreadId(); extern "C" void QCALLTYPE ThreadNative_Abort(QCall::ThreadHandle thread); extern "C" void QCALLTYPE ThreadNative_ResetAbort(); +extern "C" void QCALLTYPE ThreadNative_SpinWait(INT32 iterations); #endif // _COMSYNCHRONIZABLE_H diff --git a/src/coreclr/vm/ecalllist.h b/src/coreclr/vm/ecalllist.h index e4663c025e6c6..2f1f27bdf3eee 100644 --- a/src/coreclr/vm/ecalllist.h +++ b/src/coreclr/vm/ecalllist.h @@ -366,7 +366,6 @@ FCFuncStart(gThreadFuncs) FCFuncElement("InternalGetCurrentThread", GetThread) FCFuncElement("SleepInternal", ThreadNative::Sleep) FCFuncElement("Initialize", ThreadNative::Initialize) - FCFuncElement("SpinWaitInternal", ThreadNative::SpinWait) FCFuncElement("GetCurrentThreadNative", ThreadNative::GetCurrentThread) FCFuncElement("InternalFinalize", ThreadNative::Finalize) FCFuncElement("get_IsAlive", ThreadNative::IsAlive) diff --git a/src/coreclr/vm/methodtable.h b/src/coreclr/vm/methodtable.h index 2023c44d898a6..9c06faf6c4323 100644 --- a/src/coreclr/vm/methodtable.h +++ b/src/coreclr/vm/methodtable.h @@ -2246,6 +2246,7 @@ class MethodTable void SetHasCriticalFinalizer() { LIMITED_METHOD_CONTRACT; + _ASSERTE(!HasComponentSize()); SetFlag(enum_flag_HasCriticalFinalizer); } // Does this class have non-trivial finalization requirements? 
@@ -2259,7 +2260,7 @@ class MethodTable DWORD HasCriticalFinalizer() const { LIMITED_METHOD_CONTRACT; - return GetFlag(enum_flag_HasCriticalFinalizer); + return !HasComponentSize() && GetFlag(enum_flag_HasCriticalFinalizer); } //------------------------------------------------------------------- @@ -3291,15 +3292,15 @@ public : // apply to Strings / Arrays. enum_flag_UNUSED_ComponentSize_1 = 0x00000001, - - enum_flag_StaticsMask = 0x00000006, + // GC depends on this bit + enum_flag_HasCriticalFinalizer = 0x00000002, // finalizer must be run on Appdomain Unload + enum_flag_StaticsMask = 0x0000000C, enum_flag_StaticsMask_NonDynamic = 0x00000000, - enum_flag_StaticsMask_Dynamic = 0x00000002, // dynamic statics (EnC, reflection.emit) + enum_flag_StaticsMask_Dynamic = 0x00000008, // dynamic statics (EnC, reflection.emit) enum_flag_StaticsMask_Generics = 0x00000004, // generics statics - enum_flag_StaticsMask_CrossModuleGenerics = 0x00000006, // cross module generics statics (NGen) - enum_flag_StaticsMask_IfGenericsThenCrossModule = 0x00000002, // helper constant to get rid of unnecessary check + enum_flag_StaticsMask_CrossModuleGenerics = 0x0000000C, // cross module generics statics (NGen) + enum_flag_StaticsMask_IfGenericsThenCrossModule = 0x00000008, // helper constant to get rid of unnecessary check - enum_flag_NotInPZM = 0x00000008, // True if this type is not in its PreferredZapModule enum_flag_GenericsMask = 0x00000030, enum_flag_GenericsMask_NonGeneric = 0x00000000, // no instantiation @@ -3328,9 +3329,10 @@ public : enum_flag_IsByRefLike = 0x00001000, + enum_flag_NotInPZM = 0x00002000, // True if this type is not in its PreferredZapModule + // In a perfect world we would fill these flags using other flags that we already have // which have a constant value for something which has a component size. 
- enum_flag_UNUSED_ComponentSize_5 = 0x00002000, enum_flag_UNUSED_ComponentSize_6 = 0x00004000, enum_flag_UNUSED_ComponentSize_7 = 0x00008000, @@ -3342,7 +3344,8 @@ public : // As you change the flags in WFLAGS_LOW_ENUM you also need to change this // to be up to date to reflect the default values of those flags for the // case where this MethodTable is for a String or Array - enum_flag_StringArrayValues = SET_TRUE(enum_flag_StaticsMask_NonDynamic) | + enum_flag_StringArrayValues = SET_FALSE(enum_flag_HasCriticalFinalizer) | + SET_TRUE(enum_flag_StaticsMask_NonDynamic) | SET_FALSE(enum_flag_NotInPZM) | SET_TRUE(enum_flag_GenericsMask_NonGeneric) | SET_FALSE(enum_flag_HasVariance) | @@ -3387,9 +3390,10 @@ public : enum_flag_Category_ElementTypeMask = 0x000E0000, // bits that matter for element type mask + // GC depends on this bit enum_flag_HasFinalizer = 0x00100000, // instances require finalization - enum_flag_IDynamicInterfaceCastable = 0x00200000, // class implements IDynamicInterfaceCastable interface + enum_flag_IDynamicInterfaceCastable = 0x10000000, // class implements IDynamicInterfaceCastable interface enum_flag_ICastable = 0x00400000, // class implements ICastable interface @@ -3401,8 +3405,8 @@ public : enum_flag_IsTrackedReferenceWithFinalizer = 0x04000000, - enum_flag_HasCriticalFinalizer = 0x08000000, // finalizer must be run on Appdomain Unload - enum_flag_Collectible = 0x10000000, + // GC depends on this bit + enum_flag_Collectible = 0x00200000, enum_flag_ContainsGenericVariables = 0x20000000, // we cache this flag to help detect these efficiently and // to detect this condition when restoring diff --git a/src/coreclr/vm/qcallentrypoints.cpp b/src/coreclr/vm/qcallentrypoints.cpp index 0725bd7a87f09..3682a4cef1691 100644 --- a/src/coreclr/vm/qcallentrypoints.cpp +++ b/src/coreclr/vm/qcallentrypoints.cpp @@ -204,6 +204,7 @@ static const Entry s_QCall[] = DllImportEntry(ThreadNative_GetCurrentOSThreadId) DllImportEntry(ThreadNative_Abort) 
DllImportEntry(ThreadNative_ResetAbort) + DllImportEntry(ThreadNative_SpinWait) #ifdef TARGET_UNIX DllImportEntry(WaitHandle_CorWaitOnePrioritizedNative) #endif @@ -307,6 +308,8 @@ static const Entry s_QCall[] = DllImportEntry(OpenMutexW) DllImportEntry(OpenSemaphoreW) DllImportEntry(OutputDebugStringW) + DllImportEntry(PAL_CreateMutexW) + DllImportEntry(PAL_OpenMutexW) DllImportEntry(ReleaseMutex) DllImportEntry(ReleaseSemaphore) DllImportEntry(ResetEvent) diff --git a/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Crossgen2.sfxproj b/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Crossgen2.sfxproj index e8e8591bbaf8b..599adf5008634 100644 --- a/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Crossgen2.sfxproj +++ b/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Crossgen2.sfxproj @@ -28,36 +28,15 @@ + + + + - - - - - - - - - - <_CrossgenPublishFiles Include="@(_RawCrossgenPublishFiles->'%(OutputPath)')" KeepMetadata="REMOVE_ALL" /> diff --git a/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Runtime.props b/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Runtime.props index 7b4f7b877ee74..6baa79be74b8c 100644 --- a/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Runtime.props +++ b/src/installer/pkg/sfx/Microsoft.NETCore.App/Microsoft.NETCore.App.Runtime.props @@ -17,6 +17,7 @@ true true The .NET Shared Framework + $(PublishReadyToRun) @@ -178,6 +179,8 @@ + + true diff --git a/src/installer/pkg/sfx/Microsoft.NETCore.App/ReadyToRun.targets b/src/installer/pkg/sfx/Microsoft.NETCore.App/ReadyToRun.targets index 0b82891e6c42e..436b95b37dcc8 100644 --- a/src/installer/pkg/sfx/Microsoft.NETCore.App/ReadyToRun.targets +++ b/src/installer/pkg/sfx/Microsoft.NETCore.App/ReadyToRun.targets @@ -1,53 +1,16 @@ - - - - - - - - - - - - - - - $(BuildArchitecture) - $(CoreCLRArtifactsPath)\$(CrossDir)\crossgen2\crossgen2.dll - - true - 
@(PublishReadyToRunCrossgen2ExtraArgsList) + $([MSBuild]::NormalizePath('$(Crossgen2Dir)', 'tools', 'crossgen2.dll')) 1 - - .sh - .cmd - - <_crossTargetJit Include="@(CoreCLRCrossTargetFiles)" Condition="'%(FileName)' == '$(LibPrefix)clrjit' and '%(Extension)' == '$(LibSuffix)'" /> - <_clrjit Include="@(RuntimeFiles)" Condition="'%(FileName)' == '$(LibPrefix)clrjit' and '%(Extension)' == '$(LibSuffix)'" /> - <_crossTargetCrossgen Include="@(CoreCLRCrossTargetFiles)" Condition="'%(FileName)' == 'crossgen' and '%(Extension)' == '$(ExeSuffix)'" /> - <_crossgen Include="@(RuntimeFiles)" Condition="'%(FileName)' == 'crossgen' and '%(Extension)' == '$(ExeSuffix)'" /> - - - - - - - - + \ No newline at end of file diff --git a/src/libraries/Common/src/SourceGenerators/DiagnosticInfo.cs b/src/libraries/Common/src/SourceGenerators/DiagnosticInfo.cs new file mode 100644 index 0000000000000..74f44f99c62ba --- /dev/null +++ b/src/libraries/Common/src/SourceGenerators/DiagnosticInfo.cs @@ -0,0 +1,60 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System; +using System.Linq; +using System.Numerics.Hashing; +using Microsoft.CodeAnalysis; + +namespace SourceGenerators; + +/// +/// Descriptor for diagnostic instances using structural equality comparison. +/// Provides a work-around for https://github.com/dotnet/roslyn/issues/68291. +/// +internal readonly struct DiagnosticInfo : IEquatable +{ + public DiagnosticDescriptor Descriptor { get; private init; } + public object?[] MessageArgs { get; private init; } + public Location? Location { get; private init; } + + public static DiagnosticInfo Create(DiagnosticDescriptor descriptor, Location? location, object?[]? messageArgs) + { + Location? trimmedLocation = location is null ? null : GetTrimmedLocation(location); + + return new DiagnosticInfo + { + Descriptor = descriptor, + Location = trimmedLocation, + MessageArgs = messageArgs ?? 
Array.Empty() + }; + + // Creates a copy of the Location instance that does not capture a reference to Compilation. + static Location GetTrimmedLocation(Location location) + => Location.Create(location.SourceTree?.FilePath ?? "", location.SourceSpan, location.GetLineSpan().Span); + } + + public Diagnostic CreateDiagnostic() + => Diagnostic.Create(Descriptor, Location, MessageArgs); + + public override readonly bool Equals(object? obj) => obj is DiagnosticInfo info && Equals(info); + + public readonly bool Equals(DiagnosticInfo other) + { + return Descriptor.Equals(other.Descriptor) && + MessageArgs.SequenceEqual(other.MessageArgs) && + Location == other.Location; + } + + public override readonly int GetHashCode() + { + int hashCode = Descriptor.GetHashCode(); + foreach (object? messageArg in MessageArgs) + { + hashCode = HashHelpers.Combine(hashCode, messageArg?.GetHashCode() ?? 0); + } + + hashCode = HashHelpers.Combine(hashCode, Location?.GetHashCode() ?? 0); + return hashCode; + } +} diff --git a/src/libraries/System.Text.Json/gen/Helpers/ImmutableEquatableArray.cs b/src/libraries/Common/src/SourceGenerators/ImmutableEquatableArray.cs similarity index 85% rename from src/libraries/System.Text.Json/gen/Helpers/ImmutableEquatableArray.cs rename to src/libraries/Common/src/SourceGenerators/ImmutableEquatableArray.cs index ac3aa804fdd9d..47fdde1751882 100644 --- a/src/libraries/System.Text.Json/gen/Helpers/ImmutableEquatableArray.cs +++ b/src/libraries/Common/src/SourceGenerators/ImmutableEquatableArray.cs @@ -1,12 +1,13 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. +using System; using System.Collections; using System.Collections.Generic; using System.Linq; using System.Numerics.Hashing; -namespace System.Text.Json.SourceGeneration +namespace SourceGenerators { /// /// Provides an immutable list implementation which implements sequence equality. 
@@ -72,15 +73,9 @@ public bool MoveNext() } } - public static class ImmutableEquatableArray + internal static class ImmutableEquatableArray { - public static ImmutableEquatableArray Empty() where T : IEquatable - => ImmutableEquatableArray.Empty; - public static ImmutableEquatableArray ToImmutableEquatableArray(this IEnumerable values) where T : IEquatable => new(values); - - public static ImmutableEquatableArray Create(params T[] values) where T : IEquatable - => values is { Length: > 0 } ? new(values) : ImmutableEquatableArray.Empty; } } diff --git a/src/libraries/Common/src/SourceGenerators/TypeModelHelper.cs b/src/libraries/Common/src/SourceGenerators/TypeModelHelper.cs index 73c19d61ca122..7a3a3e98fd7fd 100644 --- a/src/libraries/Common/src/SourceGenerators/TypeModelHelper.cs +++ b/src/libraries/Common/src/SourceGenerators/TypeModelHelper.cs @@ -3,6 +3,8 @@ using Microsoft.CodeAnalysis; using System.Collections.Generic; +using System.Collections.Immutable; +using System.Linq; namespace SourceGenerators { @@ -32,5 +34,7 @@ void TraverseContainingTypes(INamedTypeSymbol current) } } } + + public static string GetFullyQualifiedName(this ITypeSymbol type) => type.ToDisplayString(SymbolDisplayFormat.FullyQualifiedFormat); } } diff --git a/src/libraries/System.Text.Json/gen/Model/TypeRef.cs b/src/libraries/Common/src/SourceGenerators/TypeRef.cs similarity index 96% rename from src/libraries/System.Text.Json/gen/Model/TypeRef.cs rename to src/libraries/Common/src/SourceGenerators/TypeRef.cs index 050aba0cda658..cfbf33ed74136 100644 --- a/src/libraries/System.Text.Json/gen/Model/TypeRef.cs +++ b/src/libraries/Common/src/SourceGenerators/TypeRef.cs @@ -1,10 +1,11 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. 
+using System; using System.Diagnostics; using Microsoft.CodeAnalysis; -namespace System.Text.Json.SourceGeneration +namespace SourceGenerators { /// /// An equatable value representing type identity. diff --git a/src/libraries/Common/src/System/IO/Win32Marshal.cs b/src/libraries/Common/src/System/IO/Win32Marshal.cs index ad2c81e9fc515..0c11b227e677e 100644 --- a/src/libraries/Common/src/System/IO/Win32Marshal.cs +++ b/src/libraries/Common/src/System/IO/Win32Marshal.cs @@ -22,7 +22,7 @@ internal static Exception GetExceptionForLastWin32Error(string? path = "") /// Converts the specified Win32 error into a corresponding object, optionally /// including the specified path in the error message. /// - internal static Exception GetExceptionForWin32Error(int errorCode, string? path = "") + internal static Exception GetExceptionForWin32Error(int errorCode, string? path = "", string? errorDetails = null) { // ERROR_SUCCESS gets thrown when another unexpected interop call was made before checking GetLastWin32Error(). // Errors have to get retrieved as soon as possible after P/Invoking to avoid this. @@ -57,13 +57,19 @@ internal static Exception GetExceptionForWin32Error(int errorCode, string? path case Interop.Errors.ERROR_OPERATION_ABORTED: return new OperationCanceledException(); case Interop.Errors.ERROR_INVALID_PARAMETER: + default: - string msg = string.IsNullOrEmpty(path) - ? 
GetPInvokeErrorMessage(errorCode) - : $"{GetPInvokeErrorMessage(errorCode)} : '{path}'"; - return new IOException( - msg, - MakeHRFromErrorCode(errorCode)); + string msg = GetPInvokeErrorMessage(errorCode); + if (!string.IsNullOrEmpty(path)) + { + msg += $" : '{path}'."; + } + if (!string.IsNullOrEmpty(errorDetails)) + { + msg += $" {errorDetails}"; + } + + return new IOException(msg, MakeHRFromErrorCode(errorCode)); } static string GetPInvokeErrorMessage(int errorCode) diff --git a/src/libraries/Common/src/System/Text/ValueStringBuilder.cs b/src/libraries/Common/src/System/Text/ValueStringBuilder.cs index 966f1c8cfc5ed..a0844b0466424 100644 --- a/src/libraries/Common/src/System/Text/ValueStringBuilder.cs +++ b/src/libraries/Common/src/System/Text/ValueStringBuilder.cs @@ -251,7 +251,7 @@ public unsafe void Append(char* value, int length) _pos += length; } - public void Append(ReadOnlySpan value) + public void Append(scoped ReadOnlySpan value) { int pos = _pos; if (pos > _chars.Length - value.Length) diff --git a/src/libraries/Common/tests/Common.Tests.csproj b/src/libraries/Common/tests/Common.Tests.csproj index 710ac02810174..97009778000f5 100644 --- a/src/libraries/Common/tests/Common.Tests.csproj +++ b/src/libraries/Common/tests/Common.Tests.csproj @@ -3,7 +3,6 @@ true true $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-linux;$(NetCoreAppCurrent)-browser;$(NetCoreAppCurrent)-osx - true + /// Asserts for structural equality, returning a path to the mismatching data when not equal. 
+ /// + public static void AssertStructurallyEqual(T expected, T actual) + { + CheckAreEqualCore(expected, actual, new()); + static void CheckAreEqualCore(object expected, object actual, Stack path) + { + if (expected is null || actual is null) + { + if (expected is not null || actual is not null) + { + FailNotEqual(); + } + + return; + } + + Type type = expected.GetType(); + if (type != actual.GetType()) + { + FailNotEqual(); + return; + } + + if (expected is IEnumerable leftCollection) + { + if (actual is not IEnumerable rightCollection) + { + FailNotEqual(); + return; + } + + object?[] expectedValues = leftCollection.Cast().ToArray(); + object?[] actualValues = rightCollection.Cast().ToArray(); + + for (int i = 0; i < Math.Max(expectedValues.Length, actualValues.Length); i++) + { + object? expectedElement = i < expectedValues.Length ? expectedValues[i] : ""; + object? actualElement = i < actualValues.Length ? actualValues[i] : ""; + + path.Push($"[{i}]"); + CheckAreEqualCore(expectedElement, actualElement, path); + path.Pop(); + } + } + + if (type.GetProperty("EqualityContract", BindingFlags.Instance | BindingFlags.NonPublic, null, returnType: typeof(Type), types: Array.Empty(), null) != null) + { + // Type is a C# record, run pointwise equality comparison. + foreach (PropertyInfo property in type.GetProperties(BindingFlags.Public | BindingFlags.Instance)) + { + path.Push("." 
+ property.Name); + CheckAreEqualCore(property.GetValue(expected), property.GetValue(actual), path); + path.Pop(); + } + + return; + } + + if (!expected.Equals(actual)) + { + FailNotEqual(); + } + + void FailNotEqual() => Assert.Fail($"Value not equal in ${string.Join("", path.Reverse())}: expected {expected}, but was {actual}."); + } + } + } +} diff --git a/src/libraries/Common/tests/System/Security/Cryptography/PlatformSupport.cs b/src/libraries/Common/tests/System/Security/Cryptography/PlatformSupport.cs index dc3f24f1b2789..ae9f09caf23a5 100644 --- a/src/libraries/Common/tests/System/Security/Cryptography/PlatformSupport.cs +++ b/src/libraries/Common/tests/System/Security/Cryptography/PlatformSupport.cs @@ -2,6 +2,7 @@ // The .NET Foundation licenses this file to you under the MIT license. using System; +using System.Collections.Generic; using System.Runtime.InteropServices; using System.Security.Cryptography; using Xunit; @@ -10,39 +11,58 @@ namespace Test.Cryptography { internal static class PlatformSupport { - private static Lazy s_lazyPlatformCryptoProviderFunctional = new Lazy(static () => + private static readonly Dictionary s_platformCryptoSupportedAlgorithms = new(); + + private static bool PlatformCryptoProviderFunctional(CngAlgorithm algorithm) { -#if !NETFRAMEWORK - if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + // Use a full lock around a non-concurrent dictionary. We do not want the value factory for + // ConcurrentDictionary to be executing simultaneously for the same algorithm. 
+ lock (s_platformCryptoSupportedAlgorithms) { - return false; + if (s_platformCryptoSupportedAlgorithms.TryGetValue(algorithm, out bool supported)) + { + return supported; + } + + supported = DetermineAlgorithmFunctional(algorithm); + s_platformCryptoSupportedAlgorithms[algorithm] = supported; + return supported; } + + static bool DetermineAlgorithmFunctional(CngAlgorithm algorithm) + { +#if !NETFRAMEWORK + if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + { + return false; + } #endif - CngKey key = null; + CngKey key = null; - try - { - key = CngKey.Create( - CngAlgorithm.ECDsaP256, - $"{nameof(PlatformCryptoProviderFunctional)}Key", - new CngKeyCreationParameters - { - Provider = new CngProvider("Microsoft Platform Crypto Provider"), - KeyCreationOptions = CngKeyCreationOptions.OverwriteExistingKey, - }); + try + { + key = CngKey.Create( + algorithm, + $"{nameof(PlatformCryptoProviderFunctional)}{algorithm.Algorithm}Key", + new CngKeyCreationParameters + { + Provider = new CngProvider("Microsoft Platform Crypto Provider"), + KeyCreationOptions = CngKeyCreationOptions.OverwriteExistingKey, + }); - return true; - } - catch (CryptographicException) - { - return false; + return true; + } + catch (CryptographicException) + { + return false; + } + finally + { + key?.Delete(); + } } - finally - { - key?.Delete(); - } - }); + } // Platforms that use Apple Cryptography internal const TestPlatforms AppleCrypto = TestPlatforms.OSX | TestPlatforms.iOS | TestPlatforms.tvOS | TestPlatforms.MacCatalyst; @@ -60,7 +80,8 @@ internal static class PlatformSupport internal static readonly bool IsAndroidVersionAtLeast31 = false; #endif - internal static bool PlatformCryptoProviderFunctional => s_lazyPlatformCryptoProviderFunctional.Value; - + internal static bool PlatformCryptoProviderFunctionalP256 => PlatformCryptoProviderFunctional(CngAlgorithm.ECDsaP256); + internal static bool PlatformCryptoProviderFunctionalP384 => 
PlatformCryptoProviderFunctional(CngAlgorithm.ECDsaP384); + internal static bool PlatformCryptoProviderFunctionalRsa => PlatformCryptoProviderFunctional(CngAlgorithm.Rsa); } } diff --git a/src/libraries/Common/tests/TestUtilities/System/PlatformDetection.cs b/src/libraries/Common/tests/TestUtilities/System/PlatformDetection.cs index 431bc77820d0b..5baf88cd19b0d 100644 --- a/src/libraries/Common/tests/TestUtilities/System/PlatformDetection.cs +++ b/src/libraries/Common/tests/TestUtilities/System/PlatformDetection.cs @@ -428,6 +428,7 @@ private static bool GetIsInContainer() return Registry.GetValue(key, "ContainerType", defaultValue: null) != null; } + // '/.dockerenv' - is to check if this is running in a codespace return (IsLinux && File.Exists("/.dockerenv")); } diff --git a/src/libraries/Common/tests/TestUtilities/TestUtilities.csproj b/src/libraries/Common/tests/TestUtilities/TestUtilities.csproj index 2273e3bd088a2..2a09abb5e79f6 100644 --- a/src/libraries/Common/tests/TestUtilities/TestUtilities.csproj +++ b/src/libraries/Common/tests/TestUtilities/TestUtilities.csproj @@ -8,7 +8,6 @@ and instead use runtime checks. --> $(NetCoreAppMinimum);$(NetFrameworkMinimum) - true (() => File.ReadAllText(path)); + + Assert.ThrowsAny(() => File.ReadAllText(path)); + + string? name = Interop.OSReleaseFile.GetPrettyName(path); + Assert.Null(name); + } + + [ConditionalFact(typeof(PlatformDetection), nameof(PlatformDetection.IsPrivilegedProcess)), PlatformSpecific(TestPlatforms.Linux)] + public void GetPrettyName_NonePrivileges_CanRead_ReturnsNull() + { + string path = CreateTestFile(); + File.SetUnixFileMode(path, UnixFileMode.None); + + // If user have root permissions, kernel doesn't care about access privileges, + // so there is no point in expecting System.Exception + Assert.Equal(UnixFileMode.None, File.GetUnixFileMode(path)); + // Because kernel ignored privileges check, file should be readable and empty + Assert.Equal("", File.ReadAllText(path)); string? 
name = Interop.OSReleaseFile.GetPrettyName(path); Assert.Null(name); diff --git a/src/libraries/Microsoft.Extensions.Configuration.Abstractions/src/IConfigurationProvider.cs b/src/libraries/Microsoft.Extensions.Configuration.Abstractions/src/IConfigurationProvider.cs index 20a7e5a6694f8..e957d91a0897d 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Abstractions/src/IConfigurationProvider.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Abstractions/src/IConfigurationProvider.cs @@ -27,9 +27,9 @@ public interface IConfigurationProvider void Set(string key, string? value); /// - /// Returns a change token if this provider supports change tracking, null otherwise. + /// Attempts to get an for change tracking. /// - /// The change token. + /// An token if this provider supports change tracking, otherwise. IChangeToken GetReloadToken(); /// diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/ConfigurationBindingGenerator.Emitter.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/ConfigurationBindingGenerator.Emitter.cs index 7206d54904114..1721a124dead9 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/ConfigurationBindingGenerator.Emitter.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/ConfigurationBindingGenerator.Emitter.cs @@ -1,7 +1,6 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. 
-using System.Collections.Immutable; using Microsoft.CodeAnalysis; using SourceGenerators; @@ -11,19 +10,22 @@ public sealed partial class ConfigurationBindingGenerator : IIncrementalGenerato { private sealed partial class Emitter { - private readonly SourceProductionContext _context; - private readonly SourceGenerationSpec _sourceGenSpec; + private readonly InterceptorInfo _interceptorInfo; + private readonly BindingHelperInfo _bindingHelperInfo; + private readonly TypeIndex _typeIndex; + private readonly SourceWriter _writer = new(); - public Emitter(SourceProductionContext context, SourceGenerationSpec sourceGenSpec) + public Emitter(SourceGenerationSpec sourceGenSpec) { - _context = context; - _sourceGenSpec = sourceGenSpec; + _interceptorInfo = sourceGenSpec.InterceptorInfo; + _bindingHelperInfo = sourceGenSpec.BindingHelperInfo; + _typeIndex = new TypeIndex(sourceGenSpec.ConfigTypes); } - public void Emit() + public void Emit(SourceProductionContext context) { - if (!ShouldEmitBindingExtensions()) + if (!ShouldEmitMethods(MethodsToGen.Any)) { return; } @@ -52,7 +54,7 @@ file static class {{Identifier.BindingExtensions}} EmitEndBlock(); // Binding namespace. 
- _context.AddSource($"{Identifier.BindingExtensions}.g.cs", _writer.ToSourceText()); + context.AddSource($"{Identifier.BindingExtensions}.g.cs", _writer.ToSourceText()); } private void EmitInterceptsLocationAttrDecl() @@ -79,7 +81,7 @@ public InterceptsLocationAttribute(string filePath, int line, int column) private void EmitUsingStatements() { - foreach (string @namespace in _sourceGenSpec.Namespaces.ToImmutableSortedSet()) + foreach (string @namespace in _bindingHelperInfo.Namespaces) { _writer.WriteLine($"using {@namespace};"); } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/ConfigurationBindingGenerator.Parser.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/ConfigurationBindingGenerator.Parser.cs index 2a6f5d2126e8c..d01c5dbae13f3 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/ConfigurationBindingGenerator.Parser.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/ConfigurationBindingGenerator.Parser.cs @@ -7,45 +7,73 @@ using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Linq; +using System.Threading; using Microsoft.CodeAnalysis; -using Microsoft.CodeAnalysis.Operations; using SourceGenerators; namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { public sealed partial class ConfigurationBindingGenerator : IIncrementalGenerator { - private sealed partial class Parser + internal sealed partial class Parser(CompilationData compilationData) { - private record struct InvocationDiagnosticInfo(DiagnosticDescriptor Descriptor, object[]? 
MessageArgs); + private readonly KnownTypeSymbols _typeSymbols = compilationData.TypeSymbols!; + private readonly bool _langVersionIsSupported = compilationData.LanguageVersionIsSupported; - private readonly SourceProductionContext _context; - private readonly SourceGenerationSpec _sourceGenSpec = new(); - private readonly KnownTypeSymbols _typeSymbols; - private readonly ImmutableArray _invocations; + private readonly List _invocationTypeParseInfo = new(); + private readonly Queue _typesToParse = new(); + private readonly Dictionary _createdTypeSpecs = new(SymbolEqualityComparer.Default); - private readonly Dictionary _createdSpecs = new(SymbolEqualityComparer.Default); - private readonly HashSet _unsupportedTypes = new(SymbolEqualityComparer.Default); + private readonly InterceptorInfo.Builder _interceptorInfoBuilder = new(); + private BindingHelperInfo.Builder? _helperInfoBuilder; // Init'ed with type index when registering interceptors, after creating type specs. - private readonly List _invocationTargetTypeDiags = new(); - private readonly Dictionary> _typeDiagnostics = new(SymbolEqualityComparer.Default); + public List? Diagnostics { get; private set; } - public Parser(SourceProductionContext context, KnownTypeSymbols typeSymbols, ImmutableArray invocations) + public SourceGenerationSpec? GetSourceGenerationSpec(ImmutableArray invocations, CancellationToken cancellationToken) { - _context = context; - _typeSymbols = typeSymbols; - _invocations = invocations; - } + if (!_langVersionIsSupported) + { + RecordDiagnostic(DiagnosticDescriptors.LanguageVersionNotSupported, trimmedLocation: Location.None); + return null; + } - public SourceGenerationSpec? 
GetSourceGenerationSpec() - { if (_typeSymbols is not { IConfiguration: { }, ConfigurationBinder: { } }) { return null; } - foreach (BinderInvocation invocation in _invocations) + ParseInvocations(invocations); + CreateTypeSpecs(cancellationToken); + RegisterInterceptors(); + + return new SourceGenerationSpec { + InterceptorInfo = _interceptorInfoBuilder.ToIncrementalValue(), + BindingHelperInfo = _helperInfoBuilder!.ToIncrementalValue(), + ConfigTypes = _createdTypeSpecs.Values.OrderBy(s => s.TypeRef.FullyQualifiedName).ToImmutableEquatableArray(), + }; + } + + private bool IsValidRootConfigType([NotNullWhen(true)] ITypeSymbol? type) + { + if (type is null || + type.SpecialType is SpecialType.System_Object or SpecialType.System_Void || + !_typeSymbols.Compilation.IsSymbolAccessibleWithin(type, _typeSymbols.Compilation.Assembly) || + type.TypeKind is TypeKind.TypeParameter or TypeKind.Pointer or TypeKind.Error || + type.IsRefLikeType || + ContainsGenericParameters(type)) + { + return false; + } + + return true; + } + + private void ParseInvocations(ImmutableArray invocations) + { + foreach (BinderInvocation? invocation in invocations) + { + Debug.Assert(invocation is not null); IMethodSymbol targetMethod = invocation.Operation.TargetMethod; INamedTypeSymbol? candidateBinderType = targetMethod.ContainingType; Debug.Assert(targetMethod.IsExtensionMethod); @@ -63,174 +91,124 @@ public Parser(SourceProductionContext context, KnownTypeSymbols typeSymbols, Imm ParseInvocation_ServiceCollectionExt(invocation); } } - - return _sourceGenSpec; } - private bool IsValidRootConfigType(ITypeSymbol? 
type) + private void CreateTypeSpecs(CancellationToken cancellationToken) { - if (type is null || - type.SpecialType is SpecialType.System_Object or SpecialType.System_Void || - !_typeSymbols.Compilation.IsSymbolAccessibleWithin(type, _typeSymbols.Compilation.Assembly) || - type.TypeKind is TypeKind.TypeParameter or TypeKind.Pointer or TypeKind.Error || - type.IsRefLikeType || - ContainsGenericParameters(type)) + while (_typesToParse.Count > 0) { - return false; - } + cancellationToken.ThrowIfCancellationRequested(); - return true; + TypeParseInfo typeParseInfo = _typesToParse.Dequeue(); + ITypeSymbol typeSymbol = typeParseInfo.TypeSymbol; + + if (!_createdTypeSpecs.ContainsKey(typeSymbol)) + { + _createdTypeSpecs.Add(typeSymbol, CreateTypeSpec(typeParseInfo)); + } + } } - private TypeSpec? GetTargetTypeForRootInvocation(ITypeSymbol? type, Location? invocationLocation) + private void RegisterInterceptors() { - if (!IsValidRootConfigType(type)) + TypeIndex typeIndex = new(_createdTypeSpecs.Values); + _helperInfoBuilder = new(typeIndex); + + foreach (TypeParseInfo typeParseInfo in _invocationTypeParseInfo) { - _context.ReportDiagnostic(Diagnostic.Create(Diagnostics.CouldNotDetermineTypeInfo, invocationLocation)); - return null; + TypeSpec typeSpec = _createdTypeSpecs[typeParseInfo.TypeSymbol]; + MethodsToGen overload = typeParseInfo.BindingOverload; + + if ((MethodsToGen.ConfigBinder_Any & overload) is not 0) + { + RegisterInterceptor_ConfigurationBinder(typeParseInfo, typeSpec); + } + else if ((MethodsToGen.OptionsBuilderExt_Any & overload) is not 0) + { + RegisterInterceptor_OptionsBuilderExt(typeParseInfo, typeSpec); + } + else + { + Debug.Assert((MethodsToGen.ServiceCollectionExt_Any & overload) is not 0); + RegisterInterceptor_ServiceCollectionExt(typeParseInfo, typeSpec); + } } + } - return GetTargetTypeForRootInvocationCore(type, invocationLocation); + private void EnqueueTargetTypeForRootInvocation(ITypeSymbol? 
typeSymbol, MethodsToGen overload, BinderInvocation invocation) + { + if (!IsValidRootConfigType(typeSymbol)) + { + RecordDiagnostic(DiagnosticDescriptors.CouldNotDetermineTypeInfo, invocation.Location); + } + else + { + TypeParseInfo typeParseInfo = TypeParseInfo.Create(typeSymbol, overload, invocation, containingTypeDiagInfo: null); + _typesToParse.Enqueue(typeParseInfo); + _invocationTypeParseInfo.Add(typeParseInfo); + } } - public TypeSpec? GetTargetTypeForRootInvocationCore(ITypeSymbol type, Location? invocationLocation) + private TypeRef EnqueueTransitiveType(TypeParseInfo containingTypeParseInfo, ITypeSymbol memberTypeSymbol, DiagnosticDescriptor diagDescriptor, string? memberName = null) { - TypeSpec? spec = GetOrCreateTypeSpec(type); + TypeParseInfo memberTypeParseInfo = containingTypeParseInfo.ToTransitiveTypeParseInfo(memberTypeSymbol, diagDescriptor, memberName); - foreach (InvocationDiagnosticInfo diag in _invocationTargetTypeDiags) + if (_createdTypeSpecs.TryGetValue(memberTypeSymbol, out TypeSpec? memberTypeSpec)) { - _context.ReportDiagnostic(Diagnostic.Create(diag.Descriptor, invocationLocation, diag.MessageArgs)); + RecordTypeDiagnosticIfRequired(memberTypeParseInfo, memberTypeSpec); + return memberTypeSpec.TypeRef; } - _invocationTargetTypeDiags.Clear(); - return spec; + _typesToParse.Enqueue(memberTypeParseInfo); + return new TypeRef(memberTypeSymbol); } - private TypeSpec? GetOrCreateTypeSpec(ITypeSymbol type) + private TypeSpec CreateTypeSpec(TypeParseInfo typeParseInfo) { - if (_createdSpecs.TryGetValue(type, out TypeSpec? spec)) - { - if (_typeDiagnostics.TryGetValue(type, out HashSet? typeDiags)) - { - _invocationTargetTypeDiags.AddRange(typeDiags); - } - - return spec; - } + ITypeSymbol type = typeParseInfo.TypeSymbol; + TypeSpec spec; if (IsNullable(type, out ITypeSymbol? underlyingType)) { - spec = MemberTypeIsBindable(type, underlyingType, Diagnostics.NullableUnderlyingTypeNotSupported, out TypeSpec? underlyingTypeSpec) - ? 
new NullableSpec(type, underlyingTypeSpec) - : null; + TypeRef underlyingTypeRef = EnqueueTransitiveType( + typeParseInfo, + underlyingType, + DiagnosticDescriptors.NullableUnderlyingTypeNotSupported); + + spec = new NullableSpec(type, underlyingTypeRef); } else if (IsParsableFromString(type, out StringParsableTypeKind specialTypeKind)) { ParsableFromStringSpec stringParsableSpec = new(type) { StringParsableTypeKind = specialTypeKind }; - - if (stringParsableSpec.StringParsableTypeKind is not StringParsableTypeKind.AssignFromSectionValue) - { - _sourceGenSpec.PrimitivesForHelperGen.Add(stringParsableSpec); - } - spec = stringParsableSpec; } - else if (IsSupportedArrayType(type)) + else if (type.TypeKind is TypeKind.Array) { - spec = CreateArraySpec((type as IArrayTypeSymbol)); + spec = CreateArraySpec(typeParseInfo); + Debug.Assert(spec is ArraySpec or UnsupportedTypeSpec); } else if (IsCollection(type)) { - spec = CreateCollectionSpec((INamedTypeSymbol)type); + spec = CreateCollectionSpec(typeParseInfo); } else if (SymbolEqualityComparer.Default.Equals(type, _typeSymbols.IConfigurationSection)) { spec = new ConfigurationSectionSpec(type); } - else if (type is INamedTypeSymbol namedType) + else if (type is INamedTypeSymbol) { - // List is used in generated code as a temp holder for formatting - // an error for config properties that don't map to object properties. 
- _sourceGenSpec.Namespaces.Add("System.Collections.Generic"); - - spec = CreateObjectSpec(namedType); + spec = CreateObjectSpec(typeParseInfo); } else { - RegisterUnsupportedType(type, Diagnostics.TypeNotSupported); + spec = CreateUnsupportedTypeSpec(typeParseInfo, NotSupportedReason.UnknownType); } - foreach (InvocationDiagnosticInfo diag in _invocationTargetTypeDiags) - { - RegisterTypeDiagnostic(type, diag); - } - - if (spec is { Namespace: string @namespace } && @namespace is not "") - { - _sourceGenSpec.Namespaces.Add(@namespace); - } - - return _createdSpecs[type] = spec; - } - - private bool TryRegisterTypeForBindCoreMainGen(ComplexTypeSpec type) - { - if (type.HasBindableMembers) - { - bool registeredForBindCoreGen = TryRegisterTypeForBindCoreGen(type); - Debug.Assert(registeredForBindCoreGen); - - RegisterTypeForMethodGen(MethodsToGen_CoreBindingHelper.BindCoreMain, type); - Register_AsConfigWithChildren_HelperForGen_IfRequired(type); - return true; - } - - return false; - } + RecordTypeDiagnosticIfRequired(typeParseInfo, spec); - private bool TryRegisterTypeForBindCoreGen(ComplexTypeSpec type) - { - if (type.HasBindableMembers) - { - RegisterTypeForMethodGen(MethodsToGen_CoreBindingHelper.BindCore, type); - return true; - } - - return false; - } - - private void RegisterTypeForGetCoreGen(TypeSpec typeSpec) - { - RegisterTypeForMethodGen(MethodsToGen_CoreBindingHelper.GetCore, typeSpec); - Register_AsConfigWithChildren_HelperForGen_IfRequired(typeSpec); - } - - private void RegisterTypeForMethodGen(MethodsToGen_CoreBindingHelper method, TypeSpec type) - { - if (!_sourceGenSpec.TypesForGen_CoreBindingHelper_Methods.TryGetValue(method, out HashSet? 
types)) - { - _sourceGenSpec.TypesForGen_CoreBindingHelper_Methods[method] = types = new HashSet(); - } - - types.Add(type); - _sourceGenSpec.MethodsToGen_CoreBindingHelper |= method; - } - - private void Register_AsConfigWithChildren_HelperForGen_IfRequired(TypeSpec possibleComplexType) - { - if (possibleComplexType is ComplexTypeSpec) - { - _sourceGenSpec.MethodsToGen_CoreBindingHelper |= MethodsToGen_CoreBindingHelper.AsConfigWithChildren; - } + return spec; } - /// - /// Registers interceptors for root binding methods, except for ConfigurationBinder.Bind, - /// which is handled by - /// - private void RegisterInterceptor(Enum method, IInvocationOperation operation) => - _sourceGenSpec.InterceptionInfo.RegisterCacheEntry(method, new InterceptorLocationInfo(operation)); - private static bool IsNullable(ITypeSymbol type, [NotNullWhen(true)] out ITypeSymbol? underlyingType) { if (type is INamedTypeSymbol { IsGenericType: true } genericType && @@ -349,232 +327,197 @@ private bool IsParsableFromString(ITypeSymbol type, out StringParsableTypeKind t } } - private EnumerableSpec? CreateArraySpec(IArrayTypeSymbol arrayTypeSymbol) + private TypeSpec CreateArraySpec(TypeParseInfo typeParseInfo) { - ITypeSymbol elementTypeSymbol = arrayTypeSymbol.ElementType; + IArrayTypeSymbol typeSymbol = (IArrayTypeSymbol)typeParseInfo.TypeSymbol; - if (!MemberTypeIsBindable(arrayTypeSymbol, elementTypeSymbol, Diagnostics.ElementTypeNotSupported, out TypeSpec elementTypeSpec)) + if (typeSymbol.Rank > 1) { - return null; + return CreateUnsupportedTypeSpec(typeParseInfo, NotSupportedReason.MultiDimArraysNotSupported); } - // We want a BindCore method for List as a temp holder for the array values. - // Since the element type is supported, we can certainly a list of elements. 
- EnumerableSpec listTypeSpec = (EnumerableSpec)GetOrCreateTypeSpec(_typeSymbols.List.Construct(elementTypeSymbol)); + TypeRef elementTypeRef = EnqueueTransitiveType( + typeParseInfo, + typeSymbol.ElementType, + DiagnosticDescriptors.ElementTypeNotSupported); - EnumerableSpec spec = new EnumerableSpec(arrayTypeSymbol) + return new ArraySpec(typeSymbol) { - ElementType = elementTypeSpec, - InstantiationStrategy = InstantiationStrategy.Array, - PopulationStrategy = CollectionPopulationStrategy.Cast_Then_Add, // Using the concrete list type as a temp holder. - TypeToInstantiate = listTypeSpec, - PopulationCastType = null, + ElementTypeRef = elementTypeRef, }; - - bool registeredForBindCore = TryRegisterTypeForBindCoreGen(listTypeSpec) && TryRegisterTypeForBindCoreGen(spec); - Debug.Assert(registeredForBindCore); - return spec; } - private CollectionSpec? CreateCollectionSpec(INamedTypeSymbol type) + private TypeSpec CreateCollectionSpec(TypeParseInfo typeParseInfo) { - CollectionSpec? spec; - if (IsCandidateDictionary(type, out ITypeSymbol keyType, out ITypeSymbol elementType)) + INamedTypeSymbol type = (INamedTypeSymbol)typeParseInfo.TypeSymbol; + + TypeSpec spec; + if (IsCandidateDictionary(type, out ITypeSymbol? keyType, out ITypeSymbol? 
elementType)) { - spec = CreateDictionarySpec(type, keyType, elementType); - Debug.Assert(spec is null or DictionarySpec { KeyType: null or ParsableFromStringSpec }); + spec = CreateDictionarySpec(typeParseInfo, keyType, elementType); + Debug.Assert(spec is DictionarySpec or UnsupportedTypeSpec); } else { - spec = CreateEnumerableSpec(type); + spec = CreateEnumerableSpec(typeParseInfo); + Debug.Assert(spec is EnumerableSpec or UnsupportedTypeSpec); } - if (spec is null) - { - return null; - } - - bool registerForBindCoreGen = TryRegisterTypeForBindCoreGen(spec); - Debug.Assert(registerForBindCoreGen); return spec; } - private DictionarySpec CreateDictionarySpec(INamedTypeSymbol type, ITypeSymbol keyType, ITypeSymbol elementType) + private TypeSpec CreateDictionarySpec(TypeParseInfo typeParseInfo, ITypeSymbol keyTypeSymbol, ITypeSymbol elementTypeSymbol) { - if (!MemberTypeIsBindable(type, keyType, Diagnostics.DictionaryKeyNotSupported, out TypeSpec keySpec) || - !MemberTypeIsBindable(type, elementType, Diagnostics.ElementTypeNotSupported, out TypeSpec elementSpec)) - { - return null; - } + INamedTypeSymbol type = (INamedTypeSymbol)typeParseInfo.TypeSymbol; - if (keySpec.SpecKind is not TypeSpecKind.ParsableFromString) - { - RegisterUnsupportedType(type, Diagnostics.DictionaryKeyNotSupported); - return null; - } - - InstantiationStrategy constructionStrategy; - CollectionPopulationStrategy populationStrategy; - INamedTypeSymbol? typeToInstantiate = null; - INamedTypeSymbol? 
populationCastType = null; + CollectionInstantiationStrategy instantiationStrategy; + CollectionInstantiationConcreteType instantiationConcreteType; + CollectionPopulationCastType populationCastType; if (HasPublicParameterLessCtor(type)) { - constructionStrategy = InstantiationStrategy.ParameterlessConstructor; + instantiationStrategy = CollectionInstantiationStrategy.ParameterlessConstructor; + instantiationConcreteType = CollectionInstantiationConcreteType.Self; - if (HasAddMethod(type, keyType, elementType)) + if (HasAddMethod(type, keyTypeSymbol, elementTypeSymbol)) { - populationStrategy = CollectionPopulationStrategy.Add; + populationCastType = CollectionPopulationCastType.NotApplicable; } - else if (GetInterface(type, _typeSymbols.GenericIDictionary_Unbound) is not null) + else if (_typeSymbols.GenericIDictionary is not null && GetInterface(type, _typeSymbols.GenericIDictionary_Unbound) is not null) { - populationCastType = _typeSymbols.GenericIDictionary; - populationStrategy = CollectionPopulationStrategy.Cast_Then_Add; + populationCastType = CollectionPopulationCastType.IDictionary; } else { - RegisterUnsupportedType(type, Diagnostics.CollectionNotSupported); - return null; + return CreateUnsupportedCollectionSpec(typeParseInfo); } } - else if (IsInterfaceMatch(type, _typeSymbols.GenericIDictionary_Unbound) || IsInterfaceMatch(type, _typeSymbols.IDictionary)) + else if (_typeSymbols.Dictionary is not null && + (IsInterfaceMatch(type, _typeSymbols.GenericIDictionary_Unbound) || IsInterfaceMatch(type, _typeSymbols.IDictionary))) { - typeToInstantiate = _typeSymbols.Dictionary; - constructionStrategy = InstantiationStrategy.ParameterlessConstructor; - populationStrategy = CollectionPopulationStrategy.Add; + instantiationStrategy = CollectionInstantiationStrategy.ParameterlessConstructor; + instantiationConcreteType = CollectionInstantiationConcreteType.Dictionary; + populationCastType = CollectionPopulationCastType.NotApplicable; } - else if 
(IsInterfaceMatch(type, _typeSymbols.IReadOnlyDictionary_Unbound)) + else if (_typeSymbols.Dictionary is not null && IsInterfaceMatch(type, _typeSymbols.IReadOnlyDictionary_Unbound)) { - typeToInstantiate = _typeSymbols.Dictionary; - populationCastType = _typeSymbols.GenericIDictionary; - constructionStrategy = InstantiationStrategy.ToEnumerableMethod; - populationStrategy = CollectionPopulationStrategy.Cast_Then_Add; - _sourceGenSpec.Namespaces.Add("System.Linq"); + instantiationStrategy = CollectionInstantiationStrategy.LinqToDictionary; + instantiationConcreteType = CollectionInstantiationConcreteType.Dictionary; + populationCastType = CollectionPopulationCastType.IDictionary; } else { - RegisterUnsupportedType(type, Diagnostics.CollectionNotSupported); - return null; + return CreateUnsupportedCollectionSpec(typeParseInfo); } - Debug.Assert(!(populationStrategy is CollectionPopulationStrategy.Cast_Then_Add && populationCastType is null)); + TypeRef keyTypeRef = EnqueueTransitiveType(typeParseInfo, keyTypeSymbol, DiagnosticDescriptors.DictionaryKeyNotSupported); + TypeRef elementTypeRef = EnqueueTransitiveType(typeParseInfo, elementTypeSymbol, DiagnosticDescriptors.ElementTypeNotSupported); - DictionarySpec spec = new(type) + return new DictionarySpec(type) { - KeyType = (ParsableFromStringSpec)keySpec, - ElementType = elementSpec, - InstantiationStrategy = constructionStrategy, - PopulationStrategy = populationStrategy, - TypeToInstantiate = ConstructGenericCollectionSpecIfRequired(typeToInstantiate, keyType, elementType) as DictionarySpec, - PopulationCastType = ConstructGenericCollectionSpecIfRequired(populationCastType, keyType, elementType) as DictionarySpec, + KeyTypeRef = keyTypeRef, + ElementTypeRef = elementTypeRef, + InstantiationStrategy = instantiationStrategy, + InstantiationConcreteType = instantiationConcreteType, + PopulationCastType = populationCastType, }; - - return spec; } - private EnumerableSpec? 
CreateEnumerableSpec(INamedTypeSymbol type) + private TypeSpec CreateEnumerableSpec(TypeParseInfo typeParseInfo) { - if (!TryGetElementType(type, out ITypeSymbol? elementType) || - !MemberTypeIsBindable(type, elementType, Diagnostics.ElementTypeNotSupported, out TypeSpec elementSpec)) + INamedTypeSymbol type = (INamedTypeSymbol)typeParseInfo.TypeSymbol; + + if (!TryGetElementType(type, out ITypeSymbol? elementType)) { - return null; + return CreateUnsupportedCollectionSpec(typeParseInfo); } - InstantiationStrategy instantiationStrategy; - CollectionPopulationStrategy populationStrategy; - INamedTypeSymbol? typeToInstantiate = null; - INamedTypeSymbol? populationCastType = null; + CollectionInstantiationStrategy instantiationStrategy; + CollectionInstantiationConcreteType instantiationConcreteType; + CollectionPopulationCastType populationCastType; if (HasPublicParameterLessCtor(type)) { - instantiationStrategy = InstantiationStrategy.ParameterlessConstructor; + instantiationStrategy = CollectionInstantiationStrategy.ParameterlessConstructor; + instantiationConcreteType = CollectionInstantiationConcreteType.Self; if (HasAddMethod(type, elementType)) { - populationStrategy = CollectionPopulationStrategy.Add; + populationCastType = CollectionPopulationCastType.NotApplicable; } - else if (GetInterface(type, _typeSymbols.GenericICollection_Unbound) is not null) + else if (_typeSymbols.GenericICollection is not null && GetInterface(type, _typeSymbols.GenericICollection_Unbound) is not null) { - populationCastType = _typeSymbols.GenericICollection; - populationStrategy = CollectionPopulationStrategy.Cast_Then_Add; + populationCastType = CollectionPopulationCastType.ICollection; } else { - RegisterUnsupportedType(type, Diagnostics.CollectionNotSupported); - return null; + return CreateUnsupportedCollectionSpec(typeParseInfo); } } - else if (IsInterfaceMatch(type, _typeSymbols.GenericICollection_Unbound) || - IsInterfaceMatch(type, _typeSymbols.GenericIList_Unbound)) + else 
if ((IsInterfaceMatch(type, _typeSymbols.GenericICollection_Unbound) || IsInterfaceMatch(type, _typeSymbols.GenericIList_Unbound))) { - typeToInstantiate = _typeSymbols.List; - instantiationStrategy = InstantiationStrategy.ParameterlessConstructor; - populationStrategy = CollectionPopulationStrategy.Add; + instantiationStrategy = CollectionInstantiationStrategy.ParameterlessConstructor; + instantiationConcreteType = CollectionInstantiationConcreteType.List; + populationCastType = CollectionPopulationCastType.NotApplicable; } else if (IsInterfaceMatch(type, _typeSymbols.GenericIEnumerable_Unbound)) { - typeToInstantiate = _typeSymbols.List; - populationCastType = _typeSymbols.GenericICollection; - instantiationStrategy = InstantiationStrategy.ParameterizedConstructor; - populationStrategy = CollectionPopulationStrategy.Cast_Then_Add; + instantiationStrategy = CollectionInstantiationStrategy.CopyConstructor; + instantiationConcreteType = CollectionInstantiationConcreteType.List; + populationCastType = CollectionPopulationCastType.ICollection; } else if (IsInterfaceMatch(type, _typeSymbols.ISet_Unbound)) { - typeToInstantiate = _typeSymbols.HashSet; - instantiationStrategy = InstantiationStrategy.ParameterlessConstructor; - populationStrategy = CollectionPopulationStrategy.Add; + instantiationStrategy = CollectionInstantiationStrategy.ParameterlessConstructor; + instantiationConcreteType = CollectionInstantiationConcreteType.HashSet; + populationCastType = CollectionPopulationCastType.NotApplicable; } else if (IsInterfaceMatch(type, _typeSymbols.IReadOnlySet_Unbound)) { - typeToInstantiate = _typeSymbols.HashSet; - populationCastType = _typeSymbols.ISet; - instantiationStrategy = InstantiationStrategy.ParameterizedConstructor; - populationStrategy = CollectionPopulationStrategy.Cast_Then_Add; + instantiationStrategy = CollectionInstantiationStrategy.CopyConstructor; + instantiationConcreteType = CollectionInstantiationConcreteType.HashSet; + populationCastType = 
CollectionPopulationCastType.ISet; } else if (IsInterfaceMatch(type, _typeSymbols.IReadOnlyList_Unbound) || IsInterfaceMatch(type, _typeSymbols.IReadOnlyCollection_Unbound)) { - typeToInstantiate = _typeSymbols.List; - populationCastType = _typeSymbols.GenericICollection; - instantiationStrategy = InstantiationStrategy.ParameterizedConstructor; - populationStrategy = CollectionPopulationStrategy.Cast_Then_Add; + instantiationStrategy = CollectionInstantiationStrategy.CopyConstructor; + instantiationConcreteType = CollectionInstantiationConcreteType.List; + populationCastType = CollectionPopulationCastType.ICollection; } else { - RegisterUnsupportedType(type, Diagnostics.CollectionNotSupported); - return null; + return CreateUnsupportedCollectionSpec(typeParseInfo); } - Debug.Assert(!(populationStrategy is CollectionPopulationStrategy.Cast_Then_Add && populationCastType is null)); + TypeRef elementTypeRef = EnqueueTransitiveType(typeParseInfo, elementType, DiagnosticDescriptors.ElementTypeNotSupported); - EnumerableSpec spec = new(type) + return new EnumerableSpec(type) { - ElementType = elementSpec, + ElementTypeRef = elementTypeRef, InstantiationStrategy = instantiationStrategy, - PopulationStrategy = populationStrategy, - TypeToInstantiate = ConstructGenericCollectionSpecIfRequired(typeToInstantiate, elementType) as EnumerableSpec, - PopulationCastType = ConstructGenericCollectionSpecIfRequired(populationCastType, elementType) as EnumerableSpec, + InstantiationConcreteType = instantiationConcreteType, + PopulationCastType = populationCastType, }; - - return spec; } - private ObjectSpec? CreateObjectSpec(INamedTypeSymbol objectSymbol) + private ObjectSpec CreateObjectSpec(TypeParseInfo typeParseInfo) { - // Add spec to cache before traversing properties to avoid stack overflow. 
- ObjectSpec objectSpec = new(objectSymbol); - _createdSpecs.Add(objectSymbol, objectSpec); + INamedTypeSymbol typeSymbol = (INamedTypeSymbol)typeParseInfo.TypeSymbol; + string typeName = typeSymbol.GetTypeName().Name; - string typeName = objectSpec.Name; - IMethodSymbol? ctor = null; + ObjectInstantiationStrategy initializationStrategy = ObjectInstantiationStrategy.None; DiagnosticDescriptor? initDiagDescriptor = null; + string? initExceptionMessage = null; + + IMethodSymbol? ctor = null; - if (!(objectSymbol.IsAbstract || objectSymbol.TypeKind is TypeKind.Interface)) + if (!(typeSymbol.IsAbstract || typeSymbol.TypeKind is TypeKind.Interface)) { IMethodSymbol? parameterlessCtor = null; IMethodSymbol? parameterizedCtor = null; bool hasMultipleParameterizedCtors = false; - foreach (IMethodSymbol candidate in objectSymbol.InstanceConstructors) + foreach (IMethodSymbol candidate in typeSymbol.InstanceConstructors) { if (candidate.DeclaredAccessibility is not Accessibility.Public) { @@ -595,14 +538,14 @@ private DictionarySpec CreateDictionarySpec(INamedTypeSymbol type, ITypeSymbol k } } - bool hasPublicParameterlessCtor = objectSymbol.IsValueType || parameterlessCtor is not null; + bool hasPublicParameterlessCtor = typeSymbol.IsValueType || parameterlessCtor is not null; if (!hasPublicParameterlessCtor && hasMultipleParameterizedCtors) { - initDiagDescriptor = Diagnostics.MultipleParameterizedConstructors; - objectSpec.InitExceptionMessage = string.Format(Emitter.ExceptionMessages.MultipleParameterizedConstructors, typeName); + initDiagDescriptor = DiagnosticDescriptors.MultipleParameterizedConstructors; + initExceptionMessage = string.Format(Emitter.ExceptionMessages.MultipleParameterizedConstructors, typeName); } - ctor = objectSymbol.IsValueType + ctor = typeSymbol.IsValueType // Roslyn ctor fetching APIs include paramerterless ctors for structs, unlike System.Reflection. ? parameterizedCtor ?? parameterlessCtor : parameterlessCtor ?? 
parameterizedCtor; @@ -610,21 +553,23 @@ private DictionarySpec CreateDictionarySpec(INamedTypeSymbol type, ITypeSymbol k if (ctor is null) { - initDiagDescriptor = Diagnostics.MissingPublicInstanceConstructor; - objectSpec.InitExceptionMessage = string.Format(Emitter.ExceptionMessages.MissingPublicInstanceConstructor, typeName); + initDiagDescriptor = DiagnosticDescriptors.MissingPublicInstanceConstructor; + initExceptionMessage = string.Format(Emitter.ExceptionMessages.MissingPublicInstanceConstructor, typeName); } else { - objectSpec.InstantiationStrategy = ctor.Parameters.Length is 0 ? InstantiationStrategy.ParameterlessConstructor : InstantiationStrategy.ParameterizedConstructor; + initializationStrategy = ctor.Parameters.Length is 0 ? ObjectInstantiationStrategy.ParameterlessConstructor : ObjectInstantiationStrategy.ParameterizedConstructor; } if (initDiagDescriptor is not null) { - Debug.Assert(objectSpec.InitExceptionMessage is not null); - RegisterUnsupportedType(objectSymbol, initDiagDescriptor); + Debug.Assert(initExceptionMessage is not null); + RecordTypeDiagnostic(typeParseInfo, initDiagDescriptor); } - INamedTypeSymbol current = objectSymbol; + Dictionary? properties = null; + + INamedTypeSymbol? 
current = typeSymbol; while (current is not null) { ImmutableArray members = current.GetMembers(); @@ -633,105 +578,90 @@ private DictionarySpec CreateDictionarySpec(INamedTypeSymbol type, ITypeSymbol k if (member is IPropertySymbol { IsIndexer: false, IsImplicitlyDeclared: false } property) { string propertyName = property.Name; - TypeSpec propertyTypeSpec = GetOrCreateTypeSpec(property.Type); + TypeRef propertyTypeRef = EnqueueTransitiveType(typeParseInfo, property.Type, DiagnosticDescriptors.PropertyNotSupported, propertyName); - if (propertyTypeSpec?.CanBindTo is not true) - { - InvocationDiagnosticInfo propertyDiagnostic = new InvocationDiagnosticInfo(Diagnostics.PropertyNotSupported, new string[] { propertyName, objectSymbol.ToDisplayString() }); - RegisterTypeDiagnostic(causingType: objectSymbol, propertyDiagnostic); - _invocationTargetTypeDiags.Add(propertyDiagnostic); - } + AttributeData? attributeData = property.GetAttributes().FirstOrDefault(a => SymbolEqualityComparer.Default.Equals(a.AttributeClass, _typeSymbols.ConfigurationKeyNameAttribute)); + string configKeyName = attributeData?.ConstructorArguments.FirstOrDefault().Value as string ?? propertyName; - if (propertyTypeSpec is not null) + PropertySpec spec = new(property) { - AttributeData? attributeData = property.GetAttributes().FirstOrDefault(a => SymbolEqualityComparer.Default.Equals(a.AttributeClass, _typeSymbols.ConfigurationKeyNameAttribute)); - string configKeyName = attributeData?.ConstructorArguments.FirstOrDefault().Value as string ?? 
propertyName; - PropertySpec spec = new(property) { Type = propertyTypeSpec, ConfigurationKeyName = configKeyName }; + TypeRef = propertyTypeRef, + ConfigurationKeyName = configKeyName + }; - objectSpec.Properties[propertyName] = spec; - Register_AsConfigWithChildren_HelperForGen_IfRequired(propertyTypeSpec); - } + (properties ??= new(StringComparer.OrdinalIgnoreCase))[propertyName] = spec; } } current = current.BaseType; } - if (objectSpec.InstantiationStrategy is InstantiationStrategy.ParameterizedConstructor) + List? ctorParams = null; + + if (initializationStrategy is ObjectInstantiationStrategy.ParameterizedConstructor) { - List missingParameters = new(); - List invalidParameters = new(); + Debug.Assert(ctor is not null); + List? missingParameters = null; + List? invalidParameters = null; foreach (IParameterSymbol parameter in ctor.Parameters) { string parameterName = parameter.Name; - if (!objectSpec.Properties.TryGetValue(parameterName, out PropertySpec? propertySpec)) + if (properties?.TryGetValue(parameterName, out PropertySpec? 
propertySpec) is not true) { - missingParameters.Add(parameterName); + (missingParameters ??= new()).Add(parameterName); } else if (parameter.RefKind is not RefKind.None) { - invalidParameters.Add(parameterName); + (invalidParameters ??= new()).Add(parameterName); } else { ParameterSpec paramSpec = new ParameterSpec(parameter) { - Type = propertySpec.Type, + TypeRef = propertySpec.TypeRef, ConfigurationKeyName = propertySpec.ConfigurationKeyName, }; propertySpec.MatchingCtorParam = paramSpec; - objectSpec.ConstructorParameters.Add(paramSpec); + (ctorParams ??= new()).Add(paramSpec); } } - if (invalidParameters.Count > 0) + if (invalidParameters?.Count > 0) { - objectSpec.InitExceptionMessage = string.Format(Emitter.ExceptionMessages.CannotBindToConstructorParameter, typeName, FormatParams(invalidParameters)); + initExceptionMessage = string.Format(Emitter.ExceptionMessages.CannotBindToConstructorParameter, typeName, FormatParams(invalidParameters)); } - else if (missingParameters.Count > 0) + else if (missingParameters?.Count > 0) { - if (objectSymbol.IsValueType) + if (typeSymbol.IsValueType) { - objectSpec.InstantiationStrategy = InstantiationStrategy.ParameterlessConstructor; + initializationStrategy = ObjectInstantiationStrategy.ParameterlessConstructor; } else { - objectSpec.InitExceptionMessage = string.Format(Emitter.ExceptionMessages.ConstructorParametersDoNotMatchProperties, typeName, FormatParams(missingParameters)); + initExceptionMessage = string.Format(Emitter.ExceptionMessages.ConstructorParametersDoNotMatchProperties, typeName, FormatParams(missingParameters)); } } - if (objectSpec.CanInstantiate) - { - RegisterTypeForMethodGen(MethodsToGen_CoreBindingHelper.Initialize, objectSpec); - } - static string FormatParams(List names) => string.Join(",", names); } - Debug.Assert((objectSpec.CanInstantiate && objectSpec.InitExceptionMessage is null) || - (!objectSpec.CanInstantiate && objectSpec.InitExceptionMessage is not null) || - 
(!objectSpec.CanInstantiate && (objectSymbol.IsAbstract || objectSymbol.TypeKind is TypeKind.Interface))); - - TryRegisterTypeForBindCoreGen(objectSpec); - return objectSpec; + return new ObjectSpec( + typeSymbol, + initializationStrategy, + properties: properties?.Values.ToImmutableEquatableArray(), + constructorParameters: ctorParams?.ToImmutableEquatableArray(), + initExceptionMessage); } - private bool MemberTypeIsBindable(ITypeSymbol containingTypeSymbol, ITypeSymbol memberTypeSymbol, DiagnosticDescriptor containingTypeDiagDescriptor, out TypeSpec? memberTypeSpec) - { - if (GetOrCreateTypeSpec(memberTypeSymbol) is TypeSpec { CanBindTo: true } spec) - { - memberTypeSpec = spec; - return true; - } + private static UnsupportedTypeSpec CreateUnsupportedCollectionSpec(TypeParseInfo typeParseInfo) + => CreateUnsupportedTypeSpec(typeParseInfo, NotSupportedReason.CollectionNotSupported); - RegisterUnsupportedType(containingTypeSymbol, containingTypeDiagDescriptor); - memberTypeSpec = null; - return false; - } + private static UnsupportedTypeSpec CreateUnsupportedTypeSpec(TypeParseInfo typeParseInfo, NotSupportedReason reason) => + new(typeParseInfo.TypeSymbol) { NotSupportedReason = reason }; - private bool TryGetElementType(INamedTypeSymbol type, out ITypeSymbol? elementType) + private bool TryGetElementType(INamedTypeSymbol type, [NotNullWhen(true)] out ITypeSymbol? elementType) { INamedTypeSymbol? candidate = GetInterface(type, _typeSymbols.GenericIEnumerable_Unbound); @@ -745,7 +675,7 @@ private bool TryGetElementType(INamedTypeSymbol type, out ITypeSymbol? elementTy return false; } - private bool IsCandidateDictionary(INamedTypeSymbol type, out ITypeSymbol? keyType, out ITypeSymbol? elementType) + private bool IsCandidateDictionary(INamedTypeSymbol type, [NotNullWhen(true)] out ITypeSymbol? keyType, [NotNullWhen(true)] out ITypeSymbol? elementType) { INamedTypeSymbol? candidate = GetInterface(type, _typeSymbols.GenericIDictionary_Unbound) ?? 
GetInterface(type, _typeSymbols.IReadOnlyDictionary_Unbound); @@ -771,24 +701,13 @@ private bool IsCandidateDictionary(INamedTypeSymbol type, out ITypeSymbol? keyTy private bool IsCollection(ITypeSymbol type) => type is INamedTypeSymbol namedType && GetInterface(namedType, _typeSymbols.IEnumerable) is not null; - private bool IsSupportedArrayType(ITypeSymbol type) + private static INamedTypeSymbol? GetInterface(INamedTypeSymbol type, INamedTypeSymbol? @interface) { - if (type is not IArrayTypeSymbol arrayType) + if (@interface is null) { - return false; - } - - if (arrayType.Rank > 1) - { - RegisterUnsupportedType(arrayType, Diagnostics.MultiDimArraysNotSupported); - return false; + return null; } - return true; - } - - private static INamedTypeSymbol? GetInterface(INamedTypeSymbol type, INamedTypeSymbol @interface) - { if (IsInterfaceMatch(type, @interface)) { return type; @@ -805,8 +724,13 @@ private bool IsSupportedArrayType(ITypeSymbol type) return type.AllInterfaces.FirstOrDefault(candidate => SymbolEqualityComparer.Default.Equals(candidate, @interface)); } - private static bool IsInterfaceMatch(INamedTypeSymbol type, INamedTypeSymbol @interface) + private static bool IsInterfaceMatch(INamedTypeSymbol type, INamedTypeSymbol? @interface) { + if (@interface is null) + { + return false; + } + if (type.IsGenericType) { INamedTypeSymbol unbound = type.ConstructUnboundGenericType(); @@ -840,8 +764,8 @@ private static bool HasPublicParameterLessCtor(INamedTypeSymbol type) => private static bool HasAddMethod(INamedTypeSymbol type, ITypeSymbol element) { - INamedTypeSymbol current = type; - while (current != null) + INamedTypeSymbol? 
current = type; + while (current is not null) { if (current.GetMembers("Add").Any(member => member is IMethodSymbol { Parameters.Length: 1 } method && @@ -856,8 +780,8 @@ private static bool HasAddMethod(INamedTypeSymbol type, ITypeSymbol element) private static bool HasAddMethod(INamedTypeSymbol type, ITypeSymbol key, ITypeSymbol element) { - INamedTypeSymbol current = type; - while (current != null) + INamedTypeSymbol? current = type; + while (current is not null) { if (current.GetMembers("Add").Any(member => member is IMethodSymbol { Parameters.Length: 2 } method && @@ -873,40 +797,51 @@ private static bool HasAddMethod(INamedTypeSymbol type, ITypeSymbol key, ITypeSy private static bool IsEnum(ITypeSymbol type) => type is INamedTypeSymbol { EnumUnderlyingType: INamedTypeSymbol { } }; - private CollectionSpec? ConstructGenericCollectionSpecIfRequired(INamedTypeSymbol? collectionType, params ITypeSymbol[] parameters) => - (collectionType is not null ? ConstructGenericCollectionSpec(collectionType, parameters) : null); - - private CollectionSpec? ConstructGenericCollectionSpec(INamedTypeSymbol type, params ITypeSymbol[] parameters) - { - Debug.Assert(type.IsGenericType); - INamedTypeSymbol constructedType = type.Construct(parameters); - return CreateCollectionSpec(constructedType); - } - - private void RegisterUnsupportedType(ITypeSymbol type, DiagnosticDescriptor descriptor = null) + private void RecordTypeDiagnosticIfRequired(TypeParseInfo typeParseInfo, TypeSpec typeSpec) { - InvocationDiagnosticInfo diagInfo = new(descriptor, new string[] { type.ToDisplayString() }); + ContainingTypeDiagnosticInfo? 
containingTypeDiagInfo = typeParseInfo.ContainingTypeDiagnosticInfo; - if (!_unsupportedTypes.Contains(type)) + if (typeSpec is UnsupportedTypeSpec unsupportedTypeSpec) + { + DiagnosticDescriptor descriptor = DiagnosticDescriptors.GetNotSupportedDescriptor(unsupportedTypeSpec.NotSupportedReason); + RecordTypeDiagnostic(typeParseInfo, descriptor); + } + else if (containingTypeDiagInfo?.Descriptor == DiagnosticDescriptors.DictionaryKeyNotSupported && + typeSpec is not ParsableFromStringSpec) { - RegisterTypeDiagnostic(type, diagInfo); - _unsupportedTypes.Add(type); + ReportContainingTypeDiagnosticIfRequired(typeParseInfo); } + } - _invocationTargetTypeDiags.Add(diagInfo); + private void RecordTypeDiagnostic(TypeParseInfo typeParseInfo, DiagnosticDescriptor descriptor) + { + RecordDiagnostic(descriptor, typeParseInfo.BinderInvocation.Location, new object?[] { typeParseInfo.TypeName }); + ReportContainingTypeDiagnosticIfRequired(typeParseInfo); } - private void RegisterTypeDiagnostic(ITypeSymbol causingType, InvocationDiagnosticInfo info) + private void ReportContainingTypeDiagnosticIfRequired(TypeParseInfo typeParseInfo) { - bool typeHadDiags = _typeDiagnostics.TryGetValue(causingType, out HashSet? typeDiags); - typeDiags ??= new HashSet(); - typeDiags.Add(info); + ContainingTypeDiagnosticInfo? containingTypeDiagInfo = typeParseInfo.ContainingTypeDiagnosticInfo; - if (!typeHadDiags) + while (containingTypeDiagInfo is not null) { - _typeDiagnostics[causingType] = typeDiags; + string containingTypeName = containingTypeDiagInfo.TypeName; + + object[] messageArgs = containingTypeDiagInfo.MemberName is string memberName + ? 
new[] { memberName, containingTypeName } + : new[] { containingTypeName }; + + RecordDiagnostic(containingTypeDiagInfo.Descriptor, typeParseInfo.BinderInvocation.Location, messageArgs); + + containingTypeDiagInfo = containingTypeDiagInfo.ContainingTypeInfo; } } + + private void RecordDiagnostic(DiagnosticDescriptor descriptor, Location trimmedLocation, params object?[]? messageArgs) + { + Diagnostics ??= new List(); + Diagnostics.Add(DiagnosticInfo.Create(descriptor, trimmedLocation, messageArgs)); + } } } } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/ConfigurationBindingGenerator.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/ConfigurationBindingGenerator.cs index fbca2dd3cfc50..ec4b234a61045 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/ConfigurationBindingGenerator.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/ConfigurationBindingGenerator.cs @@ -2,9 +2,10 @@ // The .NET Foundation licenses this file to you under the MIT license. //#define LAUNCH_DEBUGGER -using System.Collections.Immutable; +using System; using Microsoft.CodeAnalysis; using Microsoft.CodeAnalysis.CSharp; +using SourceGenerators; namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { @@ -14,7 +15,9 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration [Generator] public sealed partial class ConfigurationBindingGenerator : IIncrementalGenerator { - private static readonly string ProjectName = Emitter.s_assemblyName.Name; + private static readonly string ProjectName = Emitter.s_assemblyName.Name!; + + public const string GenSpecTrackingName = nameof(SourceGenerationSpec); public void Initialize(IncrementalGeneratorInitializationContext context) { @@ -30,39 +33,61 @@ public void Initialize(IncrementalGeneratorInitializationContext context) ? 
new CompilationData((CSharpCompilation)compilation) : null); - IncrementalValuesProvider inputCalls = context.SyntaxProvider + IncrementalValueProvider<(SourceGenerationSpec?, ImmutableEquatableArray?)> genSpec = context.SyntaxProvider .CreateSyntaxProvider( (node, _) => BinderInvocation.IsCandidateSyntaxNode(node), BinderInvocation.Create) - .Where(invocation => invocation is not null); + .Where(invocation => invocation is not null) + .Collect() + .Combine(compilationData) + .Select((tuple, cancellationToken) => + { + if (tuple.Right is not CompilationData compilationData) + { + return (null, null); + } - IncrementalValueProvider<(CompilationData?, ImmutableArray)> inputData = compilationData.Combine(inputCalls.Collect()); + try + { + Parser parser = new(compilationData); + SourceGenerationSpec? spec = parser.GetSourceGenerationSpec(tuple.Left, cancellationToken); + ImmutableEquatableArray? diagnostics = parser.Diagnostics?.ToImmutableEquatableArray(); + return (spec, diagnostics); + } + catch (Exception ex) + { + throw ex; + } + }) + .WithTrackingName(GenSpecTrackingName); - context.RegisterSourceOutput(inputData, (spc, source) => Execute(source.Item1, source.Item2, spc)); + context.RegisterSourceOutput(genSpec, ReportDiagnosticsAndEmitSource); } - private static void Execute(CompilationData compilationData, ImmutableArray inputCalls, SourceProductionContext context) - { - if (inputCalls.IsDefaultOrEmpty) - { - return; - } + /// + /// Instrumentation helper for unit tests. + /// + public Action? OnSourceEmitting { get; init; } - if (compilationData?.LanguageVersionIsSupported is not true) + private void ReportDiagnosticsAndEmitSource(SourceProductionContext sourceProductionContext, (SourceGenerationSpec? SourceGenerationSpec, ImmutableEquatableArray? 
Diagnostics) input) + { + if (input.Diagnostics is ImmutableEquatableArray diagnostics) { - context.ReportDiagnostic(Diagnostic.Create(Parser.Diagnostics.LanguageVersionNotSupported, location: null)); - return; + foreach (DiagnosticInfo diagnostic in diagnostics) + { + sourceProductionContext.ReportDiagnostic(diagnostic.CreateDiagnostic()); + } } - Parser parser = new(context, compilationData.TypeSymbols!, inputCalls); - if (parser.GetSourceGenerationSpec() is SourceGenerationSpec spec) + if (input.SourceGenerationSpec is SourceGenerationSpec spec) { - Emitter emitter = new(context, spec); - emitter.Emit(); + OnSourceEmitting?.Invoke(spec); + Emitter emitter = new(spec); + emitter.Emit(sourceProductionContext); } } - private sealed record CompilationData + internal sealed class CompilationData { public bool LanguageVersionIsSupported { get; } public KnownTypeSymbols? TypeSymbols { get; } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/ConfigurationBinder.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/ConfigurationBinder.cs index f1c7d5f7ff215..7d723139bde3e 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/ConfigurationBinder.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/ConfigurationBinder.cs @@ -1,8 +1,8 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. 
-using System.Collections.Generic; using System.Diagnostics; +using SourceGenerators; namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { @@ -10,11 +10,9 @@ public sealed partial class ConfigurationBindingGenerator { private sealed partial class Emitter { - private bool ShouldEmitMethods(MethodsToGen_ConfigurationBinder methods) => (_sourceGenSpec.MethodsToGen_ConfigurationBinder & methods) != 0; - private void EmitBindingExtensions_IConfiguration() { - if (!ShouldEmitMethods(MethodsToGen_ConfigurationBinder.Any)) + if (!ShouldEmitMethods(MethodsToGen.ConfigBinder_Any)) { return; } @@ -31,30 +29,30 @@ private void EmitGetMethods() const string expressionForGetCore = nameof(MethodsToGen_CoreBindingHelper.GetCore); const string documentation = "Attempts to bind the configuration instance to a new instance of type T."; - if (ShouldEmitMethods(MethodsToGen_ConfigurationBinder.Get_T)) + if (ShouldEmitMethods(MethodsToGen.ConfigBinder_Get_T)) { - StartMethodDefinition(MethodsToGen_ConfigurationBinder.Get_T, documentation); + EmitStartDefinition_Get_Or_GetValue_Overload(MethodsToGen.ConfigBinder_Get_T, documentation); _writer.WriteLine($"public static T? {Identifier.Get}(this {Identifier.IConfiguration} {Identifier.configuration}) => " + $"(T?)({expressionForGetCore}({Identifier.configuration}, typeof(T), {Identifier.configureOptions}: null) ?? default(T));"); } - if (ShouldEmitMethods(MethodsToGen_ConfigurationBinder.Get_T_BinderOptions)) + if (ShouldEmitMethods(MethodsToGen.ConfigBinder_Get_T_BinderOptions)) { - StartMethodDefinition(MethodsToGen_ConfigurationBinder.Get_T_BinderOptions, documentation); + EmitStartDefinition_Get_Or_GetValue_Overload(MethodsToGen.ConfigBinder_Get_T_BinderOptions, documentation); _writer.WriteLine($"public static T? 
{Identifier.Get}(this {Identifier.IConfiguration} {Identifier.configuration}, {TypeDisplayString.NullableActionOfBinderOptions} {Identifier.configureOptions}) => " + $"(T?)({expressionForGetCore}({Identifier.configuration}, typeof(T), {Identifier.configureOptions}) ?? default(T));"); } - if (ShouldEmitMethods(MethodsToGen_ConfigurationBinder.Get_TypeOf)) + if (ShouldEmitMethods(MethodsToGen.ConfigBinder_Get_TypeOf)) { - StartMethodDefinition(MethodsToGen_ConfigurationBinder.Get_TypeOf, documentation); + EmitStartDefinition_Get_Or_GetValue_Overload(MethodsToGen.ConfigBinder_Get_TypeOf, documentation); _writer.WriteLine($"public static object? {Identifier.Get}(this {Identifier.IConfiguration} {Identifier.configuration}, Type {Identifier.type}) => " + $"{expressionForGetCore}({Identifier.configuration}, {Identifier.type}, {Identifier.configureOptions}: null);"); } - if (ShouldEmitMethods(MethodsToGen_ConfigurationBinder.Get_TypeOf_BinderOptions)) + if (ShouldEmitMethods(MethodsToGen.ConfigBinder_Get_TypeOf_BinderOptions)) { - StartMethodDefinition(MethodsToGen_ConfigurationBinder.Get_TypeOf_BinderOptions, documentation); + EmitStartDefinition_Get_Or_GetValue_Overload(MethodsToGen.ConfigBinder_Get_TypeOf_BinderOptions, documentation); _writer.WriteLine($"public static object? 
{Identifier.Get}(this {Identifier.IConfiguration} {Identifier.configuration}, Type {Identifier.type}, {TypeDisplayString.NullableActionOfBinderOptions} {Identifier.configureOptions}) => " + $"{expressionForGetCore}({Identifier.configuration}, {Identifier.type}, {Identifier.configureOptions});"); } @@ -65,30 +63,30 @@ private void EmitGetValueMethods() const string expressionForGetValueCore = $"{Identifier.BindingExtensions}.{nameof(MethodsToGen_CoreBindingHelper.GetValueCore)}"; const string documentation = "Extracts the value with the specified key and converts it to the specified type."; - if (ShouldEmitMethods(MethodsToGen_ConfigurationBinder.GetValue_T_key)) + if (ShouldEmitMethods(MethodsToGen.ConfigBinder_GetValue_T_key)) { - StartMethodDefinition(MethodsToGen_ConfigurationBinder.GetValue_T_key, documentation); + EmitStartDefinition_Get_Or_GetValue_Overload(MethodsToGen.ConfigBinder_GetValue_T_key, documentation); _writer.WriteLine($"public static T? {Identifier.GetValue}(this {Identifier.IConfiguration} {Identifier.configuration}, string {Identifier.key}) => " + $"(T?)({expressionForGetValueCore}({Identifier.configuration}, typeof(T), {Identifier.key}) ?? default(T));"); } - if (ShouldEmitMethods(MethodsToGen_ConfigurationBinder.GetValue_T_key_defaultValue)) + if (ShouldEmitMethods(MethodsToGen.ConfigBinder_GetValue_T_key_defaultValue)) { - StartMethodDefinition(MethodsToGen_ConfigurationBinder.GetValue_T_key_defaultValue, documentation); + EmitStartDefinition_Get_Or_GetValue_Overload(MethodsToGen.ConfigBinder_GetValue_T_key_defaultValue, documentation); _writer.WriteLine($"public static T? {Identifier.GetValue}(this {Identifier.IConfiguration} {Identifier.configuration}, string {Identifier.key}, T {Identifier.defaultValue}) => " + $"(T?)({expressionForGetValueCore}({Identifier.configuration}, typeof(T), {Identifier.key}) ?? 
{Identifier.defaultValue});"); } - if (ShouldEmitMethods(MethodsToGen_ConfigurationBinder.GetValue_TypeOf_key)) + if (ShouldEmitMethods(MethodsToGen.ConfigBinder_GetValue_TypeOf_key)) { - StartMethodDefinition(MethodsToGen_ConfigurationBinder.GetValue_TypeOf_key, documentation); + EmitStartDefinition_Get_Or_GetValue_Overload(MethodsToGen.ConfigBinder_GetValue_TypeOf_key, documentation); _writer.WriteLine($"public static object? {Identifier.GetValue}(this {Identifier.IConfiguration} {Identifier.configuration}, Type {Identifier.type}, string {Identifier.key}) => " + $"{expressionForGetValueCore}({Identifier.configuration}, {Identifier.type}, {Identifier.key});"); } - if (ShouldEmitMethods(MethodsToGen_ConfigurationBinder.GetValue_TypeOf_key_defaultValue)) + if (ShouldEmitMethods(MethodsToGen.ConfigBinder_GetValue_TypeOf_key_defaultValue)) { - StartMethodDefinition(MethodsToGen_ConfigurationBinder.GetValue_TypeOf_key_defaultValue, documentation); + EmitStartDefinition_Get_Or_GetValue_Overload(MethodsToGen.ConfigBinder_GetValue_TypeOf_key_defaultValue, documentation); _writer.WriteLine($"public static object? {Identifier.GetValue}(this {Identifier.IConfiguration} {Identifier.configuration}, Type {Identifier.type}, string {Identifier.key}, object? {Identifier.defaultValue}) => " + $"{expressionForGetValueCore}({Identifier.configuration}, {Identifier.type}, {Identifier.key}) ?? {Identifier.defaultValue};"); } @@ -96,50 +94,52 @@ private void EmitGetValueMethods() private void EmitBindMethods_ConfigurationBinder() { - if (!ShouldEmitMethods(MethodsToGen_ConfigurationBinder.Bind)) + if (!ShouldEmitMethods(MethodsToGen.ConfigBinder_Bind)) { return; } string instanceParamExpr = $"object? 
{Identifier.instance}"; - if (ShouldEmitMethods(MethodsToGen_ConfigurationBinder.Bind_instance)) + if (ShouldEmitMethods(MethodsToGen.ConfigBinder_Bind_instance)) { EmitMethods( - MethodsToGen_ConfigurationBinder.Bind_instance, + _interceptorInfo.ConfigBinder_Bind_instance, additionalParams: instanceParamExpr, configExpression: Identifier.configuration, configureOptions: false); } - if (ShouldEmitMethods(MethodsToGen_ConfigurationBinder.Bind_instance_BinderOptions)) + if (ShouldEmitMethods(MethodsToGen.ConfigBinder_Bind_instance_BinderOptions)) { EmitMethods( - MethodsToGen_ConfigurationBinder.Bind_instance_BinderOptions, + _interceptorInfo.ConfigBinder_Bind_instance_BinderOptions, additionalParams: $"{instanceParamExpr}, {TypeDisplayString.NullableActionOfBinderOptions} {Identifier.configureOptions}", configExpression: Identifier.configuration, configureOptions: true); } - if (ShouldEmitMethods(MethodsToGen_ConfigurationBinder.Bind_key_instance)) + if (ShouldEmitMethods(MethodsToGen.ConfigBinder_Bind_key_instance)) { EmitMethods( - MethodsToGen_ConfigurationBinder.Bind_key_instance, + _interceptorInfo.ConfigBinder_Bind_key_instance, additionalParams: $"string {Identifier.key}, {instanceParamExpr}", configExpression: $"{Expression.configurationGetSection}({Identifier.key})", configureOptions: false); } - void EmitMethods(MethodsToGen_ConfigurationBinder method, string additionalParams, string configExpression, bool configureOptions) + void EmitMethods(ImmutableEquatableArray? 
interceptorInfo, string additionalParams, string configExpression, bool configureOptions) { - foreach ((ComplexTypeSpec type, List interceptorInfoList) in _sourceGenSpec.InterceptionInfo_ConfigBinder.GetOverloadInfo(method)) + Debug.Assert(interceptorInfo is not null); + + foreach ((ComplexTypeSpec type, ImmutableEquatableArray locations) in interceptorInfo) { EmitBlankLineIfRequired(); _writer.WriteLine($"/// Attempts to bind the given object instance to configuration values by matching property names against configuration keys recursively."); - EmitInterceptsLocationAnnotations(interceptorInfoList); + EmitInterceptsLocationAnnotations(locations); EmitStartBlock($"public static void {Identifier.Bind}_{type.IdentifierCompatibleSubstring}(this {Identifier.IConfiguration} {Identifier.configuration}, {additionalParams})"); - if (type.HasBindableMembers) + if (_typeIndex.HasBindableMembers(type)) { Debug.Assert(!type.IsValueType); string binderOptionsArg = configureOptions ? $"{Identifier.GetBinderOptions}({Identifier.configureOptions})" : $"{Identifier.binderOptions}: null"; @@ -147,7 +147,7 @@ void EmitMethods(MethodsToGen_ConfigurationBinder method, string additionalParam EmitCheckForNullArgument_WithBlankLine(Identifier.configuration); EmitCheckForNullArgument_WithBlankLine(Identifier.instance, voidReturn: true); _writer.WriteLine($$""" - var {{Identifier.typedObj}} = ({{type.EffectiveType.DisplayString}}){{Identifier.instance}}; + var {{Identifier.typedObj}} = ({{type.DisplayString}}){{Identifier.instance}}; {{nameof(MethodsToGen_CoreBindingHelper.BindCore)}}({{configExpression}}, ref {{Identifier.typedObj}}, defaultValueIfNotFound: false, {{binderOptionsArg}}); """); } @@ -157,11 +157,11 @@ void EmitMethods(MethodsToGen_ConfigurationBinder method, string additionalParam } } - private void StartMethodDefinition(MethodsToGen_ConfigurationBinder method, string documentation) + private void EmitStartDefinition_Get_Or_GetValue_Overload(MethodsToGen overload, string 
documentation) { EmitBlankLineIfRequired(); _writer.WriteLine($"/// {documentation}"); - EmitInterceptsLocationAnnotations(method); + EmitInterceptsLocationAnnotations(overload); } } } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/CoreBindingHelpers.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/CoreBindingHelpers.cs index 90531efe1b0c1..1e14094c66440 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/CoreBindingHelpers.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/CoreBindingHelpers.cs @@ -7,6 +7,7 @@ using System.Linq; using System.Text.RegularExpressions; using Microsoft.CodeAnalysis; +using SourceGenerators; namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { @@ -18,7 +19,7 @@ private sealed partial class Emitter private bool _emitBlankLineBeforeNextStatement; private static readonly Regex s_arrayBracketsRegex = new(Regex.Escape("[]")); - private bool ShouldEmitMethods(MethodsToGen_CoreBindingHelper methods) => (_sourceGenSpec.MethodsToGen_CoreBindingHelper & methods) != 0; + private bool ShouldEmitMethods(MethodsToGen_CoreBindingHelper methods) => (_bindingHelperInfo.MethodsToGen & methods) != 0; private void EmitCoreBindingHelpers() { @@ -36,33 +37,54 @@ private void EmitCoreBindingHelpers() private void EmitConfigurationKeyCaches() { - if (!_sourceGenSpec.TypesForGen_CoreBindingHelper_Methods.TryGetValue(MethodsToGen_CoreBindingHelper.BindCore, out HashSet targetTypes)) + if (_bindingHelperInfo.TypesForGen_BindCore is not { Count: not 0 } types) { return; } EmitBlankLineIfRequired(); - foreach (TypeSpec type in targetTypes) + foreach (TypeSpec type in types) { if (type is not ObjectSpec objectType) { continue; } - HashSet keys = new(objectType.ConstructorParameters.Select(m => GetCacheElement(m))); - keys.UnionWith(objectType.Properties.Values.Select(m => GetCacheElement(m))); + 
Debug.Assert(_typeIndex.HasBindableMembers(objectType)); + + HashSet? keys = null; static string GetCacheElement(MemberSpec member) => $@"""{member.ConfigurationKeyName}"""; + if (objectType.ConstructorParameters?.Select(m => GetCacheElement(m)) is IEnumerable paramNames) + { + keys = new(paramNames); + } + + if (objectType.Properties?.Select(m => GetCacheElement(m)) is IEnumerable propNames) + { + if (keys is null) + { + keys = new(propNames); + } + else + { + keys.UnionWith(propNames); + } + } + + // Type has bindable members. + Debug.Assert(keys is not null); + string configKeysSource = string.Join(", ", keys); - string fieldName = GetConfigKeyCacheFieldName(objectType); + string fieldName = TypeIndex.GetConfigKeyCacheFieldName(objectType); _writer.WriteLine($@"private readonly static Lazy<{TypeDisplayString.HashSetOfString}> {fieldName} = new(() => new {TypeDisplayString.HashSetOfString}(StringComparer.OrdinalIgnoreCase) {{ {configKeysSource} }});"); } } private void EmitGetCoreMethod() { - if (!_sourceGenSpec.TypesForGen_CoreBindingHelper_Methods.TryGetValue(MethodsToGen_CoreBindingHelper.GetCore, out HashSet? 
types)) + if (_bindingHelperInfo.TypesForGen_GetCore is not { Count: not 0 } targetTypes) { return; } @@ -78,10 +100,11 @@ private void EmitGetCoreMethod() EmitIConfigurationHasValueOrChildrenCheck(voidReturn: false); bool isFirstType = true; - foreach (TypeSpec type in types) + foreach (TypeSpec type in targetTypes) { - TypeSpec effectiveType = type.EffectiveType; - TypeSpecKind kind = effectiveType.SpecKind; + Debug.Assert(_typeIndex.CanBindTo(type.TypeRef)); + + TypeSpec effectiveType = _typeIndex.GetEffectiveTypeSpec(type); string conditionKindExpr = GetConditionKindExpr(ref isFirstType); EmitStartBlock($"{conditionKindExpr} ({Identifier.type} == typeof({type.DisplayString}))"); @@ -101,7 +124,7 @@ private void EmitGetCoreMethod() useIncrementalStringValueIdentifier: false); } break; - case ConfigurationSectionSpec configurationSectionSpec: + case ConfigurationSectionSpec: { EmitCastToIConfigurationSection(); _writer.WriteLine($"return {Identifier.section};"); @@ -109,7 +132,7 @@ private void EmitGetCoreMethod() break; case ComplexTypeSpec complexType: { - if (complexType.CanInstantiate) + if (_typeIndex.CanInstantiate(complexType)) { EmitBindingLogic(complexType, Identifier.instance, Identifier.configuration, InitializationKind.Declaration, ValueDefaulting.CallSetter); _writer.WriteLine($"return {Identifier.instance};"); @@ -118,6 +141,12 @@ private void EmitGetCoreMethod() { _writer.WriteLine($@"throw new {Identifier.InvalidOperationException}(""{exMsg}"");"); } +#if DEBUG + else + { + Debug.Fail($"Complex should not be included for GetCore gen: {complexType.DisplayString}"); + } +#endif } break; } @@ -141,7 +170,7 @@ void EmitCastToIConfigurationSection() => private void EmitGetValueCoreMethod() { - if (!_sourceGenSpec.TypesForGen_CoreBindingHelper_Methods.TryGetValue(MethodsToGen_CoreBindingHelper.GetValueCore, out HashSet? 
targetTypes)) + if (_bindingHelperInfo.TypesForGen_GetValueCore is not { Count: not 0 } targetTypes) { return; } @@ -169,7 +198,7 @@ private void EmitGetValueCoreMethod() EmitStartBlock($"{conditionKindExpr} ({Identifier.type} == typeof({type.DisplayString}))"); EmitBindingLogic( - (ParsableFromStringSpec)type.EffectiveType, + (ParsableFromStringSpec)_typeIndex.GetEffectiveTypeSpec(type), Identifier.value, Expression.sectionPath, writeOnSuccess: (parsedValueExpr) => _writer.WriteLine($"return {parsedValueExpr};"), @@ -188,7 +217,7 @@ private void EmitGetValueCoreMethod() private void EmitBindCoreMainMethod() { - if (!_sourceGenSpec.TypesForGen_CoreBindingHelper_Methods.TryGetValue(MethodsToGen_CoreBindingHelper.BindCoreMain, out HashSet? targetTypes)) + if (_bindingHelperInfo.TypesForGen_BindCoreMain is not { Count: not 0 } targetTypes) { return; } @@ -203,8 +232,8 @@ private void EmitBindCoreMainMethod() bool isFirstType = true; foreach (ComplexTypeSpec type in targetTypes) { - ComplexTypeSpec effectiveType = (ComplexTypeSpec)type.EffectiveType; - Debug.Assert(effectiveType.HasBindableMembers); + ComplexTypeSpec effectiveType = (ComplexTypeSpec)_typeIndex.GetEffectiveTypeSpec(type); + Debug.Assert(_typeIndex.HasBindableMembers(effectiveType)); string conditionKindExpr = GetConditionKindExpr(ref isFirstType); EmitStartBlock($"{conditionKindExpr} ({Identifier.type} == typeof({type.DisplayString}))"); @@ -221,14 +250,14 @@ private void EmitBindCoreMainMethod() private void EmitBindCoreMethods() { - if (!_sourceGenSpec.TypesForGen_CoreBindingHelper_Methods.TryGetValue(MethodsToGen_CoreBindingHelper.BindCore, out HashSet? 
targetTypes)) + if (_bindingHelperInfo.TypesForGen_BindCore is not ImmutableEquatableArray types) { return; } - foreach (ComplexTypeSpec type in targetTypes) + foreach (ComplexTypeSpec type in types) { - Debug.Assert(type.HasBindableMembers); + Debug.Assert(_typeIndex.HasBindableMembers(type)); EmitBlankLineIfRequired(); EmitBindCoreMethod(type); } @@ -239,26 +268,35 @@ private void EmitBindCoreMethod(ComplexTypeSpec type) string objParameterExpression = $"ref {type.DisplayString} {Identifier.instance}"; EmitStartBlock(@$"public static void {nameof(MethodsToGen_CoreBindingHelper.BindCore)}({Identifier.IConfiguration} {Identifier.configuration}, {objParameterExpression}, bool defaultValueIfNotFound, {Identifier.BinderOptions}? {Identifier.binderOptions})"); - ComplexTypeSpec effectiveType = (ComplexTypeSpec)type.EffectiveType; - if (effectiveType is EnumerableSpec enumerable) - { - if (effectiveType.InstantiationStrategy is InstantiationStrategy.Array) - { - Debug.Assert(type == effectiveType); - EmitPopulationImplForArray((EnumerableSpec)type); - } - else - { - EmitPopulationImplForEnumerableWithAdd(enumerable); - } - } - else if (effectiveType is DictionarySpec dictionary) - { - EmitBindCoreImplForDictionary(dictionary); - } - else + ComplexTypeSpec effectiveType = (ComplexTypeSpec)_typeIndex.GetEffectiveTypeSpec(type); + + switch (effectiveType) { - EmitBindCoreImplForObject((ObjectSpec)effectiveType); + case ArraySpec arrayType: + { + EmitBindCoreImplForArray(arrayType); + } + break; + case EnumerableSpec enumerableType: + { + EmitBindCoreImplForEnumerableWithAdd(enumerableType); + } + break; + case DictionarySpec dictionaryType: + { + EmitBindCoreImplForDictionary(dictionaryType); + } + break; + case ObjectSpec objectType: + { + EmitBindCoreImplForObject(objectType); + } + break; + default: + { + Debug.Fail($"Unsupported spec for bind core gen: {effectiveType.GetType()}"); + } + break; } EmitEndBlock(); @@ -266,12 +304,12 @@ private void 
EmitBindCoreMethod(ComplexTypeSpec type) private void EmitInitializeMethods() { - if (!_sourceGenSpec.TypesForGen_CoreBindingHelper_Methods.TryGetValue(MethodsToGen_CoreBindingHelper.Initialize, out HashSet? targetTypes)) + if (_bindingHelperInfo.TypesForGen_Initialize is not ImmutableEquatableArray types) { return; } - foreach (ObjectSpec type in targetTypes) + foreach (ObjectSpec type in types) { EmitBlankLineIfRequired(); EmitInitializeMethod(type); @@ -280,16 +318,20 @@ private void EmitInitializeMethods() private void EmitInitializeMethod(ObjectSpec type) { - Debug.Assert(type.CanInstantiate); - List ctorParams = type.ConstructorParameters; - IEnumerable initOnlyProps = type.Properties.Values.Where(prop => prop is { SetOnInit: true }); + Debug.Assert(type.InstantiationStrategy is ObjectInstantiationStrategy.ParameterizedConstructor); + Debug.Assert(_typeIndex.CanInstantiate(type)); + Debug.Assert( + type is { Properties: not null, ConstructorParameters: not null }, + $"Expecting type for init method, {type.DisplayString}, to have both properties and ctor params."); + + IEnumerable initOnlyProps = type.Properties.Where(prop => prop is { SetOnInit: true }); List ctorArgList = new(); string displayString = type.DisplayString; EmitStartBlock($"public static {type.DisplayString} {GetInitalizeMethodDisplayString(type)}({Identifier.IConfiguration} {Identifier.configuration}, {Identifier.BinderOptions}? 
{Identifier.binderOptions})"); _emitBlankLineBeforeNextStatement = false; - foreach (ParameterSpec parameter in ctorParams) + foreach (ParameterSpec parameter in type.ConstructorParameters) { string name = parameter.Name; string argExpr = parameter.RefKind switch @@ -307,7 +349,7 @@ private void EmitInitializeMethod(ObjectSpec type) foreach (PropertySpec property in initOnlyProps) { - if (property.ShouldBindTo && property.MatchingCtorParam is null) + if (_typeIndex.ShouldBindTo(property) && property.MatchingCtorParam is null) { EmitBindImplForMember(property); } @@ -335,7 +377,7 @@ private void EmitInitializeMethod(ObjectSpec type) void EmitBindImplForMember(MemberSpec member) { - TypeSpec memberType = member.Type; + TypeSpec memberType = _typeIndex.GetTypeSpec(member.TypeRef); string parsedMemberDeclarationLhs = $"{memberType.DisplayString} {member.Name}"; string configKeyName = member.ConfigurationKeyName; string parsedMemberAssignmentLhsExpr; @@ -427,29 +469,32 @@ private void EmitHelperMethods() } if (ShouldEmitMethods(MethodsToGen_CoreBindingHelper.BindCoreMain | MethodsToGen_CoreBindingHelper.GetCore) || - ShouldEmitMethods(MethodsToGen_ConfigurationBinder.Bind_instance_BinderOptions)) + ShouldEmitMethods(MethodsToGen.ConfigBinder_Bind_instance_BinderOptions)) { EmitBlankLineIfRequired(); EmitGetBinderOptionsHelper(); } - bool enumTypeExists = false; - - foreach (ParsableFromStringSpec type in _sourceGenSpec.PrimitivesForHelperGen) + if (_bindingHelperInfo.TypesForGen_ParsePrimitive is { Count: not 0 } stringParsableTypes) { - EmitBlankLineIfRequired(); + bool enumTypeExists = false; - if (type.StringParsableTypeKind == StringParsableTypeKind.Enum) + foreach (ParsableFromStringSpec type in stringParsableTypes) { - if (!enumTypeExists) + EmitBlankLineIfRequired(); + + if (type.StringParsableTypeKind == StringParsableTypeKind.Enum) { - EmitEnumParseMethod(); - enumTypeExists = true; + if (!enumTypeExists) + { + EmitEnumParseMethod(); + enumTypeExists = true; + 
} + } + else + { + EmitPrimitiveParseMethod(type); } - } - else - { - EmitPrimitiveParseMethod(type); } } } @@ -615,7 +660,7 @@ private void EmitPrimitiveParseMethod(ParsableFromStringSpec type) string exceptionArg1 = string.Format(ExceptionMessages.FailedBinding, $"{{{Identifier.getPath}()}}", $"{{typeof({typeDisplayString})}}"); - EmitStartBlock($"public static {typeDisplayString} {type.ParseMethodName}(string {Identifier.value}, Func {Identifier.getPath})"); + EmitStartBlock($"public static {typeDisplayString} {TypeIndex.GetParseMethodName(type)}(string {Identifier.value}, Func {Identifier.getPath})"); EmitEndBlock($$""" try { @@ -628,13 +673,19 @@ private void EmitPrimitiveParseMethod(ParsableFromStringSpec type) """); } - private void EmitPopulationImplForArray(EnumerableSpec type) + private void EmitBindCoreImplForArray(ArraySpec type) { - EnumerableSpec typeToInstantiate = (EnumerableSpec)type.TypeToInstantiate; - - // Create list and bind elements. + TypeRef elementTypeRef = type.ElementTypeRef; + string elementTypeDisplayString = _typeIndex.GetTypeSpec(elementTypeRef).DisplayString; string tempIdentifier = GetIncrementalIdentifier(Identifier.temp); - EmitBindingLogic(typeToInstantiate, tempIdentifier, Identifier.configuration, InitializationKind.Declaration, ValueDefaulting.None); + + // Create temp list. + _writer.WriteLine($"var {tempIdentifier} = new List<{elementTypeDisplayString}>();"); + _writer.WriteLine(); + + // Bind elements to temp list. + EmitBindingLogicForEnumerableWithAdd(elementTypeRef, tempIdentifier); + _writer.WriteLine(); // Resize array and add binded elements. 
_writer.WriteLine($$""" @@ -644,15 +695,19 @@ private void EmitPopulationImplForArray(EnumerableSpec type) """); } - private void EmitPopulationImplForEnumerableWithAdd(EnumerableSpec type) + private void EmitBindCoreImplForEnumerableWithAdd(EnumerableSpec type) { EmitCollectionCastIfRequired(type, out string instanceIdentifier); + EmitBindingLogicForEnumerableWithAdd(type.ElementTypeRef, instanceIdentifier); + } + private void EmitBindingLogicForEnumerableWithAdd(TypeRef elementTypeRef, string enumerableIdentifier) + { Emit_Foreach_Section_In_ConfigChildren_StartBlock(); - string addExpr = $"{instanceIdentifier}.{Identifier.Add}"; + string addExpr = $"{enumerableIdentifier}.{Identifier.Add}"; - switch (type.ElementType) + switch (_typeIndex.GetEffectiveTypeSpec(elementTypeRef)) { case ParsableFromStringSpec stringParsableType: { @@ -666,12 +721,12 @@ private void EmitPopulationImplForEnumerableWithAdd(EnumerableSpec type) useIncrementalStringValueIdentifier: false); } break; - case ConfigurationSectionSpec configurationSection: + case ConfigurationSectionSpec: { _writer.WriteLine($"{addExpr}({Identifier.section});"); } break; - case ComplexTypeSpec { CanInstantiate: true } complexType: + case ComplexTypeSpec complexType when _typeIndex.CanInstantiate(complexType): { EmitBindingLogic(complexType, Identifier.value, Identifier.section, InitializationKind.Declaration, ValueDefaulting.None); _writer.WriteLine($"{addExpr}({Identifier.value});"); @@ -688,8 +743,8 @@ private void EmitBindCoreImplForDictionary(DictionarySpec type) Emit_Foreach_Section_In_ConfigChildren_StartBlock(); - ParsableFromStringSpec keyType = type.KeyType; - TypeSpec elementType = type.ElementType; + ParsableFromStringSpec keyType = (ParsableFromStringSpec)_typeIndex.GetEffectiveTypeSpec(type.KeyTypeRef); + TypeSpec elementType = _typeIndex.GetTypeSpec(type.ElementTypeRef); // Parse key EmitBindingLogic( @@ -717,15 +772,13 @@ void Emit_BindAndAddLogic_ForElement(string parsedKeyExpr) 
useIncrementalStringValueIdentifier: false); } break; - case ConfigurationSectionSpec configurationSection: + case ConfigurationSectionSpec: { _writer.WriteLine($"{instanceIdentifier}[{parsedKeyExpr}] = {Identifier.section};"); } break; case ComplexTypeSpec complexElementType: { - Debug.Assert(complexElementType.CanInstantiate); - if (keyType.StringParsableTypeKind is not StringParsableTypeKind.AssignFromSectionValue) { // Save value to local to avoid parsing twice - during look-up and during add. @@ -746,12 +799,32 @@ void Emit_BindAndAddLogic_ForElement(string parsedKeyExpr) conditionToUseExistingElement += $" && {expressionForElementIsNotNull}"; } - EmitStartBlock($"if (!({conditionToUseExistingElement}))"); - EmitObjectInit(complexElementType, Identifier.element, InitializationKind.SimpleAssignment, Identifier.section); - EmitEndBlock(); + if (_typeIndex.CanInstantiate(complexElementType)) + { + EmitStartBlock($"if (!({conditionToUseExistingElement}))"); + EmitObjectInit(complexElementType, Identifier.element, InitializationKind.SimpleAssignment, Identifier.section); + EmitEndBlock(); - EmitBindingLogic(complexElementType, Identifier.element, Identifier.section, InitializationKind.None, ValueDefaulting.None); - _writer.WriteLine($"{instanceIdentifier}[{parsedKeyExpr}] = {Identifier.element};"); + EmitBindingLogic(); + } + else + { + EmitStartBlock($"if ({conditionToUseExistingElement})"); + EmitBindingLogic(); + EmitEndBlock(); + } + + void EmitBindingLogic() + { + this.EmitBindingLogic( + complexElementType, + Identifier.element, + Identifier.section, + InitializationKind.None, + ValueDefaulting.None); + + _writer.WriteLine($"{instanceIdentifier}[{parsedKeyExpr}] = {Identifier.element};"); + } } break; } @@ -762,16 +835,15 @@ void Emit_BindAndAddLogic_ForElement(string parsedKeyExpr) private void EmitBindCoreImplForObject(ObjectSpec type) { - Debug.Assert(type.HasBindableMembers); + Debug.Assert(_typeIndex.HasBindableMembers(type)); - string keyCacheFieldName 
= GetConfigKeyCacheFieldName(type); + string keyCacheFieldName = TypeIndex.GetConfigKeyCacheFieldName(type); string validateMethodCallExpr = $"{Identifier.ValidateConfigurationKeys}(typeof({type.DisplayString}), {keyCacheFieldName}, {Identifier.configuration}, {Identifier.binderOptions});"; _writer.WriteLine(validateMethodCallExpr); - foreach (PropertySpec property in type.Properties.Values) + foreach (PropertySpec property in type.Properties!) { - bool noSetter_And_IsReadonly = !property.CanSet && property.Type is CollectionSpec { InstantiationStrategy: InstantiationStrategy.ParameterizedConstructor }; - if (property.ShouldBindTo && !noSetter_And_IsReadonly) + if (_typeIndex.ShouldBindTo(property)) { string containingTypeRef = property.IsStatic ? type.DisplayString : Identifier.instance; EmitBindImplForMember( @@ -791,11 +863,9 @@ private bool EmitBindImplForMember( bool canSet, InitializationKind initializationKind) { - TypeSpec effectiveMemberType = member.Type.EffectiveType; - string sectionParseExpr = GetSectionFromConfigurationExpression(member.ConfigurationKeyName); - switch (effectiveMemberType) + switch (_typeIndex.GetEffectiveTypeSpec(member.TypeRef)) { case ParsableFromStringSpec stringParsableType: { @@ -804,8 +874,8 @@ private bool EmitBindImplForMember( bool useDefaultValueIfSectionValueIsNull = initializationKind == InitializationKind.Declaration && member is PropertySpec && - member.Type.IsValueType && - member.Type.SpecKind is not TypeSpecKind.Nullable; + member.TypeRef.IsValueType && + _typeIndex.GetTypeSpec(member.TypeRef) is not NullableSpec; EmitBlankLineIfRequired(); EmitBindingLogic( @@ -840,7 +910,7 @@ member is PropertySpec && EmitBindingLogicForComplexMember(member, memberAccessExpr, sectionIdentifier, canSet); EmitEndBlock(); - return complexType.CanInstantiate; + return _typeIndex.CanInstantiate(complexType); } default: return false; @@ -854,8 +924,8 @@ private void EmitBindingLogicForComplexMember( bool canSet) { - TypeSpec memberType = 
member.Type; - ComplexTypeSpec effectiveMemberType = (ComplexTypeSpec)memberType.EffectiveType; + TypeSpec memberType = _typeIndex.GetTypeSpec(member.TypeRef); + ComplexTypeSpec effectiveMemberType = (ComplexTypeSpec)_typeIndex.GetEffectiveTypeSpec(memberType); string tempIdentifier = GetIncrementalIdentifier(Identifier.temp); InitializationKind initKind; @@ -872,7 +942,7 @@ private void EmitBindingLogicForComplexMember( string effectiveMemberTypeDisplayString = effectiveMemberType.DisplayString; initKind = InitializationKind.None; - if (memberType.SpecKind is TypeSpecKind.Nullable) + if (memberType is NullableSpec) { string nullableTempIdentifier = GetIncrementalIdentifier(Identifier.temp); @@ -902,12 +972,12 @@ private void EmitBindingLogicForComplexMember( Action? writeOnSuccess = !canSet ? null : bindedValueIdentifier => + { + if (memberAccessExpr != bindedValueIdentifier) { - if (memberAccessExpr != bindedValueIdentifier) - { - _writer.WriteLine($"{memberAccessExpr} = {bindedValueIdentifier};"); - } - }; + _writer.WriteLine($"{memberAccessExpr} = {bindedValueIdentifier};"); + } + }; EmitBindingLogic( effectiveMemberType, @@ -927,11 +997,11 @@ private void EmitBindingLogic( ValueDefaulting valueDefaulting, Action? 
writeOnSuccess = null) { - if (!type.HasBindableMembers) + if (!_typeIndex.HasBindableMembers(type)) { if (initKind is not InitializationKind.None) { - if (type.CanInstantiate) + if (_typeIndex.CanInstantiate(type)) { EmitObjectInit(type, memberAccessExpr, initKind, configArgExpr); } @@ -965,7 +1035,7 @@ void EmitBindingLogic(string instanceToBindExpr, InitializationKind initKind) { string bindCoreCall = $@"{nameof(MethodsToGen_CoreBindingHelper.BindCore)}({configArgExpr}, ref {instanceToBindExpr}, defaultValueIfNotFound: {FormatDefaultValueIfNotFound()}, {Identifier.binderOptions});"; - if (type.CanInstantiate) + if (_typeIndex.CanInstantiate(type)) { if (initKind is not InitializationKind.None) { @@ -1018,7 +1088,7 @@ private void EmitBindingLogic( { StringParsableTypeKind.AssignFromSectionValue => stringValueToParse_Expr, StringParsableTypeKind.Enum => $"ParseEnum<{type.DisplayString}>({stringValueToParse_Expr}, () => {sectionPathExpr})", - _ => $"{type.ParseMethodName}({stringValueToParse_Expr}, () => {sectionPathExpr})", + _ => $"{TypeIndex.GetParseMethodName(type)}({stringValueToParse_Expr}, () => {sectionPathExpr})", }; if (!checkForNullSectionValue) @@ -1046,56 +1116,72 @@ private void EmitBindingLogic( private bool EmitObjectInit(ComplexTypeSpec type, string memberAccessExpr, InitializationKind initKind, string configArgExpr) { CollectionSpec? collectionType = type as CollectionSpec; + ObjectSpec? objectType = type as ObjectSpec; + + string? castExpr = null; string initExpr; string effectiveDisplayString = type.DisplayString; if (collectionType is not null) { - if (collectionType is EnumerableSpec { InstantiationStrategy: InstantiationStrategy.Array }) + if (collectionType is ArraySpec) { initExpr = $"new {s_arrayBracketsRegex.Replace(effectiveDisplayString, "[0]", 1)}"; } else { - effectiveDisplayString = (collectionType.TypeToInstantiate ?? 
collectionType).DisplayString; - initExpr = $"new {effectiveDisplayString}()"; + CollectionWithCtorInitSpec collectionWithCtorInitType = (CollectionWithCtorInitSpec)collectionType; + + if (collectionWithCtorInitType.InstantiationConcreteType is not CollectionInstantiationConcreteType.Self) + { + castExpr = $"({collectionWithCtorInitType.DisplayString})"; + } + + effectiveDisplayString = _typeIndex.GetInstantiationTypeDisplayString(collectionWithCtorInitType); + initExpr = $"{castExpr}new {effectiveDisplayString}()"; } } - else if (type.InstantiationStrategy is InstantiationStrategy.ParameterlessConstructor) - { - initExpr = $"new {effectiveDisplayString}()"; - } else { - Debug.Assert(type.InstantiationStrategy is InstantiationStrategy.ParameterizedConstructor); - string initMethodIdentifier = GetInitalizeMethodDisplayString(((ObjectSpec)type)); - initExpr = $"{initMethodIdentifier}({configArgExpr}, {Identifier.binderOptions})"; + Debug.Assert(objectType is not null); + ObjectInstantiationStrategy strategy = objectType.InstantiationStrategy; + + if (strategy is ObjectInstantiationStrategy.ParameterlessConstructor) + { + initExpr = $"new {effectiveDisplayString}()"; + } + else + { + Debug.Assert(strategy is ObjectInstantiationStrategy.ParameterizedConstructor); + string initMethodIdentifier = GetInitalizeMethodDisplayString(((ObjectSpec)type)); + initExpr = $"{initMethodIdentifier}({configArgExpr}, {Identifier.binderOptions})"; + } } switch (initKind) { case InitializationKind.Declaration: { - Debug.Assert(!memberAccessExpr.Contains(".")); + Debug.Assert(!memberAccessExpr.Contains('.')); _writer.WriteLine($"var {memberAccessExpr} = {initExpr};"); } break; case InitializationKind.AssignmentWithNullCheck: { - if (collectionType is CollectionSpec + + if (collectionType is CollectionWithCtorInitSpec { - InstantiationStrategy: InstantiationStrategy.ParameterizedConstructor or InstantiationStrategy.ToEnumerableMethod - }) + InstantiationStrategy: 
CollectionInstantiationStrategy.CopyConstructor or CollectionInstantiationStrategy.LinqToDictionary + } collectionWithCtorInitType) { - if (collectionType.InstantiationStrategy is InstantiationStrategy.ParameterizedConstructor) - { - _writer.WriteLine($"{memberAccessExpr} = {memberAccessExpr} is null ? {initExpr} : new {effectiveDisplayString}({memberAccessExpr});"); - } - else - { - Debug.Assert(collectionType is DictionarySpec); - _writer.WriteLine($"{memberAccessExpr} = {memberAccessExpr} is null ? {initExpr} : {memberAccessExpr}.ToDictionary(pair => pair.Key, pair => pair.Value);"); - } + string assignmentValueIfMemberNull = collectionWithCtorInitType.InstantiationStrategy is CollectionInstantiationStrategy.CopyConstructor + ? $"new {effectiveDisplayString}({memberAccessExpr})" + : $"{memberAccessExpr}.ToDictionary(pair => pair.Key, pair => pair.Value)"; + + Debug.Assert(castExpr is not null || collectionWithCtorInitType.InstantiationConcreteType is CollectionInstantiationConcreteType.Self); + assignmentValueIfMemberNull = $"{castExpr}{assignmentValueIfMemberNull}"; + + _writer.WriteLine($"{memberAccessExpr} = {memberAccessExpr} is null ? 
{initExpr} : {assignmentValueIfMemberNull};"); } else { @@ -1130,20 +1216,25 @@ private void EmitIConfigurationHasValueOrChildrenCheck(bool voidReturn) _writer.WriteLine(); } - private void EmitCollectionCastIfRequired(CollectionSpec type, out string instanceIdentifier) + private void EmitCollectionCastIfRequired(CollectionWithCtorInitSpec type, out string instanceIdentifier) { - instanceIdentifier = Identifier.instance; - if (type.PopulationStrategy is CollectionPopulationStrategy.Cast_Then_Add) + if (type.PopulationCastType is CollectionPopulationCastType.NotApplicable) { - instanceIdentifier = Identifier.temp; - _writer.WriteLine($$""" - if ({{Identifier.instance}} is not {{type.PopulationCastType!.DisplayString}} {{instanceIdentifier}}) + instanceIdentifier = Identifier.instance; + return; + } + + string castTypeDisplayString = _typeIndex.GetPopulationCastTypeDisplayString(type); + instanceIdentifier = Identifier.temp; + + _writer.WriteLine($$""" + if ({{Identifier.instance}} is not {{castTypeDisplayString}} {{instanceIdentifier}}) { return; } """); - _writer.WriteLine(); - } + _writer.WriteLine(); + } private void Emit_Foreach_Section_In_ConfigChildren_StartBlock() => @@ -1171,9 +1262,6 @@ private static string GetConditionKindExpr(ref bool isFirstType) return "else if"; } - - private static string GetConfigKeyCacheFieldName(ObjectSpec type) => - $"s_configKeys_{type.IdentifierCompatibleSubstring}"; } } } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/Helpers.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/Helpers.cs index a7db2fb516397..34a97d3c64c76 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/Helpers.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/Helpers.cs @@ -1,7 +1,6 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. 
-using System; using System.Collections.Generic; using System.Diagnostics; using System.Reflection; @@ -135,30 +134,29 @@ private static class Identifier public const string Value = nameof(Value); } - private bool ShouldEmitBindingExtensions() => - ShouldEmitMethods(MethodsToGen_ConfigurationBinder.Any) || - ShouldEmitMethods(MethodsToGen_Extensions_OptionsBuilder.Any) || - ShouldEmitMethods(MethodsToGen_Extensions_ServiceCollection.Any); + private bool ShouldEmitMethods(MethodsToGen methods) => (_interceptorInfo.MethodsToGen & methods) != 0; - private void EmitInterceptsLocationAnnotations(Enum generatedBindingOverload) + private void EmitInterceptsLocationAnnotations(MethodsToGen overload) { + IEnumerable? infoList = _interceptorInfo.GetInfo(overload); + bool interceptsCalls = infoList is not null; + // The only time a generated binding method won't have any locations to // intercept is when either of these methods are used as helpers for // other generated OptionsBuilder or ServiceCollection binding extensions. - bool interceptsCalls = _sourceGenSpec.InterceptionInfo.TryGetValue(generatedBindingOverload, out List? 
infoList); Debug.Assert(interceptsCalls || - generatedBindingOverload is MethodsToGen_Extensions_ServiceCollection.Configure_T_name_BinderOptions || - generatedBindingOverload is MethodsToGen_Extensions_OptionsBuilder.Bind_T_BinderOptions); + overload is MethodsToGen.ServiceCollectionExt_Configure_T_name_BinderOptions || + overload is MethodsToGen.OptionsBuilderExt_Bind_T_BinderOptions); if (interceptsCalls) { - EmitInterceptsLocationAnnotations(infoList); + EmitInterceptsLocationAnnotations(infoList!); } } - private void EmitInterceptsLocationAnnotations(List infoList) + private void EmitInterceptsLocationAnnotations(IEnumerable infoList) { - foreach (InterceptorLocationInfo info in infoList) + foreach (InvocationLocationInfo info in infoList) { _writer.WriteLine($@"[{Identifier.InterceptsLocation}(@""{info.FilePath}"", {info.LineNumber}, {info.CharacterNumber})]"); } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/OptionsBuilderConfigurationExtensions.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/OptionsBuilderConfigurationExtensions.cs index 7fd5d695eaf45..fdc4286e34c55 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/OptionsBuilderConfigurationExtensions.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/OptionsBuilderConfigurationExtensions.cs @@ -7,11 +7,9 @@ public sealed partial class ConfigurationBindingGenerator { private sealed partial class Emitter { - private bool ShouldEmitMethods(MethodsToGen_Extensions_OptionsBuilder methods) => (_sourceGenSpec.MethodsToGen_OptionsBuilderExt & methods) != 0; - private void EmitBindingExtensions_OptionsBuilder() { - if (!ShouldEmitMethods(MethodsToGen_Extensions_OptionsBuilder.Any)) + if (!ShouldEmitMethods(MethodsToGen.OptionsBuilderExt_Any)) { return; } @@ -24,7 +22,7 @@ private void EmitBindingExtensions_OptionsBuilder() private void EmitBindMethods_Extensions_OptionsBuilder() { - if 
(!ShouldEmitMethods(MethodsToGen_Extensions_OptionsBuilder.Bind)) + if (!ShouldEmitMethods(MethodsToGen.OptionsBuilderExt_Bind)) { return; } @@ -32,15 +30,15 @@ private void EmitBindMethods_Extensions_OptionsBuilder() const string documentation = @"/// Registers a configuration instance which will bind against."; const string paramList = $"{Identifier.IConfiguration} {Identifier.config}"; - if (ShouldEmitMethods(MethodsToGen_Extensions_OptionsBuilder.Bind_T)) + if (ShouldEmitMethods(MethodsToGen.OptionsBuilderExt_Bind_T)) { - EmitMethodStartBlock(MethodsToGen_Extensions_OptionsBuilder.Bind_T, "Bind", paramList, documentation); + EmitMethodStartBlock(MethodsToGen.OptionsBuilderExt_Bind_T, "Bind", paramList, documentation); _writer.WriteLine($"return Bind({Identifier.optionsBuilder}, {Identifier.config}, {Identifier.configureBinder}: null);"); EmitEndBlock(); } EmitMethodStartBlock( - MethodsToGen_Extensions_OptionsBuilder.Bind_T_BinderOptions, + MethodsToGen.OptionsBuilderExt_Bind_T_BinderOptions, "Bind", paramList + $", {TypeDisplayString.NullableActionOfBinderOptions} {Identifier.configureBinder}", documentation); @@ -57,7 +55,7 @@ private void EmitBindMethods_Extensions_OptionsBuilder() private void EmitBindConfigurationMethod() { - if (!ShouldEmitMethods(MethodsToGen_Extensions_OptionsBuilder.BindConfiguration_T_path_BinderOptions)) + if (!ShouldEmitMethods(MethodsToGen.OptionsBuilderExt_BindConfiguration_T_path_BinderOptions)) { return; } @@ -65,7 +63,7 @@ private void EmitBindConfigurationMethod() const string documentation = $@"/// Registers the dependency injection container to bind against the obtained from the DI service provider."; string paramList = $"string {Identifier.configSectionPath}, {TypeDisplayString.NullableActionOfBinderOptions} {Identifier.configureBinder} = null"; - EmitMethodStartBlock(MethodsToGen_Extensions_OptionsBuilder.BindConfiguration, "BindConfiguration", paramList, documentation); + 
EmitMethodStartBlock(MethodsToGen.OptionsBuilderExt_BindConfiguration, "BindConfiguration", paramList, documentation); EmitCheckForNullArgument_WithBlankLine(Identifier.optionsBuilder); EmitCheckForNullArgument_WithBlankLine(Identifier.configSectionPath); @@ -89,7 +87,7 @@ private void EmitBindConfigurationMethod() EmitEndBlock(); } - private void EmitMethodStartBlock(MethodsToGen_Extensions_OptionsBuilder method, string methodName, string paramList, string documentation) + private void EmitMethodStartBlock(MethodsToGen method, string methodName, string paramList, string documentation) { paramList = $"this {TypeDisplayString.OptionsBuilderOfTOptions} {Identifier.optionsBuilder}, {paramList}"; EmitBlankLineIfRequired(); diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/OptionsConfigurationServiceCollectionExtensions.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/OptionsConfigurationServiceCollectionExtensions.cs index 7577e0c49de4d..daa3b79db8abc 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/OptionsConfigurationServiceCollectionExtensions.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Emitter/OptionsConfigurationServiceCollectionExtensions.cs @@ -7,11 +7,9 @@ public sealed partial class ConfigurationBindingGenerator { private sealed partial class Emitter { - private bool ShouldEmitMethods(MethodsToGen_Extensions_ServiceCollection methods) => (_sourceGenSpec.MethodsToGen_ServiceCollectionExt & methods) != 0; - private void EmitBindingExtensions_IServiceCollection() { - if (!ShouldEmitMethods(MethodsToGen_Extensions_ServiceCollection.Any)) + if (!ShouldEmitMethods(MethodsToGen.ServiceCollectionExt_Any)) { return; } @@ -26,26 +24,26 @@ private void EmitConfigureMethods() const string defaultNameExpr = "string.Empty"; string configParam = $"{Identifier.IConfiguration} {Identifier.config}"; - if 
(ShouldEmitMethods(MethodsToGen_Extensions_ServiceCollection.Configure_T)) + if (ShouldEmitMethods(MethodsToGen.ServiceCollectionExt_Configure_T)) { - EmitStartMethod(MethodsToGen_Extensions_ServiceCollection.Configure_T, configParam); + EmitStartMethod(MethodsToGen.ServiceCollectionExt_Configure_T, configParam); _writer.WriteLine($"return {Identifier.Configure}<{Identifier.TOptions}>({Identifier.services}, {defaultNameExpr}, {Identifier.config}, {Identifier.configureOptions}: null);"); EmitEndBlock(); } - if (ShouldEmitMethods(MethodsToGen_Extensions_ServiceCollection.Configure_T_name)) + if (ShouldEmitMethods(MethodsToGen.ServiceCollectionExt_Configure_T_name)) { EmitStartMethod( - MethodsToGen_Extensions_ServiceCollection.Configure_T_name, + MethodsToGen.ServiceCollectionExt_Configure_T_name, paramList: $"string? {Identifier.name}, " + configParam); _writer.WriteLine($"return {Identifier.Configure}<{Identifier.TOptions}>({Identifier.services}, {Identifier.name}, {Identifier.config}, {Identifier.configureOptions}: null);"); EmitEndBlock(); } - if (ShouldEmitMethods(MethodsToGen_Extensions_ServiceCollection.Configure_T_BinderOptions)) + if (ShouldEmitMethods(MethodsToGen.ServiceCollectionExt_Configure_T_BinderOptions)) { EmitStartMethod( - MethodsToGen_Extensions_ServiceCollection.Configure_T_BinderOptions, + MethodsToGen.ServiceCollectionExt_Configure_T_BinderOptions, paramList: configParam + $", {TypeDisplayString.NullableActionOfBinderOptions} {Identifier.configureOptions}"); _writer.WriteLine($"return {Identifier.Configure}<{Identifier.TOptions}>({Identifier.services}, {defaultNameExpr}, {Identifier.config}, {Identifier.configureOptions});"); EmitEndBlock(); @@ -54,7 +52,7 @@ private void EmitConfigureMethods() // Core Configure method that the other overloads call. // Like the others, it is public API that could be called directly by users. // So, it is always generated whenever a Configure overload is called. 
- EmitStartMethod(MethodsToGen_Extensions_ServiceCollection.Configure_T_name_BinderOptions, paramList: $"string? {Identifier.name}, " + configParam + $", {TypeDisplayString.NullableActionOfBinderOptions} {Identifier.configureOptions}"); + EmitStartMethod(MethodsToGen.ServiceCollectionExt_Configure_T_name_BinderOptions, paramList: $"string? {Identifier.name}, " + configParam + $", {TypeDisplayString.NullableActionOfBinderOptions} {Identifier.configureOptions}"); EmitCheckForNullArgument_WithBlankLine(Identifier.services); EmitCheckForNullArgument_WithBlankLine(Identifier.config); _writer.WriteLine($$""" @@ -65,7 +63,7 @@ private void EmitConfigureMethods() EmitEndBlock(); } - private void EmitStartMethod(MethodsToGen_Extensions_ServiceCollection overload, string paramList) + private void EmitStartMethod(MethodsToGen overload, string paramList) { paramList = $"this {Identifier.IServiceCollection} {Identifier.services}, {paramList}"; diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Microsoft.Extensions.Configuration.Binder.SourceGeneration.csproj b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Microsoft.Extensions.Configuration.Binder.SourceGeneration.csproj index de629b1a641ac..b66741549b458 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Microsoft.Extensions.Configuration.Binder.SourceGeneration.csproj +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Microsoft.Extensions.Configuration.Binder.SourceGeneration.csproj @@ -9,21 +9,30 @@ $(DefineConstants);LAUNCH_DEBUGGER + + + $(NetCoreAppToolCurrent);netstandard2.0 + + - - - + + + + + + + @@ -36,20 +45,20 @@ - + + - - + + + - - diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/BinderInvocation.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/BinderInvocation.cs index ad7c4c09204d4..b1cf51acb3b4a 100644 --- 
a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/BinderInvocation.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/BinderInvocation.cs @@ -1,4 +1,4 @@ -// Licensed to the .NET Foundation under one or more agreements. +// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics; @@ -9,8 +9,17 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { - internal sealed record BinderInvocation(IInvocationOperation Operation, Location Location) + internal sealed class BinderInvocation { + private BinderInvocation(IInvocationOperation operation, Location location) + { + Operation = operation; + Location = location; + } + + public IInvocationOperation Operation { get; } + public Location Location { get; } + public static BinderInvocation? Create(GeneratorSyntaxContext context, CancellationToken cancellationToken) { Debug.Assert(IsCandidateSyntaxNode(context.Node)); @@ -35,8 +44,8 @@ public static bool IsCandidateSyntaxNode(SyntaxNode node) } && IsCandidateBindingMethodName(memberName); static bool IsCandidateBindingMethodName(string name) => - IsCandidateMethodName_ConfigurationBinder(name) || - IsCandidateMethodName_OptionsBuilderConfigurationExtensions(name) || + IsValidMethodName_ConfigurationBinder(name) || + IsValidMethodName_OptionsBuilderConfigurationExtensions(name) || IsValidMethodName_OptionsConfigurationServiceCollectionExtensions(name); } @@ -62,10 +71,10 @@ public static bool IsBindingOperation(IInvocationOperation operation) { "ConfigurationBinder" => containingNamespaceName is "Microsoft.Extensions.Configuration" && - IsCandidateMethodName_ConfigurationBinder(methodName), + IsValidMethodName_ConfigurationBinder(methodName), "OptionsBuilderConfigurationExtensions" => containingNamespaceName is "Microsoft.Extensions.DependencyInjection" && - 
IsCandidateMethodName_OptionsBuilderConfigurationExtensions(methodName), + IsValidMethodName_OptionsBuilderConfigurationExtensions(methodName), "OptionsConfigurationServiceCollectionExtensions" => containingNamespaceName is "Microsoft.Extensions.DependencyInjection" && IsValidMethodName_OptionsConfigurationServiceCollectionExtensions(methodName), @@ -73,16 +82,10 @@ containingNamespaceName is "Microsoft.Extensions.DependencyInjection" && }; } - private static bool IsCandidateMethodName_ConfigurationBinder(string name) => name is - nameof(MethodsToGen_ConfigurationBinder.Bind) or - nameof(MethodsToGen_ConfigurationBinder.Get) or - nameof(MethodsToGen_ConfigurationBinder.GetValue); + private static bool IsValidMethodName_ConfigurationBinder(string name) => name is "Bind" or "Get" or "GetValue"; - private static bool IsCandidateMethodName_OptionsBuilderConfigurationExtensions(string name) => name is - nameof(MethodsToGen_Extensions_OptionsBuilder.Bind) or - nameof(MethodsToGen_Extensions_OptionsBuilder.BindConfiguration); + private static bool IsValidMethodName_OptionsBuilderConfigurationExtensions(string name) => name is "Bind" or "BindConfiguration"; - private static bool IsValidMethodName_OptionsConfigurationServiceCollectionExtensions(string name) => name is - nameof(MethodsToGen_Extensions_ServiceCollection.Configure); + private static bool IsValidMethodName_OptionsConfigurationServiceCollectionExtensions(string name) => name is "Configure"; } } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/ConfigurationBinder.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/ConfigurationBinder.cs index 3996142adf908..645786e35c1c5 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/ConfigurationBinder.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/ConfigurationBinder.cs @@ -6,28 +6,29 @@ using System.Linq; using Microsoft.CodeAnalysis.Operations; using 
Microsoft.CodeAnalysis; +using System.Diagnostics; namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { public sealed partial class ConfigurationBindingGenerator { - private sealed partial class Parser + internal sealed partial class Parser { private void ParseInvocation_ConfigurationBinder(BinderInvocation invocation) { switch (invocation.Operation.TargetMethod.Name) { - case nameof(MethodsToGen_ConfigurationBinder.Bind): + case "Bind": { ParseBindInvocation_ConfigurationBinder(invocation); } break; - case nameof(MethodsToGen_ConfigurationBinder.Get): + case "Get": { ParseGetInvocation(invocation); } break; - case nameof(MethodsToGen_ConfigurationBinder.GetValue): + case "GetValue": { ParseGetValueInvocation(invocation); } @@ -46,39 +47,39 @@ private void ParseBindInvocation_ConfigurationBinder(BinderInvocation invocation return; } - MethodsToGen_ConfigurationBinder overload = MethodsToGen_ConfigurationBinder.None; + MethodsToGen overload = MethodsToGen.None; if (paramCount is 2) { - overload = MethodsToGen_ConfigurationBinder.Bind_instance; + overload = MethodsToGen.ConfigBinder_Bind_instance; } else if (paramCount is 3) { if (@params[1].Type.SpecialType is SpecialType.System_String) { - overload = MethodsToGen_ConfigurationBinder.Bind_key_instance; + overload = MethodsToGen.ConfigBinder_Bind_key_instance; } else if (SymbolEqualityComparer.Default.Equals(@params[2].Type, _typeSymbols.ActionOfBinderOptions)) { - overload = MethodsToGen_ConfigurationBinder.Bind_instance_BinderOptions; + overload = MethodsToGen.ConfigBinder_Bind_instance_BinderOptions; } } - if (overload is MethodsToGen_ConfigurationBinder.None) + if (overload is MethodsToGen.None) { return; } int instanceIndex = overload switch { - MethodsToGen_ConfigurationBinder.Bind_instance => 1, - MethodsToGen_ConfigurationBinder.Bind_instance_BinderOptions => 1, - MethodsToGen_ConfigurationBinder.Bind_key_instance => 2, + MethodsToGen.ConfigBinder_Bind_instance => 1, + 
MethodsToGen.ConfigBinder_Bind_instance_BinderOptions => 1, + MethodsToGen.ConfigBinder_Bind_key_instance => 2, _ => throw new InvalidOperationException() }; IArgumentOperation instanceArg = GetArgumentForParameterAtIndex(operation.Arguments, instanceIndex); - if (instanceArg.Parameter.Type.SpecialType != SpecialType.System_Object) + if (instanceArg.Parameter?.Type.SpecialType is not SpecialType.System_Object) { return; } @@ -87,20 +88,17 @@ private void ParseBindInvocation_ConfigurationBinder(BinderInvocation invocation if (!IsValidRootConfigType(type)) { - _context.ReportDiagnostic(Diagnostic.Create(Diagnostics.CouldNotDetermineTypeInfo, invocation.Location)); + RecordDiagnostic(DiagnosticDescriptors.CouldNotDetermineTypeInfo, invocation.Location); return; } - if (type!.IsValueType) + if (type.IsValueType) { - _context.ReportDiagnostic(Diagnostic.Create(Diagnostics.ValueTypesInvalidForBind, invocation.Location, type)); + RecordDiagnostic(DiagnosticDescriptors.ValueTypesInvalidForBind, invocation.Location, messageArgs: new object[] { type }); return; } - if (GetTargetTypeForRootInvocationCore(type, invocation.Location) is TypeSpec typeSpec) - { - RegisterInterceptor(overload, typeSpec, invocation.Operation); - } + EnqueueTargetTypeForRootInvocation(type, overload, invocation); static ITypeSymbol? ResolveType(IOperation conversionOperation) => conversionOperation switch @@ -144,7 +142,7 @@ private void ParseGetInvocation(BinderInvocation invocation) return; } - MethodsToGen_ConfigurationBinder overload = MethodsToGen_ConfigurationBinder.None; + MethodsToGen overload = MethodsToGen.None; ITypeSymbol? 
type; if (targetMethod.IsGenericMethod) @@ -158,11 +156,11 @@ private void ParseGetInvocation(BinderInvocation invocation) if (paramCount is 1) { - overload = MethodsToGen_ConfigurationBinder.Get_T; + overload = MethodsToGen.ConfigBinder_Get_T; } else if (paramCount is 2 && SymbolEqualityComparer.Default.Equals(@params[1].Type, _typeSymbols.ActionOfBinderOptions)) { - overload = MethodsToGen_ConfigurationBinder.Get_T_BinderOptions; + overload = MethodsToGen.ConfigBinder_Get_T_BinderOptions; } } else if (paramCount > 3) @@ -176,20 +174,15 @@ private void ParseGetInvocation(BinderInvocation invocation) if (paramCount is 2) { - overload = MethodsToGen_ConfigurationBinder.Get_TypeOf; + overload = MethodsToGen.ConfigBinder_Get_TypeOf; } else if (paramCount is 3 && SymbolEqualityComparer.Default.Equals(@params[2].Type, _typeSymbols.ActionOfBinderOptions)) { - overload = MethodsToGen_ConfigurationBinder.Get_TypeOf_BinderOptions; + overload = MethodsToGen.ConfigBinder_Get_TypeOf_BinderOptions; } } - if (GetTargetTypeForRootInvocation(type, invocation.Location) is TypeSpec typeSpec) - { - RegisterInvocation(overload, invocation.Operation); - RegisterTypeForGetCoreGen(typeSpec); - } - + EnqueueTargetTypeForRootInvocation(type, overload, invocation); } private void ParseGetValueInvocation(BinderInvocation invocation) @@ -199,7 +192,7 @@ private void ParseGetValueInvocation(BinderInvocation invocation) ImmutableArray @params = targetMethod.Parameters; int paramCount = @params.Length; - MethodsToGen_ConfigurationBinder overload = MethodsToGen_ConfigurationBinder.None; + MethodsToGen overload = MethodsToGen.None; ITypeSymbol? 
type; if (targetMethod.IsGenericMethod) @@ -213,11 +206,11 @@ private void ParseGetValueInvocation(BinderInvocation invocation) if (paramCount is 2) { - overload = MethodsToGen_ConfigurationBinder.GetValue_T_key; + overload = MethodsToGen.ConfigBinder_GetValue_T_key; } else if (paramCount is 3 && SymbolEqualityComparer.Default.Equals(@params[2].Type, type)) { - overload = MethodsToGen_ConfigurationBinder.GetValue_T_key_defaultValue; + overload = MethodsToGen.ConfigBinder_GetValue_T_key_defaultValue; } } else if (paramCount > 4) @@ -236,45 +229,56 @@ private void ParseGetValueInvocation(BinderInvocation invocation) if (paramCount is 3) { - overload = MethodsToGen_ConfigurationBinder.GetValue_TypeOf_key; + overload = MethodsToGen.ConfigBinder_GetValue_TypeOf_key; } else if (paramCount is 4 && @params[3].Type.SpecialType is SpecialType.System_Object) { - overload = MethodsToGen_ConfigurationBinder.GetValue_TypeOf_key_defaultValue; + overload = MethodsToGen.ConfigBinder_GetValue_TypeOf_key_defaultValue; } } - ITypeSymbol effectiveType = (IsNullable(type, out ITypeSymbol? underlyingType) ? underlyingType : type)!; - if (!IsValidRootConfigType(type)) { - _context.ReportDiagnostic(Diagnostic.Create(Diagnostics.CouldNotDetermineTypeInfo, invocation.Location)); + RecordDiagnostic(DiagnosticDescriptors.CouldNotDetermineTypeInfo, invocation.Location); return; } - if (IsParsableFromString(effectiveType, out _) && - GetTargetTypeForRootInvocationCore(type, invocation.Location) is TypeSpec typeSpec) + ITypeSymbol effectiveType = IsNullable(type, out ITypeSymbol? underlyingType) ? 
underlyingType : type; + + if (IsParsableFromString(effectiveType, out _)) { - RegisterInvocation(overload, invocation.Operation); - RegisterTypeForMethodGen(MethodsToGen_CoreBindingHelper.GetValueCore, typeSpec); + EnqueueTargetTypeForRootInvocation(type, overload, invocation); } } - private void RegisterInvocation(MethodsToGen_ConfigurationBinder overload, IInvocationOperation operation) + private void RegisterInterceptor_ConfigurationBinder(TypeParseInfo typeParseInfo, TypeSpec typeSpec) { - _sourceGenSpec.MethodsToGen_ConfigurationBinder |= overload; - RegisterInterceptor(overload, operation); - } + MethodsToGen overload = typeParseInfo.BindingOverload; + IInvocationOperation invocationOperation = typeParseInfo.BinderInvocation!.Operation; + Debug.Assert((MethodsToGen.ConfigBinder_Any & overload) is not 0); - /// - /// Registers generated Bind methods as interceptors. This is done differently from other root - /// methods because we need to - /// explicitly account for the type to bind, to avoid type-check issues for polymorphic objects. - /// - private void RegisterInterceptor(MethodsToGen_ConfigurationBinder overload, TypeSpec typeSpec, IInvocationOperation operation) - { - _sourceGenSpec.MethodsToGen_ConfigurationBinder |= overload; - _sourceGenSpec.InterceptionInfo_ConfigBinder.RegisterOverloadInfo(overload, typeSpec, operation); + if ((MethodsToGen.ConfigBinder_Bind & overload) is not 0) + { + if (typeSpec is ComplexTypeSpec complexTypeSpec && + _helperInfoBuilder!.TryRegisterTransitiveTypesForMethodGen(complexTypeSpec.TypeRef)) + { + _interceptorInfoBuilder.RegisterInterceptor_ConfigBinder_Bind(overload, complexTypeSpec, invocationOperation); + } + } + else + { + Debug.Assert((MethodsToGen.ConfigBinder_Get & overload) is not 0 || + (MethodsToGen.ConfigBinder_GetValue & overload) is not 0); + + bool registered = (MethodsToGen.ConfigBinder_Get & overload) is not 0 + ? 
_helperInfoBuilder!.TryRegisterTypeForGetGen(typeSpec) + : _helperInfoBuilder!.TryRegisterTypeForGetValueGen(typeSpec); + + if (registered) + { + _interceptorInfoBuilder.RegisterInterceptor(overload, invocationOperation); + } + } } } } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/Diagnostics.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/DiagnosticDescriptors.cs similarity index 82% rename from src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/Diagnostics.cs rename to src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/DiagnosticDescriptors.cs index d6d816545bcd0..3f694c78be830 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/Diagnostics.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/DiagnosticDescriptors.cs @@ -9,9 +9,9 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { public sealed partial class ConfigurationBindingGenerator { - private sealed partial class Parser + internal sealed partial class Parser { - internal static class Diagnostics + private static class DiagnosticDescriptors { public static DiagnosticDescriptor TypeNotSupported { get; } = CreateTypeNotSupportedDescriptor(nameof(SR.TypeNotSupported)); public static DiagnosticDescriptor MissingPublicInstanceConstructor { get; } = CreateTypeNotSupportedDescriptor(nameof(SR.MissingPublicInstanceConstructor)); @@ -62,6 +62,20 @@ private static DiagnosticDescriptor CreateTypeNotSupportedDescriptor(string name category: ProjectName, defaultSeverity: DiagnosticSeverity.Warning, isEnabledByDefault: true); + + public static DiagnosticDescriptor GetNotSupportedDescriptor(NotSupportedReason reason) => + reason switch + { + NotSupportedReason.UnknownType => TypeNotSupported, + NotSupportedReason.MissingPublicInstanceConstructor => MissingPublicInstanceConstructor, + NotSupportedReason.CollectionNotSupported => CollectionNotSupported, + 
NotSupportedReason.DictionaryKeyNotSupported => DictionaryKeyNotSupported, + NotSupportedReason.ElementTypeNotSupported => ElementTypeNotSupported, + NotSupportedReason.MultipleParameterizedConstructors => MultipleParameterizedConstructors, + NotSupportedReason.MultiDimArraysNotSupported => MultiDimArraysNotSupported, + NotSupportedReason.NullableUnderlyingTypeNotSupported => NullableUnderlyingTypeNotSupported, + _ => throw new InvalidOperationException() + }; } } } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/Extensions.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/Extensions.cs index fa0b3691ec404..f685842639966 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/Extensions.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/Extensions.cs @@ -8,6 +8,54 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { + public sealed partial class ConfigurationBindingGenerator + { + internal sealed partial class Parser + { + private readonly struct TypeParseInfo + { + public ITypeSymbol TypeSymbol { get; private init; } + public string TypeName { get; private init; } + public MethodsToGen BindingOverload { get; private init; } + public BinderInvocation BinderInvocation { get; private init; } + public ContainingTypeDiagnosticInfo? ContainingTypeDiagnosticInfo { get; private init; } + + public static TypeParseInfo Create(ITypeSymbol typeSymbol, MethodsToGen overload, BinderInvocation invocation, ContainingTypeDiagnosticInfo? containingTypeDiagInfo = null) => + new TypeParseInfo + { + TypeSymbol = typeSymbol, + TypeName = typeSymbol.GetName(), + BindingOverload = overload, + BinderInvocation = invocation, + ContainingTypeDiagnosticInfo = containingTypeDiagInfo, + }; + + public TypeParseInfo ToTransitiveTypeParseInfo(ITypeSymbol memberType, DiagnosticDescriptor? diagDescriptor = null, string? memberName = null) + { + ContainingTypeDiagnosticInfo? 
diagnosticInfo = diagDescriptor is null + ? null + : new() + { + TypeName = TypeName, + Descriptor = diagDescriptor, + MemberName = memberName, + ContainingTypeInfo = ContainingTypeDiagnosticInfo, + }; + + return Create(memberType, BindingOverload, BinderInvocation, diagnosticInfo); + } + } + + private sealed class ContainingTypeDiagnosticInfo + { + public required string TypeName { get; init; } + public required string? MemberName { get; init; } + public required DiagnosticDescriptor Descriptor { get; init; } + public required ContainingTypeDiagnosticInfo? ContainingTypeInfo { get; init; } + } + } + } + internal static class ParserExtensions { private static readonly SymbolDisplayFormat s_identifierCompatibleFormat = new SymbolDisplayFormat( @@ -16,6 +64,12 @@ internal static class ParserExtensions genericsOptions: SymbolDisplayGenericsOptions.None, miscellaneousOptions: SymbolDisplayMiscellaneousOptions.UseSpecialTypes); + private static readonly SymbolDisplayFormat s_minimalDisplayFormat = new SymbolDisplayFormat( + globalNamespaceStyle: SymbolDisplayGlobalNamespaceStyle.Omitted, + typeQualificationStyle: SymbolDisplayTypeQualificationStyle.NameAndContainingTypes, + genericsOptions: SymbolDisplayGenericsOptions.IncludeTypeParameters, + miscellaneousOptions: SymbolDisplayMiscellaneousOptions.UseSpecialTypes); + public static void RegisterCacheEntry(this Dictionary cache, TKey key, TEntry entry) where TKey : notnull where TValue : ICollection, new() @@ -28,12 +82,6 @@ public static void RegisterCacheEntry(this Dictionary> source, out ComplexTypeSpec Key, out List Value) - { - Key = (ComplexTypeSpec)source.Key; - Value = source.Value; - } - public static string ToIdentifierCompatibleSubstring(this ITypeSymbol type) { if (type is IArrayTypeSymbol arrayType) @@ -64,5 +112,15 @@ public static string ToIdentifierCompatibleSubstring(this ITypeSymbol type) return sb.ToString(); } + + public static (string? 
Namespace, string DisplayString, string Name) GetTypeName(this ITypeSymbol type) + { + string? @namespace = type.ContainingNamespace is { IsGlobalNamespace: false } containingNamespace ? containingNamespace.ToDisplayString() : null; + string displayString = type.ToDisplayString(s_minimalDisplayFormat); + string name = (@namespace is null ? string.Empty : @namespace + ".") + displayString.Replace(".", "+"); + return (@namespace, displayString, name); + } + + public static string GetName(this ITypeSymbol type) => GetTypeName(type).Name; } } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/KnownTypeSymbols.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/KnownTypeSymbols.cs similarity index 96% rename from src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/KnownTypeSymbols.cs rename to src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/KnownTypeSymbols.cs index e381dc9c7c43e..07dae8689782e 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/KnownTypeSymbols.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/KnownTypeSymbols.cs @@ -11,7 +11,7 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { - internal sealed record KnownTypeSymbols + internal sealed class KnownTypeSymbols { public CSharpCompilation Compilation { get; } @@ -37,7 +37,7 @@ internal sealed record KnownTypeSymbols public INamedTypeSymbol? OptionsConfigurationServiceCollectionExtensions { get; } public INamedTypeSymbol GenericIList_Unbound { get; } - public INamedTypeSymbol GenericICollection_Unbound { get; } + public INamedTypeSymbol? 
GenericICollection_Unbound { get; } public INamedTypeSymbol GenericICollection { get; } public INamedTypeSymbol GenericIEnumerable_Unbound { get; } public INamedTypeSymbol IEnumerable { get; } @@ -61,7 +61,8 @@ public KnownTypeSymbols(CSharpCompilation compilation) { Compilation = compilation; - // Primitives (needed because they are Microsoft.CodeAnalysis.SpecialType.None) + // Primitives + String = compilation.GetSpecialType(SpecialType.System_String); CultureInfo = compilation.GetBestTypeByMetadataName(typeof(CultureInfo)); DateOnly = compilation.GetBestTypeByMetadataName("System.DateOnly"); DateTimeOffset = compilation.GetBestTypeByMetadataName(typeof(DateTimeOffset)); @@ -103,7 +104,7 @@ public KnownTypeSymbols(CSharpCompilation compilation) // Used for type equivalency checks for unbound generics. The parameters of the types // retured by the Roslyn Get*Type* APIs are not unbound, so we construct unbound // generics to equal those corresponding to generic types in the input type graphs. 
- GenericICollection_Unbound = GenericICollection?.ConstructUnboundGenericType(); + GenericICollection_Unbound = GenericICollection.ConstructUnboundGenericType(); GenericIDictionary_Unbound = GenericIDictionary?.ConstructUnboundGenericType(); GenericIEnumerable_Unbound = compilation.GetSpecialType(SpecialType.System_Collections_Generic_IEnumerable_T).ConstructUnboundGenericType(); GenericIList_Unbound = compilation.GetSpecialType(SpecialType.System_Collections_Generic_IList_T).ConstructUnboundGenericType(); diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/OptionsBuilderConfigurationExtensions.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/OptionsBuilderConfigurationExtensions.cs index 9cf59a120e1fd..eb0ab086bcd58 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/OptionsBuilderConfigurationExtensions.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/OptionsBuilderConfigurationExtensions.cs @@ -10,7 +10,7 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { public sealed partial class ConfigurationBindingGenerator { - private sealed partial class Parser + internal sealed partial class Parser { private void ParseInvocation_OptionsBuilderExt(BinderInvocation invocation) { @@ -29,22 +29,17 @@ private void ParseInvocation_OptionsBuilderExt(BinderInvocation invocation) // This would violate generic type constraint; any such invocation could not have been included in the initial parser. 
Debug.Assert(typeSymbol?.IsValueType is not true); - if (GetTargetTypeForRootInvocation(typeSymbol, invocation.Location) is not ComplexTypeSpec typeSpec) - { - return; - } - if (targetMethod.Name is "Bind") { - ParseBindInvocation_OptionsBuilderExt(invocation, typeSpec); + ParseBindInvocation_OptionsBuilderExt(invocation, typeSymbol); } else if (targetMethod.Name is "BindConfiguration") { - ParseBindConfigurationInvocation(invocation, typeSpec); + ParseBindConfigurationInvocation(invocation, typeSymbol); } } - private void ParseBindInvocation_OptionsBuilderExt(BinderInvocation invocation, ComplexTypeSpec typeSpec) + private void ParseBindInvocation_OptionsBuilderExt(BinderInvocation invocation, ITypeSymbol? type) { IInvocationOperation operation = invocation.Operation!; IMethodSymbol targetMethod = operation.TargetMethod; @@ -58,22 +53,21 @@ private void ParseBindInvocation_OptionsBuilderExt(BinderInvocation invocation, return; } - MethodsToGen_Extensions_OptionsBuilder overload = paramCount switch + MethodsToGen overload = paramCount switch { - 2 => MethodsToGen_Extensions_OptionsBuilder.Bind_T, + 2 => MethodsToGen.OptionsBuilderExt_Bind_T, 3 when SymbolEqualityComparer.Default.Equals(_typeSymbols.ActionOfBinderOptions, @params[2].Type) => - MethodsToGen_Extensions_OptionsBuilder.Bind_T_BinderOptions, - _ => MethodsToGen_Extensions_OptionsBuilder.None + MethodsToGen.OptionsBuilderExt_Bind_T_BinderOptions, + _ => MethodsToGen.None }; - if (overload is not MethodsToGen_Extensions_OptionsBuilder.None && - TryRegisterTypeForMethodGen(MethodsToGen_Extensions_ServiceCollection.Configure_T_name_BinderOptions, typeSpec)) + if (overload is not MethodsToGen.None) { - RegisterInvocation(overload, operation); + EnqueueTargetTypeForRootInvocation(type, overload, invocation); } } - private void ParseBindConfigurationInvocation(BinderInvocation invocation, ComplexTypeSpec typeSpec) + private void ParseBindConfigurationInvocation(BinderInvocation invocation, ITypeSymbol? 
type) { IMethodSymbol targetMethod = invocation.Operation.TargetMethod; ImmutableArray @params = targetMethod.Parameters; @@ -83,23 +77,41 @@ private void ParseBindConfigurationInvocation(BinderInvocation invocation, Compl if (paramCount is 3 && @params[1].Type.SpecialType is SpecialType.System_String && - SymbolEqualityComparer.Default.Equals(_typeSymbols.ActionOfBinderOptions, @params[2].Type) && - TryRegisterTypeForBindCoreMainGen(typeSpec)) + SymbolEqualityComparer.Default.Equals(_typeSymbols.ActionOfBinderOptions, @params[2].Type)) { - RegisterInvocation(MethodsToGen_Extensions_OptionsBuilder.BindConfiguration_T_path_BinderOptions, invocation.Operation); + EnqueueTargetTypeForRootInvocation(type, MethodsToGen.OptionsBuilderExt_BindConfiguration_T_path_BinderOptions, invocation); } } - private void RegisterInvocation(MethodsToGen_Extensions_OptionsBuilder overload, IInvocationOperation operation) + private void RegisterInterceptor_OptionsBuilderExt(TypeParseInfo typeParseInfo, TypeSpec typeSpec) { - _sourceGenSpec.MethodsToGen_OptionsBuilderExt |= overload; - RegisterInterceptor(overload, operation); + MethodsToGen overload = typeParseInfo.BindingOverload; + Debug.Assert((MethodsToGen.OptionsBuilderExt_Any & overload) is not 0); + + if (typeSpec is not ComplexTypeSpec complexTypeSpec) + { + return; + } + + if ((MethodsToGen.OptionsBuilderExt_Bind & overload) is not 0) + { + if (!TryRegisterTypeForOverloadGen_ServiceCollectionExt(MethodsToGen.ServiceCollectionExt_Configure_T_name_BinderOptions, complexTypeSpec)) + { + return; + } + } + else if (!_helperInfoBuilder!.TryRegisterTypeForBindCoreMainGen(complexTypeSpec)) + { + return; + } + + _interceptorInfoBuilder.RegisterInterceptor(typeParseInfo.BindingOverload, typeParseInfo.BinderInvocation.Operation); // Emitting refs to IOptionsChangeTokenSource, ConfigurationChangeTokenSource. 
- _sourceGenSpec.Namespaces.Add("Microsoft.Extensions.Options"); + _helperInfoBuilder!.RegisterNamespace("Microsoft.Extensions.Options"); // Emitting refs to OptionsBuilder. - _sourceGenSpec.Namespaces.Add("Microsoft.Extensions.DependencyInjection"); + _helperInfoBuilder!.RegisterNamespace("Microsoft.Extensions.DependencyInjection"); } } } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/OptionsConfigurationServiceCollectionExtensions.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/OptionsConfigurationServiceCollectionExtensions.cs index e86231f32e42a..1ccef24bc6b71 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/OptionsConfigurationServiceCollectionExtensions.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Parser/OptionsConfigurationServiceCollectionExtensions.cs @@ -10,7 +10,7 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { public sealed partial class ConfigurationBindingGenerator { - private sealed partial class Parser + internal sealed partial class Parser { private void ParseInvocation_ServiceCollectionExt(BinderInvocation invocation) { @@ -30,11 +30,11 @@ private void ParseInvocation_ServiceCollectionExt(BinderInvocation invocation) return; } - MethodsToGen_Extensions_ServiceCollection overload; + MethodsToGen overload; if (paramCount is 2 && SymbolEqualityComparer.Default.Equals(_typeSymbols.IConfiguration, @params[1].Type)) { - overload = MethodsToGen_Extensions_ServiceCollection.Configure_T; + overload = MethodsToGen.ServiceCollectionExt_Configure_T; } else if (paramCount is 3) { @@ -44,12 +44,12 @@ private void ParseInvocation_ServiceCollectionExt(BinderInvocation invocation) if (secondParamType.SpecialType is SpecialType.System_String && SymbolEqualityComparer.Default.Equals(_typeSymbols.IConfiguration, thirdParamType)) { - overload = MethodsToGen_Extensions_ServiceCollection.Configure_T_name; + overload = 
MethodsToGen.ServiceCollectionExt_Configure_T_name; } else if (SymbolEqualityComparer.Default.Equals(_typeSymbols.IConfiguration, secondParamType) && SymbolEqualityComparer.Default.Equals(_typeSymbols.ActionOfBinderOptions, thirdParamType)) { - overload = MethodsToGen_Extensions_ServiceCollection.Configure_T_BinderOptions; + overload = MethodsToGen.ServiceCollectionExt_Configure_T_BinderOptions; } else { @@ -61,7 +61,7 @@ @params[1].Type.SpecialType is SpecialType.System_String && SymbolEqualityComparer.Default.Equals(_typeSymbols.IConfiguration, @params[2].Type) && SymbolEqualityComparer.Default.Equals(_typeSymbols.ActionOfBinderOptions, @params[3].Type)) { - overload = MethodsToGen_Extensions_ServiceCollection.Configure_T_name_BinderOptions; + overload = MethodsToGen.ServiceCollectionExt_Configure_T_name_BinderOptions; } else { @@ -73,25 +73,34 @@ @params[1].Type.SpecialType is SpecialType.System_String && // This would violate generic type constraint; any such invocation could not have been included in the initial parser. 
Debug.Assert(typeSymbol?.IsValueType is not true); - if (GetTargetTypeForRootInvocation(typeSymbol, invocation.Location) is ComplexTypeSpec typeSpec && - TryRegisterTypeForMethodGen(overload, typeSpec)) + EnqueueTargetTypeForRootInvocation(typeSymbol, overload, invocation); + } + + private void RegisterInterceptor_ServiceCollectionExt(TypeParseInfo typeParseInfo, TypeSpec typeSpec) + { + MethodsToGen overload = typeParseInfo.BindingOverload; + + if (typeSpec is ComplexTypeSpec complexTypeSpec && + TryRegisterTypeForOverloadGen_ServiceCollectionExt(overload, complexTypeSpec)) { - RegisterInterceptor(overload, operation); + _interceptorInfoBuilder.RegisterInterceptor(overload, typeParseInfo.BinderInvocation.Operation); } } - private bool TryRegisterTypeForMethodGen(MethodsToGen_Extensions_ServiceCollection overload, ComplexTypeSpec typeSpec) + private bool TryRegisterTypeForOverloadGen_ServiceCollectionExt(MethodsToGen overload, ComplexTypeSpec typeSpec) { - if (TryRegisterTypeForBindCoreMainGen(typeSpec)) + Debug.Assert((MethodsToGen.ServiceCollectionExt_Any & overload) is not 0); + + if (!_helperInfoBuilder!.TryRegisterTypeForBindCoreMainGen(typeSpec)) { - _sourceGenSpec.MethodsToGen_ServiceCollectionExt |= overload; - _sourceGenSpec.Namespaces.Add("Microsoft.Extensions.DependencyInjection"); - // Emitting refs to IOptionsChangeTokenSource, ConfigurationChangeTokenSource, IConfigureOptions<>, ConfigureNamedOptions<>. - _sourceGenSpec.Namespaces.Add("Microsoft.Extensions.Options"); - return true; + return false; } - return false; + _interceptorInfoBuilder.MethodsToGen |= overload; + _helperInfoBuilder!.RegisterNamespace("Microsoft.Extensions.DependencyInjection"); + // Emitting refs to IOptionsChangeTokenSource, ConfigurationChangeTokenSource, IConfigureOptions<>, ConfigureNamedOptions<>. 
+ _helperInfoBuilder!.RegisterNamespace("Microsoft.Extensions.Options"); + return true; } } } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/BindingHelperInfo.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/BindingHelperInfo.cs new file mode 100644 index 0000000000000..096c8410717ae --- /dev/null +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/BindingHelperInfo.cs @@ -0,0 +1,237 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using SourceGenerators; + +namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration +{ + public sealed record BindingHelperInfo + { + public required ImmutableEquatableArray Namespaces { get; init; } + public required bool EmitConfigurationKeyCaches { get; init; } + + public required MethodsToGen_CoreBindingHelper MethodsToGen { get; init; } + public required ImmutableEquatableArray? TypesForGen_BindCoreMain { get; init; } + public required ImmutableEquatableArray? TypesForGen_GetCore { get; init; } + public required ImmutableEquatableArray? TypesForGen_GetValueCore { get; init; } + public required ImmutableEquatableArray? TypesForGen_BindCore { get; init; } + public required ImmutableEquatableArray? TypesForGen_Initialize { get; init; } + public required ImmutableEquatableArray? 
TypesForGen_ParsePrimitive { get; init; } + + internal sealed class Builder(TypeIndex _typeIndex) + { + private readonly Dictionary _seenTransitiveTypes = new(); + + private MethodsToGen_CoreBindingHelper _methodsToGen; + private bool _emitConfigurationKeyCaches; + + private readonly Dictionary> _typesForGen = new(); + + private readonly SortedSet _namespaces = new() + { + "System", + "System.CodeDom.Compiler", + "System.Globalization", + "System.Runtime.CompilerServices", + "Microsoft.Extensions.Configuration", + }; + + public BindingHelperInfo ToIncrementalValue() + { + return new BindingHelperInfo + { + Namespaces = _namespaces.ToImmutableEquatableArray(), + EmitConfigurationKeyCaches = _emitConfigurationKeyCaches, + + MethodsToGen = _methodsToGen, + TypesForGen_GetCore = GetTypesForGen_CoreBindingHelper(MethodsToGen_CoreBindingHelper.GetCore), + TypesForGen_BindCoreMain = GetTypesForGen_CoreBindingHelper(MethodsToGen_CoreBindingHelper.BindCoreMain), + TypesForGen_GetValueCore = GetTypesForGen_CoreBindingHelper(MethodsToGen_CoreBindingHelper.GetValueCore), + TypesForGen_BindCore = GetTypesForGen_CoreBindingHelper(MethodsToGen_CoreBindingHelper.BindCore), + TypesForGen_Initialize = GetTypesForGen_CoreBindingHelper(MethodsToGen_CoreBindingHelper.Initialize), + TypesForGen_ParsePrimitive = GetTypesForGen_CoreBindingHelper(MethodsToGen_CoreBindingHelper.ParsePrimitive) + }; + + ImmutableEquatableArray? GetTypesForGen_CoreBindingHelper(MethodsToGen_CoreBindingHelper overload) + where TSpec : TypeSpec, IEquatable + { + _typesForGen.TryGetValue(overload, out HashSet? typesAsBase); + + if (typesAsBase is null) + { + return null; + } + + IEnumerable types = typeof(TSpec) == typeof(TypeSpec) + ? 
(HashSet)(object)typesAsBase + : typesAsBase.Select(t => (TSpec)t); + + return GetTypesForGen(types); + } + + static ImmutableEquatableArray GetTypesForGen(IEnumerable types) + where TSpec : TypeSpec, IEquatable => + types.ToImmutableEquatableArray(); + } + + public bool TryRegisterTypeForGetGen(TypeSpec type) + { + if (TryRegisterTransitiveTypesForMethodGen(type.TypeRef)) + { + RegisterTypeForMethodGen(MethodsToGen_CoreBindingHelper.GetCore, type); + RegisterForGen_AsConfigWithChildrenHelper(); + return true; + } + + return false; + } + + public bool TryRegisterTypeForGetValueGen(TypeSpec typeSpec) + { + ParsableFromStringSpec effectiveType = (ParsableFromStringSpec)_typeIndex.GetEffectiveTypeSpec(typeSpec); + RegisterTypeForMethodGen(MethodsToGen_CoreBindingHelper.GetValueCore, typeSpec); + RegisterStringParsableTypeIfApplicable(effectiveType); + return true; + } + + public bool TryRegisterTypeForBindCoreMainGen(ComplexTypeSpec type) + { + if (TryRegisterTransitiveTypesForMethodGen(type.TypeRef)) + { + RegisterTypeForMethodGen(MethodsToGen_CoreBindingHelper.BindCoreMain, type); + RegisterForGen_AsConfigWithChildrenHelper(); + return true; + } + + return false; + } + + public bool TryRegisterTransitiveTypesForMethodGen(TypeRef typeRef) + { + return _seenTransitiveTypes.TryGetValue(typeRef, out bool isValid) + ? 
isValid + : (_seenTransitiveTypes[typeRef] = TryRegisterCore()); + + bool TryRegisterCore() + { + switch (_typeIndex.GetTypeSpec(typeRef)) + { + case NullableSpec nullableSpec: + { + return TryRegisterTransitiveTypesForMethodGen(nullableSpec.EffectiveTypeRef); + } + case ParsableFromStringSpec stringParsableSpec: + { + RegisterStringParsableTypeIfApplicable(stringParsableSpec); + return true; + } + case DictionarySpec dictionarySpec: + { + bool shouldRegister = _typeIndex.CanBindTo(typeRef) && + TryRegisterTransitiveTypesForMethodGen(dictionarySpec.KeyTypeRef) && + TryRegisterTransitiveTypesForMethodGen(dictionarySpec.ElementTypeRef) && + TryRegisterTypeForBindCoreGen(dictionarySpec); + + if (shouldRegister && dictionarySpec.InstantiationStrategy is CollectionInstantiationStrategy.LinqToDictionary) + { + _namespaces.Add("System.Linq"); + } + + return shouldRegister; + } + case CollectionSpec collectionSpec: + { + return TryRegisterTransitiveTypesForMethodGen(collectionSpec.ElementTypeRef) && + TryRegisterTypeForBindCoreGen(collectionSpec); + } + case ObjectSpec objectSpec: + { + // Base case to avoid stack overflow for recursive object graphs. + // Register all object types for gen; we need to throw runtime exceptions in some cases. + bool shouldRegister = true; + _seenTransitiveTypes.Add(typeRef, shouldRegister); + + // List is used in generated code as a temp holder for formatting + // an error for config properties that don't map to object properties. + _namespaces.Add("System.Collections.Generic"); + + if (_typeIndex.HasBindableMembers(objectSpec)) + { + foreach (PropertySpec property in objectSpec.Properties!) 
+ { + TryRegisterTransitiveTypesForMethodGen(property.TypeRef); + + if (_typeIndex.GetTypeSpec(property.TypeRef) is ComplexTypeSpec) + { + RegisterForGen_AsConfigWithChildrenHelper(); + } + } + + bool registeredForBindCore = TryRegisterTypeForBindCoreGen(objectSpec); + Debug.Assert(registeredForBindCore); + + if (objectSpec is { InstantiationStrategy: ObjectInstantiationStrategy.ParameterizedConstructor, InitExceptionMessage: null }) + { + RegisterTypeForMethodGen(MethodsToGen_CoreBindingHelper.Initialize, objectSpec); + } + } + + return true; + } + default: + { + return true; + } + } + } + } + + public void RegisterNamespace(string @namespace) => _namespaces.Add(@namespace); + + private bool TryRegisterTypeForBindCoreGen(ComplexTypeSpec type) + { + if (_typeIndex.HasBindableMembers(type)) + { + RegisterTypeForMethodGen(MethodsToGen_CoreBindingHelper.BindCore, type); + _emitConfigurationKeyCaches = true; + return true; + } + + return false; + } + + private void RegisterTypeForMethodGen(MethodsToGen_CoreBindingHelper method, TypeSpec type) + { + if (!_typesForGen.TryGetValue(method, out HashSet? 
types)) + { + _typesForGen[method] = types = new HashSet(); + } + + if (types.Add(type)) + { + _methodsToGen |= method; + + if (type is { Namespace: string @namespace }) + { + _namespaces.Add(@namespace); + } + } + } + + private void RegisterStringParsableTypeIfApplicable(ParsableFromStringSpec type) + { + if (type.StringParsableTypeKind is not StringParsableTypeKind.AssignFromSectionValue) + { + _methodsToGen |= MethodsToGen_CoreBindingHelper.ParsePrimitive; + RegisterTypeForMethodGen(MethodsToGen_CoreBindingHelper.ParsePrimitive, type); + } + } + + private void RegisterForGen_AsConfigWithChildrenHelper() => _methodsToGen |= MethodsToGen_CoreBindingHelper.AsConfigWithChildren; + } + } +} diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/InterceptorInfo.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/InterceptorInfo.cs new file mode 100644 index 0000000000000..999ed6514f99d --- /dev/null +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/InterceptorInfo.cs @@ -0,0 +1,202 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using Microsoft.CodeAnalysis; +using Microsoft.CodeAnalysis.CSharp.Syntax; +using Microsoft.CodeAnalysis.Operations; +using Microsoft.CodeAnalysis.Text; +using SourceGenerators; + +namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration +{ + public sealed record InterceptorInfo + { + public required MethodsToGen MethodsToGen { get; init; } + + public required ImmutableEquatableArray? ConfigBinder_Bind_instance { get; init; } + public required ImmutableEquatableArray? ConfigBinder_Bind_instance_BinderOptions { get; init; } + public required ImmutableEquatableArray? ConfigBinder_Bind_key_instance { get; init; } + + + public required ImmutableEquatableArray? 
ConfigBinder { get; init; } + public required ImmutableEquatableArray? OptionsBuilderExt { get; init; } + public required ImmutableEquatableArray? ServiceCollectionExt { get; init; } + + public IEnumerable? GetInfo(MethodsToGen interceptor) + { + Debug.Assert((MethodsToGen.ConfigBinder_Bind & interceptor) is 0); + + ImmutableEquatableArray? infoList; + if ((MethodsToGen.ConfigBinder_Any ^ MethodsToGen.ConfigBinder_Bind & interceptor) is not 0) + { + infoList = ConfigBinder; + } + else if ((MethodsToGen.OptionsBuilderExt_Any & interceptor) is not 0) + { + infoList = OptionsBuilderExt; + } + else + { + Debug.Assert((MethodsToGen.ServiceCollectionExt_Any & interceptor) is not 0); + infoList = ServiceCollectionExt; + } + + return infoList?.Where(i => i.Interceptor == interceptor); + } + + internal sealed class Builder + { + private TypedInterceptorInfoBuildler? _configBinder_InfoBuilder_Bind_instance; + private TypedInterceptorInfoBuildler? _configBinder_InfoBuilder_Bind_instance_BinderOptions; + private TypedInterceptorInfoBuildler? _configBinder_InfoBuilder_Bind_key_instance; + + private List? _interceptors_configBinder; + private List? _interceptors_OptionsBuilderExt; + private List? 
_interceptors_serviceCollectionExt; + + public MethodsToGen MethodsToGen { get; set; } + + public void RegisterInterceptor_ConfigBinder_Bind(MethodsToGen overload, ComplexTypeSpec type, IInvocationOperation invocation) + { + Debug.Assert((MethodsToGen.ConfigBinder_Bind & overload) is not 0); + + switch (overload) + { + case MethodsToGen.ConfigBinder_Bind_instance: + RegisterInterceptor(ref _configBinder_InfoBuilder_Bind_instance); + break; + case MethodsToGen.ConfigBinder_Bind_instance_BinderOptions: + RegisterInterceptor(ref _configBinder_InfoBuilder_Bind_instance_BinderOptions); + break; + case MethodsToGen.ConfigBinder_Bind_key_instance: + RegisterInterceptor(ref _configBinder_InfoBuilder_Bind_key_instance); + break; + } + + MethodsToGen |= overload; + + void RegisterInterceptor(ref TypedInterceptorInfoBuildler? infoBuilder) + { + infoBuilder ??= new TypedInterceptorInfoBuildler(); + infoBuilder.RegisterInterceptor(overload, type, invocation); + } + } + + public void RegisterInterceptor(MethodsToGen overload, IInvocationOperation operation) + { + Debug.Assert((MethodsToGen.ConfigBinder_Bind & overload) is 0); + + if ((MethodsToGen.ConfigBinder_Any ^ MethodsToGen.ConfigBinder_Bind & overload) is not 0) + { + RegisterInterceptor(ref _interceptors_configBinder); + } + else if ((MethodsToGen.OptionsBuilderExt_Any & overload) is not 0) + { + RegisterInterceptor(ref _interceptors_OptionsBuilderExt); + } + else + { + Debug.Assert((MethodsToGen.ServiceCollectionExt_Any & overload) is not 0); + RegisterInterceptor(ref _interceptors_serviceCollectionExt); + } + + MethodsToGen |= overload; + + void RegisterInterceptor(ref List? 
infoList) + { + infoList ??= new List(); + infoList.Add(new InvocationLocationInfo(overload, operation)); + } + } + + public InterceptorInfo ToIncrementalValue() => + new InterceptorInfo + { + MethodsToGen = MethodsToGen, + + ConfigBinder = _interceptors_configBinder?.ToImmutableEquatableArray(), + OptionsBuilderExt = _interceptors_OptionsBuilderExt?.ToImmutableEquatableArray(), + ServiceCollectionExt = _interceptors_serviceCollectionExt?.ToImmutableEquatableArray(), + + ConfigBinder_Bind_instance = _configBinder_InfoBuilder_Bind_instance?.ToIncrementalValue(), + ConfigBinder_Bind_instance_BinderOptions = _configBinder_InfoBuilder_Bind_instance_BinderOptions?.ToIncrementalValue(), + ConfigBinder_Bind_key_instance = _configBinder_InfoBuilder_Bind_key_instance?.ToIncrementalValue(), + }; + } + } + + internal sealed class TypedInterceptorInfoBuildler + { + private readonly Dictionary _invocationInfoBuilderCache = new(); + + public void RegisterInterceptor(MethodsToGen overload, ComplexTypeSpec type, IInvocationOperation invocation) + { + if (!_invocationInfoBuilderCache.TryGetValue(type, out TypedInterceptorInvocationInfo.Builder? invocationInfoBuilder)) + { + _invocationInfoBuilderCache[type] = invocationInfoBuilder = new TypedInterceptorInvocationInfo.Builder(overload, type); + } + + invocationInfoBuilder.RegisterInvocation(invocation); + } + + public ImmutableEquatableArray? 
ToIncrementalValue() => + _invocationInfoBuilderCache.Values + .Select(b => b.ToIncrementalValue()) + .ToImmutableEquatableArray(); + } + + public sealed record TypedInterceptorInvocationInfo(ComplexTypeSpec TargetType, ImmutableEquatableArray Locations) + { + public sealed class Builder(MethodsToGen Overload, ComplexTypeSpec TargetType) + { + private readonly List _infoList = new(); + + public void RegisterInvocation(IInvocationOperation invocation) => + _infoList.Add(new InvocationLocationInfo(Overload, invocation)); + + public TypedInterceptorInvocationInfo ToIncrementalValue() => new( + TargetType, + Locations: _infoList.ToImmutableEquatableArray()); + } + } + + public sealed record InvocationLocationInfo + { + public InvocationLocationInfo(MethodsToGen interceptor, IInvocationOperation invocation) + { + Debug.Assert(BinderInvocation.IsBindingOperation(invocation)); + + if (invocation.Syntax is not InvocationExpressionSyntax { Expression: MemberAccessExpressionSyntax memberAccessExprSyntax }) + { + const string InvalidInvocationErrMsg = "The invocation should have been validated upstream when selecting invocations to emit interceptors for."; + throw new ArgumentException(InvalidInvocationErrMsg, nameof(invocation)); + } + + SyntaxTree operationSyntaxTree = invocation.Syntax.SyntaxTree; + TextSpan memberNameSpan = memberAccessExprSyntax.Name.Span; + FileLinePositionSpan linePosSpan = operationSyntaxTree.GetLineSpan(memberNameSpan); + + Interceptor = interceptor; + LineNumber = linePosSpan.StartLinePosition.Line + 1; + CharacterNumber = linePosSpan.StartLinePosition.Character + 1; + FilePath = GetInterceptorFilePath(); + + // Use the same logic used by the interceptors API for resolving the source mapped value of a path. + // https://github.com/dotnet/roslyn/blob/f290437fcc75dad50a38c09e0977cce13a64f5ba/src/Compilers/CSharp/Portable/Compilation/CSharpCompilation.cs#L1063-L1064 + string GetInterceptorFilePath() + { + SourceReferenceResolver? 
sourceReferenceResolver = invocation.SemanticModel?.Compilation.Options.SourceReferenceResolver; + return sourceReferenceResolver?.NormalizePath(operationSyntaxTree.FilePath, baseFilePath: null) ?? operationSyntaxTree.FilePath; + } + } + + public MethodsToGen Interceptor { get; } + public string FilePath { get; } + public int LineNumber { get; } + public int CharacterNumber { get; } + } +} diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/InterceptorLocationInfo.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/InterceptorLocationInfo.cs deleted file mode 100644 index 441acbe6a7444..0000000000000 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/InterceptorLocationInfo.cs +++ /dev/null @@ -1,89 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -using System; -using System.Collections; -using System.Collections.Generic; -using Microsoft.CodeAnalysis; -using Microsoft.CodeAnalysis.CSharp.Syntax; -using Microsoft.CodeAnalysis.Operations; -using Microsoft.CodeAnalysis.Text; - -namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration -{ - internal sealed record InterceptorLocationInfo - { - public InterceptorLocationInfo(IInvocationOperation operation) - { - MemberAccessExpressionSyntax memberAccessExprSyntax = ((MemberAccessExpressionSyntax)((InvocationExpressionSyntax)operation.Syntax).Expression); - SyntaxTree operationSyntaxTree = operation.Syntax.SyntaxTree; - TextSpan memberNameSpan = memberAccessExprSyntax.Name.Span; - FileLinePositionSpan linePosSpan = operationSyntaxTree.GetLineSpan(memberNameSpan); - - LineNumber = linePosSpan.StartLinePosition.Line + 1; - CharacterNumber = linePosSpan.StartLinePosition.Character + 1; - FilePath = GetInterceptorFilePath(); - - // Use the same logic used by the interceptors API for resolving the source mapped value of a path. 
- // https://github.com/dotnet/roslyn/blob/f290437fcc75dad50a38c09e0977cce13a64f5ba/src/Compilers/CSharp/Portable/Compilation/CSharpCompilation.cs#L1063-L1064 - string GetInterceptorFilePath() - { - SourceReferenceResolver? sourceReferenceResolver = operation.SemanticModel?.Compilation.Options.SourceReferenceResolver; - return sourceReferenceResolver?.NormalizePath(operationSyntaxTree.FilePath, baseFilePath: null) ?? operationSyntaxTree.FilePath; - } - } - - public string FilePath { get; } - public int LineNumber { get; } - public int CharacterNumber { get; } - } - - internal sealed record ConfigurationBinderInterceptorInfo - { - private OverloadInterceptorInfo? _bind_Instance; - private OverloadInterceptorInfo? _bind_instance_BinderOptions; - private OverloadInterceptorInfo? _bind_key_instance; - - public void RegisterOverloadInfo(MethodsToGen_ConfigurationBinder overload, TypeSpec type, IInvocationOperation operation) - { - OverloadInterceptorInfo overloadInfo = DetermineOverload(overload, initIfNull: true); - overloadInfo.RegisterLocationInfo(type, operation); - } - - public OverloadInterceptorInfo GetOverloadInfo(MethodsToGen_ConfigurationBinder overload) => - DetermineOverload(overload, initIfNull: false) ?? throw new ArgumentOutOfRangeException(nameof(overload)); - - private OverloadInterceptorInfo? DetermineOverload(MethodsToGen_ConfigurationBinder overload, bool initIfNull) - { - return overload switch - { - MethodsToGen_ConfigurationBinder.Bind_instance => InitIfNull(ref _bind_Instance), - MethodsToGen_ConfigurationBinder.Bind_instance_BinderOptions => InitIfNull(ref _bind_instance_BinderOptions), - MethodsToGen_ConfigurationBinder.Bind_key_instance => InitIfNull(ref _bind_key_instance), - _ => throw new InvalidOperationException(nameof(overload)), - }; - - OverloadInterceptorInfo InitIfNull(ref OverloadInterceptorInfo? 
info) - { - if (initIfNull) - { - info ??= new OverloadInterceptorInfo(); - } - - return info; - } - } - } - - internal sealed record OverloadInterceptorInfo : IEnumerable>> - { - private readonly Dictionary> _typeInterceptionInfo = new(); - - public void RegisterLocationInfo(TypeSpec type, IInvocationOperation operation) => - _typeInterceptionInfo.RegisterCacheEntry(type, new InterceptorLocationInfo(operation)); - - public IEnumerator>> GetEnumerator() => _typeInterceptionInfo.GetEnumerator(); - - IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); - } -} diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Members/MemberSpec.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Members/MemberSpec.cs index effd550482595..dc5b03087ac87 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Members/MemberSpec.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Members/MemberSpec.cs @@ -3,10 +3,11 @@ using System.Diagnostics; using Microsoft.CodeAnalysis; +using SourceGenerators; namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { - internal abstract record MemberSpec + public abstract record MemberSpec { public MemberSpec(ISymbol member) { @@ -18,7 +19,7 @@ public MemberSpec(ISymbol member) public string Name { get; } public string DefaultValueExpr { get; protected set; } - public required TypeSpec Type { get; init; } + public required TypeRef TypeRef { get; init; } public required string ConfigurationKeyName { get; init; } public abstract bool CanGet { get; } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Members/ParameterSpec.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Members/ParameterSpec.cs index 0f17a6247f74d..62c781e1f1631 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Members/ParameterSpec.cs +++ 
b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Members/ParameterSpec.cs @@ -6,7 +6,7 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { - internal sealed record ParameterSpec : MemberSpec + public sealed record ParameterSpec : MemberSpec { public ParameterSpec(IParameterSymbol parameter) : base(parameter) { @@ -14,7 +14,7 @@ public ParameterSpec(IParameterSymbol parameter) : base(parameter) if (parameter.HasExplicitDefaultValue) { - string formatted = SymbolDisplay.FormatPrimitive(parameter.ExplicitDefaultValue, quoteStrings: true, useHexadecimalNumbers: false); + string formatted = SymbolDisplay.FormatPrimitive(parameter.ExplicitDefaultValue!, quoteStrings: true, useHexadecimalNumbers: false); if (formatted is not "null") { DefaultValueExpr = formatted; diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Members/PropertySpec.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Members/PropertySpec.cs index 4e9c468c4e335..443e39d32e493 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Members/PropertySpec.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Members/PropertySpec.cs @@ -5,7 +5,7 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { - internal sealed record PropertySpec : MemberSpec + public sealed record PropertySpec : MemberSpec { public PropertySpec(IPropertySymbol property) : base(property) { @@ -28,7 +28,5 @@ public PropertySpec(IPropertySymbol property) : base(property) public override bool CanGet { get; } public override bool CanSet { get; } - - public bool ShouldBindTo => CanGet || CanSet; } } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/MethodsToGen.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/MethodsToGen.cs index 6165a3e6d46dc..af2a33fa6c2f8 100644 --- 
a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/MethodsToGen.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/MethodsToGen.cs @@ -16,137 +16,130 @@ public enum MethodsToGen_CoreBindingHelper Initialize = 0x10, HasValueOrChildren = 0x20, AsConfigWithChildren = 0x40, + ParsePrimitive = 0x80, } /// /// Methods on Microsoft.Extensions.Configuration.ConfigurationBinder /// [Flags] - internal enum MethodsToGen_ConfigurationBinder + public enum MethodsToGen { None = 0x0, + Any = ConfigBinder_Any | OptionsBuilderExt_Any | ServiceCollectionExt_Any, + #region IConfiguration ext. method overloads: 0x1 - 0x400 /// /// Bind(IConfiguration, object?). /// - Bind_instance = 0x1, + ConfigBinder_Bind_instance = 0x1, /// /// Bind(IConfiguration, object?, Action?). /// - Bind_instance_BinderOptions = 0x2, + ConfigBinder_Bind_instance_BinderOptions = 0x2, /// /// Bind(IConfiguration, string, object?). /// - Bind_key_instance = 0x4, + ConfigBinder_Bind_key_instance = 0x4, /// /// Get(IConfiguration). /// - Get_T = 0x8, + ConfigBinder_Get_T = 0x8, /// /// Get(IConfiguration, Action?). /// - Get_T_BinderOptions = 0x10, + ConfigBinder_Get_T_BinderOptions = 0x10, /// /// Get(IConfiguration, Type). /// - Get_TypeOf = 0x20, + ConfigBinder_Get_TypeOf = 0x20, /// /// Get(IConfiguration, Type, Action?). /// - Get_TypeOf_BinderOptions = 0x40, + ConfigBinder_Get_TypeOf_BinderOptions = 0x40, /// /// GetValue(IConfiguration, Type, string). /// - GetValue_TypeOf_key = 0x80, + ConfigBinder_GetValue_TypeOf_key = 0x80, /// /// GetValue(IConfiguration, Type, object?). /// - GetValue_TypeOf_key_defaultValue = 0x100, + ConfigBinder_GetValue_TypeOf_key_defaultValue = 0x100, /// /// GetValue(IConfiguration, string). /// - GetValue_T_key = 0x200, + ConfigBinder_GetValue_T_key = 0x200, /// /// GetValue(IConfiguration, string, T). 
/// - GetValue_T_key_defaultValue = 0x400, + ConfigBinder_GetValue_T_key_defaultValue = 0x400, // Method groups - Bind = Bind_instance | Bind_instance_BinderOptions | Bind_key_instance, - Get = Get_T | Get_T_BinderOptions | Get_TypeOf | Get_TypeOf_BinderOptions, - GetValue = GetValue_T_key | GetValue_T_key_defaultValue | GetValue_TypeOf_key | GetValue_TypeOf_key_defaultValue, + ConfigBinder_Bind = ConfigBinder_Bind_instance | ConfigBinder_Bind_instance_BinderOptions | ConfigBinder_Bind_key_instance, + ConfigBinder_Get = ConfigBinder_Get_T | ConfigBinder_Get_T_BinderOptions | ConfigBinder_Get_TypeOf | ConfigBinder_Get_TypeOf_BinderOptions, + ConfigBinder_GetValue = ConfigBinder_GetValue_T_key | ConfigBinder_GetValue_T_key_defaultValue | ConfigBinder_GetValue_TypeOf_key | ConfigBinder_GetValue_TypeOf_key_defaultValue, - Any = Bind | Get | GetValue, - } - - [Flags] - internal enum MethodsToGen_Extensions_OptionsBuilder - { - None = 0x0, + ConfigBinder_Any = ConfigBinder_Bind | ConfigBinder_Get | ConfigBinder_GetValue, + #endregion ConfigurationBinder ext. method overloads. + #region OptionsBuilder ext. method overloads: 0x800 - 0x2000 /// /// Bind(OptionsBuilder, IConfiguration). /// - Bind_T = 0x1, + OptionsBuilderExt_Bind_T = 0x800, /// /// Bind(OptionsBuilder, IConfiguration, Action?). /// - Bind_T_BinderOptions = 0x2, + OptionsBuilderExt_Bind_T_BinderOptions = 0x1000, /// /// BindConfiguration(OptionsBuilder, string, Action?). /// - BindConfiguration_T_path_BinderOptions = 0x4, + OptionsBuilderExt_BindConfiguration_T_path_BinderOptions = 0x2000, // Method group. BindConfiguration_T is its own method group. 
- Bind = Bind_T | Bind_T_BinderOptions, - - BindConfiguration = BindConfiguration_T_path_BinderOptions, + OptionsBuilderExt_Bind = OptionsBuilderExt_Bind_T | OptionsBuilderExt_Bind_T_BinderOptions, - Any = Bind | BindConfiguration, - } + OptionsBuilderExt_BindConfiguration = OptionsBuilderExt_BindConfiguration_T_path_BinderOptions, - /// - /// Methods on Microsoft.Extensions.DependencyInjection.OptionsConfigurationServiceCollectionExtensions - /// - [Flags] - public enum MethodsToGen_Extensions_ServiceCollection - { - None = 0x0, + OptionsBuilderExt_Any = OptionsBuilderExt_Bind | OptionsBuilderExt_BindConfiguration, + #endregion OptionsBuilder ext. method overloads. + #region IServiceCollection ext. method overloads: 0x4000 - 0x20000 /// /// Configure(IServiceCollection, IConfiguration). /// - Configure_T = 0x1, + ServiceCollectionExt_Configure_T = 0x4000, /// /// Configure(IServiceCollection, string, IConfiguration). /// - Configure_T_name = 0x2, + ServiceCollectionExt_Configure_T_name = 0x8000, /// /// Configure(IServiceCollection, IConfiguration, Action?). /// - Configure_T_BinderOptions = 0x4, + ServiceCollectionExt_Configure_T_BinderOptions = 0x10000, /// /// Configure(IServiceCollection, string, IConfiguration, Action?). /// - Configure_T_name_BinderOptions = 0x8, + ServiceCollectionExt_Configure_T_name_BinderOptions = 0x20000, - Configure = Configure_T | Configure_T_name | Configure_T_BinderOptions | Configure_T_name_BinderOptions, + ServiceCollectionExt_Configure = ServiceCollectionExt_Configure_T | ServiceCollectionExt_Configure_T_name | ServiceCollectionExt_Configure_T_BinderOptions | ServiceCollectionExt_Configure_T_name_BinderOptions, - Any = Configure, + ServiceCollectionExt_Any = ServiceCollectionExt_Configure, + #endregion IServiceCollection ext. 
method overloads: 0x4000 - 0x20000 } } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/SourceGenerationSpec.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/SourceGenerationSpec.cs index 760d57b1dcc88..4f57316429e2b 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/SourceGenerationSpec.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/SourceGenerationSpec.cs @@ -1,31 +1,14 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -using System; -using System.Collections.Generic; +using SourceGenerators; namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { - internal sealed record SourceGenerationSpec + public sealed record SourceGenerationSpec { - public Dictionary> InterceptionInfo { get; } = new(); - public ConfigurationBinderInterceptorInfo InterceptionInfo_ConfigBinder { get; } = new(); - - public Dictionary> TypesForGen_CoreBindingHelper_Methods { get; } = new(); - - public HashSet PrimitivesForHelperGen { get; } = new(); - public HashSet Namespaces { get; } = new() - { - "System", - "System.CodeDom.Compiler", - "System.Globalization", - "System.Runtime.CompilerServices", - "Microsoft.Extensions.Configuration", - }; - - public MethodsToGen_CoreBindingHelper MethodsToGen_CoreBindingHelper { get; set; } - public MethodsToGen_ConfigurationBinder MethodsToGen_ConfigurationBinder { get; set; } - public MethodsToGen_Extensions_OptionsBuilder MethodsToGen_OptionsBuilderExt { get; set; } - public MethodsToGen_Extensions_ServiceCollection MethodsToGen_ServiceCollectionExt { get; set; } + public required InterceptorInfo InterceptorInfo { get; init; } + public required BindingHelperInfo BindingHelperInfo { get; init; } + public required ImmutableEquatableArray ConfigTypes { get; init; } } } diff --git 
a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/TypeIndex.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/TypeIndex.cs new file mode 100644 index 0000000000000..5b59577b39292 --- /dev/null +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/TypeIndex.cs @@ -0,0 +1,122 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; +using SourceGenerators; + +namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration +{ + internal sealed class TypeIndex(IEnumerable typeSpecs) + { + private readonly Dictionary _index = typeSpecs.ToDictionary(spec => spec.TypeRef); + + public bool CanBindTo(TypeRef typeRef) => GetEffectiveTypeSpec(typeRef) switch + { + SimpleTypeSpec => true, + ComplexTypeSpec complexTypeSpec => CanInstantiate(complexTypeSpec) || HasBindableMembers(complexTypeSpec), + _ => throw new InvalidOperationException(), + }; + + public bool CanInstantiate(ComplexTypeSpec typeSpec) => typeSpec switch + { + ObjectSpec objectSpec => objectSpec is { InstantiationStrategy: not ObjectInstantiationStrategy.None, InitExceptionMessage: null }, + DictionarySpec dictionarySpec => KeyIsSupported(dictionarySpec), + CollectionSpec collectionSpec => CanBindTo(collectionSpec.ElementTypeRef), + _ => throw new InvalidOperationException(), + }; + + public bool HasBindableMembers(ComplexTypeSpec typeSpec) => + typeSpec switch + { + ObjectSpec objectSpec => objectSpec.Properties?.Any(ShouldBindTo) is true, + DictionarySpec dictSpec => KeyIsSupported(dictSpec) && CanBindTo(dictSpec.ElementTypeRef), + CollectionSpec collectionSpec => CanBindTo(collectionSpec.ElementTypeRef), + _ => throw new InvalidOperationException(), + }; + + public bool ShouldBindTo(PropertySpec property) + { + TypeSpec propTypeSpec = 
GetEffectiveTypeSpec(property.TypeRef); + return IsAccessible() && !IsCollectionAndCannotOverride() && !IsDictWithUnsupportedKey(); + + bool IsAccessible() => property.CanGet || property.CanSet; + + bool IsDictWithUnsupportedKey() => propTypeSpec is DictionarySpec dictionarySpec && !KeyIsSupported(dictionarySpec); + + bool IsCollectionAndCannotOverride() => !property.CanSet && + propTypeSpec is CollectionWithCtorInitSpec + { + InstantiationStrategy: CollectionInstantiationStrategy.CopyConstructor or CollectionInstantiationStrategy.LinqToDictionary + }; + } + + public TypeSpec GetEffectiveTypeSpec(TypeRef typeRef) + { + TypeSpec typeSpec = GetTypeSpec(typeRef); + return GetEffectiveTypeSpec(typeSpec); + } + + public TypeSpec GetEffectiveTypeSpec(TypeSpec typeSpec) + { + TypeRef effectiveRef = typeSpec.EffectiveTypeRef; + TypeSpec effectiveSpec = effectiveRef == typeSpec.TypeRef ? typeSpec : _index[effectiveRef]; + return effectiveSpec; + } + + public TypeSpec GetTypeSpec(TypeRef typeRef) => _index[typeRef]; + + public string GetInstantiationTypeDisplayString(CollectionWithCtorInitSpec type) + { + CollectionInstantiationConcreteType concreteType = type.InstantiationConcreteType; + return concreteType is CollectionInstantiationConcreteType.Self + ? 
type.DisplayString + : GetGenericTypeDisplayString(type, concreteType); + } + + public string GetPopulationCastTypeDisplayString(CollectionWithCtorInitSpec type) + { + CollectionPopulationCastType castType = type.PopulationCastType; + Debug.Assert(castType is not CollectionPopulationCastType.NotApplicable); + return GetGenericTypeDisplayString(type, castType); + } + + public string GetGenericTypeDisplayString(CollectionWithCtorInitSpec type, Enum genericProxyTypeName) + { + string proxyTypeNameStr = genericProxyTypeName.ToString(); + string elementTypeDisplayString = GetTypeSpec(type.ElementTypeRef).DisplayString; + + if (type is EnumerableSpec) + { + return $"{proxyTypeNameStr}<{elementTypeDisplayString}>"; + } + + string keyTypeDisplayString = GetTypeSpec(((DictionarySpec)type).KeyTypeRef).DisplayString; + return $"{proxyTypeNameStr}<{keyTypeDisplayString}, {elementTypeDisplayString}>"; + } + + public bool KeyIsSupported(DictionarySpec typeSpec) => + // Only types that are parsable from string are supported. + // Nullable keys not allowed; that would cause us to emit + // code that violates dictionary key notnull constraint. + GetTypeSpec(typeSpec.KeyTypeRef) is ParsableFromStringSpec; + + public static string GetConfigKeyCacheFieldName(ObjectSpec type) => $"s_configKeys_{type.IdentifierCompatibleSubstring}"; + + public static string GetParseMethodName(ParsableFromStringSpec type) + { + Debug.Assert(type.StringParsableTypeKind is not StringParsableTypeKind.AssignFromSectionValue); + + string displayString = type.DisplayString; + + string parseMethod = type.StringParsableTypeKind is StringParsableTypeKind.ByteArray + ? "ParseByteArray" + // MinimalDisplayString.Length is certainly > 2. 
+ : $"Parse{(char.ToUpper(displayString[0]) + displayString.Substring(1)).Replace(".", "")}"; + + return parseMethod; + } + } +} diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/CollectionSpec.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/CollectionSpec.cs index f565d245cc550..f891328f77af7 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/CollectionSpec.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/CollectionSpec.cs @@ -2,48 +2,67 @@ // The .NET Foundation licenses this file to you under the MIT license. using Microsoft.CodeAnalysis; +using SourceGenerators; namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { internal abstract record CollectionSpec : ComplexTypeSpec { - public CollectionSpec(ITypeSymbol type) : base(type) { } + protected CollectionSpec(ITypeSymbol type) : base(type) { } - public sealed override bool CanInstantiate => TypeToInstantiate?.CanInstantiate ?? InstantiationStrategy is not InstantiationStrategy.None; + public required TypeRef ElementTypeRef { get; init; } - public required TypeSpec ElementType { get; init; } + } + + internal abstract record CollectionWithCtorInitSpec : CollectionSpec + { + protected CollectionWithCtorInitSpec(ITypeSymbol type) : base(type) { } - public required CollectionPopulationStrategy PopulationStrategy { get; init; } + public required CollectionInstantiationStrategy InstantiationStrategy { get; init; } - public required CollectionSpec? TypeToInstantiate { get; init; } + public required CollectionInstantiationConcreteType InstantiationConcreteType { get; init; } - public required CollectionSpec? 
PopulationCastType { get; init; } + public required CollectionPopulationCastType PopulationCastType { get; init; } } - internal sealed record EnumerableSpec : CollectionSpec + internal sealed record ArraySpec : CollectionSpec { - public EnumerableSpec(ITypeSymbol type) : base(type) { } - - public override TypeSpecKind SpecKind => TypeSpecKind.Enumerable; + public ArraySpec(ITypeSymbol type) : base(type) { } + } - public override bool HasBindableMembers => PopulationStrategy is not CollectionPopulationStrategy.Unknown && ElementType.CanBindTo; + internal sealed record EnumerableSpec : CollectionWithCtorInitSpec + { + public EnumerableSpec(ITypeSymbol type) : base(type) { } } - internal sealed record DictionarySpec : CollectionSpec + internal sealed record DictionarySpec : CollectionWithCtorInitSpec { public DictionarySpec(INamedTypeSymbol type) : base(type) { } - public override TypeSpecKind SpecKind => TypeSpecKind.Dictionary; + public required TypeRef KeyTypeRef { get; init; } + } - public override bool HasBindableMembers => PopulationStrategy is not CollectionPopulationStrategy.Unknown; + internal enum CollectionInstantiationStrategy + { + NotApplicable = 0, + ParameterlessConstructor = 1, + CopyConstructor = 2, + LinqToDictionary = 3, + } - public required ParsableFromStringSpec KeyType { get; init; } + internal enum CollectionInstantiationConcreteType + { + Self = 0, + Dictionary = 1, + List = 2, + HashSet = 3, } - internal enum CollectionPopulationStrategy + internal enum CollectionPopulationCastType { - Unknown = 0, - Add = 1, - Cast_Then_Add = 2, + NotApplicable = 0, + IDictionary = 1, + ICollection = 2, + ISet = 3, } } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/ComplexTypeSpec.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/ComplexTypeSpec.cs deleted file mode 100644 index da5a5130141a5..0000000000000 --- 
a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/ComplexTypeSpec.cs +++ /dev/null @@ -1,29 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. - -using Microsoft.CodeAnalysis; - -namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration -{ - internal abstract record ComplexTypeSpec : TypeSpec - { - public ComplexTypeSpec(ITypeSymbol type) : base(type) { } - - public InstantiationStrategy InstantiationStrategy { get; set; } - - public sealed override bool CanBindTo => CanInstantiate || HasBindableMembers; - - public sealed override TypeSpec EffectiveType => this; - - public abstract bool HasBindableMembers { get; } - } - - internal enum InstantiationStrategy - { - None = 0, - ParameterlessConstructor = 1, - ParameterizedConstructor = 2, - ToEnumerableMethod = 3, - Array = 4, - } -} diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/NullableSpec.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/NullableSpec.cs deleted file mode 100644 index 3de6d7d465ad9..0000000000000 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/NullableSpec.cs +++ /dev/null @@ -1,22 +0,0 @@ -// Licensed to the .NET Foundation under one or more agreements. -// The .NET Foundation licenses this file to you under the MIT license. 
- -using Microsoft.CodeAnalysis; - -namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration -{ - internal sealed record NullableSpec : TypeSpec - { - private readonly TypeSpec _underlyingType; - - public NullableSpec(ITypeSymbol type, TypeSpec underlyingType) : base(type) => _underlyingType = underlyingType; - - public override bool CanBindTo => _underlyingType.CanBindTo; - - public override bool CanInstantiate => _underlyingType.CanInstantiate; - - public override TypeSpecKind SpecKind => TypeSpecKind.Nullable; - - public override TypeSpec EffectiveType => _underlyingType; - } -} diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/ObjectSpec.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/ObjectSpec.cs index f6978fa9cf470..abc01258d4190 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/ObjectSpec.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/ObjectSpec.cs @@ -1,27 +1,39 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. 
-using System; -using System.Collections.Generic; -using System.Linq; using Microsoft.CodeAnalysis; +using SourceGenerators; namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { - internal sealed record ObjectSpec : ComplexTypeSpec + public sealed record ObjectSpec : ComplexTypeSpec { - public ObjectSpec(INamedTypeSymbol type) : base(type) { } - - public override TypeSpecKind SpecKind => TypeSpecKind.Object; - - public override bool HasBindableMembers => Properties.Values.Any(p => p.ShouldBindTo); - - public override bool CanInstantiate => InstantiationStrategy is not InstantiationStrategy.None && InitExceptionMessage is null; - - public Dictionary Properties { get; } = new(StringComparer.OrdinalIgnoreCase); - - public List ConstructorParameters { get; } = new(); + public ObjectSpec( + INamedTypeSymbol type, + ObjectInstantiationStrategy instantiationStrategy, + ImmutableEquatableArray? properties, + ImmutableEquatableArray? constructorParameters, + string? initExceptionMessage) : base(type) + { + InstantiationStrategy = instantiationStrategy; + Properties = properties; + ConstructorParameters = constructorParameters; + InitExceptionMessage = initExceptionMessage; + } + + public ObjectInstantiationStrategy InstantiationStrategy { get; } + + public ImmutableEquatableArray? Properties { get; } + + public ImmutableEquatableArray? ConstructorParameters { get; } + + public string? InitExceptionMessage { get; } + } - public string? 
InitExceptionMessage { get; set; } + public enum ObjectInstantiationStrategy + { + None = 0, + ParameterlessConstructor = 1, + ParameterizedConstructor = 2, } } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/SimpleTypeSpec.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/SimpleTypeSpec.cs index 2dfe08dc5f547..70c7a8042e035 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/SimpleTypeSpec.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/SimpleTypeSpec.cs @@ -1,55 +1,28 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. -using System.Diagnostics; using Microsoft.CodeAnalysis; namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { - internal abstract record SimpleTypeSpec : TypeSpec + public abstract record SimpleTypeSpec : TypeSpec { public SimpleTypeSpec(ITypeSymbol type) : base(type) { } - - public sealed override bool CanBindTo => true; - - public sealed override TypeSpec EffectiveType => this; - - public sealed override bool CanInstantiate => true; } internal sealed record ConfigurationSectionSpec : SimpleTypeSpec { public ConfigurationSectionSpec(ITypeSymbol type) : base(type) { } - - public override TypeSpecKind SpecKind => TypeSpecKind.IConfigurationSection; } - internal sealed record ParsableFromStringSpec : SimpleTypeSpec + public sealed record ParsableFromStringSpec : SimpleTypeSpec { public ParsableFromStringSpec(ITypeSymbol type) : base(type) { } - public override TypeSpecKind SpecKind => TypeSpecKind.ParsableFromString; - public required StringParsableTypeKind StringParsableTypeKind { get; init; } - - private string? 
_parseMethodName; - public string ParseMethodName - { - get - { - Debug.Assert(StringParsableTypeKind is not StringParsableTypeKind.AssignFromSectionValue); - - _parseMethodName ??= StringParsableTypeKind is StringParsableTypeKind.ByteArray - ? "ParseByteArray" - // MinimalDisplayString.Length is certainly > 2. - : $"Parse{(char.ToUpper(DisplayString[0]) + DisplayString.Substring(1)).Replace(".", "")}"; - - return _parseMethodName; - } - } } - internal enum StringParsableTypeKind + public enum StringParsableTypeKind { None = 0, diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/TypeSpec.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/TypeSpec.cs index 651a40639f0ce..1c243ae1cdc7c 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/TypeSpec.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/gen/Specs/Types/TypeSpec.cs @@ -3,27 +3,26 @@ using System.Diagnostics; using Microsoft.CodeAnalysis; +using SourceGenerators; namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { [DebuggerDisplay("Name={DisplayString}, Kind={SpecKind}")] - internal abstract record TypeSpec + public abstract record TypeSpec { - private static readonly SymbolDisplayFormat s_minimalDisplayFormat = new SymbolDisplayFormat( - globalNamespaceStyle: SymbolDisplayGlobalNamespaceStyle.Omitted, - typeQualificationStyle: SymbolDisplayTypeQualificationStyle.NameAndContainingTypes, - genericsOptions: SymbolDisplayGenericsOptions.IncludeTypeParameters, - miscellaneousOptions: SymbolDisplayMiscellaneousOptions.UseSpecialTypes); - public TypeSpec(ITypeSymbol type) { - Namespace = type.ContainingNamespace?.ToDisplayString(); - DisplayString = type.ToDisplayString(s_minimalDisplayFormat); - Name = (Namespace is null ? string.Empty : Namespace + ".") + DisplayString.Replace(".", "+"); + TypeRef = new TypeRef(type); + EffectiveTypeRef = TypeRef; // Overriden by NullableSpec. 
+ (Namespace, DisplayString, Name) = type.GetTypeName(); IdentifierCompatibleSubstring = type.ToIdentifierCompatibleSubstring(); IsValueType = type.IsValueType; } + public TypeRef TypeRef { get; } + + public TypeRef EffectiveTypeRef { get; protected init; } + public string Name { get; } public string DisplayString { get; } @@ -33,24 +32,35 @@ public TypeSpec(ITypeSymbol type) public string? Namespace { get; } public bool IsValueType { get; } + } - public abstract TypeSpecKind SpecKind { get; } + public abstract record ComplexTypeSpec : TypeSpec + { + protected ComplexTypeSpec(ITypeSymbol type) : base(type) { } + } - public abstract bool CanBindTo { get; } + internal sealed record NullableSpec : TypeSpec + { + public NullableSpec(ITypeSymbol type, TypeRef underlyingTypeRef) : base(type) => + EffectiveTypeRef = underlyingTypeRef; + } - public abstract bool CanInstantiate { get; } + internal sealed record UnsupportedTypeSpec : TypeSpec + { + public UnsupportedTypeSpec(ITypeSymbol type) : base(type) { } - public abstract TypeSpec EffectiveType { get; } + public required NotSupportedReason NotSupportedReason { get; init; } } - internal enum TypeSpecKind + public enum NotSupportedReason { - Unknown = 0, - ParsableFromString = 1, - Object = 2, - Enumerable = 3, - Dictionary = 4, - IConfigurationSection = 5, - Nullable = 6, + UnknownType = 1, + MissingPublicInstanceConstructor = 2, + CollectionNotSupported = 3, + DictionaryKeyNotSupported = 4, + ElementTypeNotSupported = 5, + MultipleParameterizedConstructors = 6, + MultiDimArraysNotSupported = 7, + NullableUnderlyingTypeNotSupported = 8, } } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/Collections.generated.txt b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/Collections.generated.txt index ddd52c68b9989..ea4fba79cbc46 100644 --- 
a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/Collections.generated.txt +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/Collections.generated.txt @@ -37,7 +37,7 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration #endregion IConfiguration extensions. #region Core binding extensions. - private readonly static Lazy> s_configKeys_ProgramMyClassWithCustomCollections = new(() => new HashSet(StringComparer.OrdinalIgnoreCase) { "CustomDictionary", "CustomList", "IReadOnlyList", "IReadOnlyDictionary" }); + private readonly static Lazy> s_configKeys_ProgramMyClassWithCustomCollections = new(() => new HashSet(StringComparer.OrdinalIgnoreCase) { "CustomDictionary", "CustomList", "ICustomDictionary", "ICustomCollection", "IReadOnlyList", "UnsupportedIReadOnlyDictionaryUnsupported", "IReadOnlyDictionary" }); public static object? GetCore(this IConfiguration configuration, Type type, Action? configureOptions) { @@ -85,28 +85,6 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration } } - public static void BindCore(IConfiguration configuration, ref List instance, bool defaultValueIfNotFound, BinderOptions? binderOptions) - { - foreach (IConfigurationSection section in configuration.GetChildren()) - { - if (section.Value is string value) - { - instance.Add(ParseInt(value, () => section.Path)); - } - } - } - - public static void BindCore(IConfiguration configuration, ref ICollection instance, bool defaultValueIfNotFound, BinderOptions? binderOptions) - { - foreach (IConfigurationSection section in configuration.GetChildren()) - { - if (section.Value is string value) - { - instance.Add(ParseInt(value, () => section.Path)); - } - } - } - public static void BindCore(IConfiguration configuration, ref IReadOnlyList instance, bool defaultValueIfNotFound, BinderOptions? 
binderOptions) { if (instance is not ICollection temp) @@ -123,28 +101,6 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration } } - public static void BindCore(IConfiguration configuration, ref Dictionary instance, bool defaultValueIfNotFound, BinderOptions? binderOptions) - { - foreach (IConfigurationSection section in configuration.GetChildren()) - { - if (section.Value is string value) - { - instance[section.Key] = ParseInt(value, () => section.Path); - } - } - } - - public static void BindCore(IConfiguration configuration, ref IDictionary instance, bool defaultValueIfNotFound, BinderOptions? binderOptions) - { - foreach (IConfigurationSection section in configuration.GetChildren()) - { - if (section.Value is string value) - { - instance[section.Key] = ParseInt(value, () => section.Path); - } - } - } - public static void BindCore(IConfiguration configuration, ref IReadOnlyDictionary instance, bool defaultValueIfNotFound, BinderOptions? binderOptions) { if (instance is not IDictionary temp) @@ -184,7 +140,7 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration if (AsConfigWithChildren(configuration.GetSection("IReadOnlyList")) is IConfigurationSection section7) { IReadOnlyList? temp9 = instance.IReadOnlyList; - temp9 = temp9 is null ? new List() : new List(temp9); + temp9 = temp9 is null ? (IReadOnlyList)new List() : (IReadOnlyList)new List(temp9); BindCore(section7, ref temp9, defaultValueIfNotFound: false, binderOptions); instance.IReadOnlyList = temp9; } @@ -192,7 +148,7 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration if (AsConfigWithChildren(configuration.GetSection("IReadOnlyDictionary")) is IConfigurationSection section10) { IReadOnlyDictionary? temp12 = instance.IReadOnlyDictionary; - temp12 = temp12 is null ? new Dictionary() : temp12.ToDictionary(pair => pair.Key, pair => pair.Value); + temp12 = temp12 is null ? 
(IReadOnlyDictionary)new Dictionary() : (IReadOnlyDictionary)temp12.ToDictionary(pair => pair.Key, pair => pair.Value); BindCore(section10, ref temp12, defaultValueIfNotFound: false, binderOptions); instance.IReadOnlyDictionary = temp12; } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/ConfigurationBinder/Get.generated.txt b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/ConfigurationBinder/Get.generated.txt index 5e7eeae29254a..b6fb659d544d4 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/ConfigurationBinder/Get.generated.txt +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/ConfigurationBinder/Get.generated.txt @@ -95,7 +95,15 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration public static void BindCore(IConfiguration configuration, ref int[] instance, bool defaultValueIfNotFound, BinderOptions? 
binderOptions) { var temp2 = new List(); - BindCore(configuration, ref temp2, defaultValueIfNotFound: false, binderOptions); + + foreach (IConfigurationSection section in configuration.GetChildren()) + { + if (section.Value is string value) + { + temp2.Add(ParseInt(value, () => section.Path)); + } + } + int originalCount = instance.Length; Array.Resize(ref instance, originalCount + temp2.Count); temp2.CopyTo(instance, originalCount); @@ -116,42 +124,42 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { ValidateConfigurationKeys(typeof(Program.MyClass), s_configKeys_ProgramMyClass, configuration, binderOptions); - if (configuration["MyString"] is string value4) + if (configuration["MyString"] is string value3) { - instance.MyString = value4; + instance.MyString = value3; } - if (configuration["MyInt"] is string value5) + if (configuration["MyInt"] is string value4) { - instance.MyInt = ParseInt(value5, () => configuration.GetSection("MyInt").Path); + instance.MyInt = ParseInt(value4, () => configuration.GetSection("MyInt").Path); } else if (defaultValueIfNotFound) { instance.MyInt = default; } - if (AsConfigWithChildren(configuration.GetSection("MyList")) is IConfigurationSection section6) + if (AsConfigWithChildren(configuration.GetSection("MyList")) is IConfigurationSection section5) { - List? temp8 = instance.MyList; - temp8 ??= new List(); - BindCore(section6, ref temp8, defaultValueIfNotFound: false, binderOptions); - instance.MyList = temp8; + List? temp7 = instance.MyList; + temp7 ??= new List(); + BindCore(section5, ref temp7, defaultValueIfNotFound: false, binderOptions); + instance.MyList = temp7; } - if (AsConfigWithChildren(configuration.GetSection("MyArray")) is IConfigurationSection section9) + if (AsConfigWithChildren(configuration.GetSection("MyArray")) is IConfigurationSection section8) { - int[]? 
temp11 = instance.MyArray; - temp11 ??= new int[0]; - BindCore(section9, ref temp11, defaultValueIfNotFound: false, binderOptions); - instance.MyArray = temp11; + int[]? temp10 = instance.MyArray; + temp10 ??= new int[0]; + BindCore(section8, ref temp10, defaultValueIfNotFound: false, binderOptions); + instance.MyArray = temp10; } - if (AsConfigWithChildren(configuration.GetSection("MyDictionary")) is IConfigurationSection section12) + if (AsConfigWithChildren(configuration.GetSection("MyDictionary")) is IConfigurationSection section11) { - Dictionary? temp14 = instance.MyDictionary; - temp14 ??= new Dictionary(); - BindCore(section12, ref temp14, defaultValueIfNotFound: false, binderOptions); - instance.MyDictionary = temp14; + Dictionary? temp13 = instance.MyDictionary; + temp13 ??= new Dictionary(); + BindCore(section11, ref temp13, defaultValueIfNotFound: false, binderOptions); + instance.MyDictionary = temp13; } } @@ -159,9 +167,9 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { ValidateConfigurationKeys(typeof(Program.MyClass2), s_configKeys_ProgramMyClass2, configuration, binderOptions); - if (configuration["MyInt"] is string value15) + if (configuration["MyInt"] is string value14) { - instance.MyInt = ParseInt(value15, () => configuration.GetSection("MyInt").Path); + instance.MyInt = ParseInt(value14, () => configuration.GetSection("MyInt").Path); } else if (defaultValueIfNotFound) { diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/ConfigurationBinder/Get_PrimitivesOnly.generated.txt b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/ConfigurationBinder/Get_PrimitivesOnly.generated.txt new file mode 100644 index 0000000000000..b703fb5f1c864 --- /dev/null +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/ConfigurationBinder/Get_PrimitivesOnly.generated.txt @@ -0,0 +1,182 @@ +// 
+#nullable enable +#pragma warning disable CS0612, CS0618 // Suppress warnings about [Obsolete] member usage in generated code. + +namespace System.Runtime.CompilerServices +{ + using System; + using System.CodeDom.Compiler; + + [GeneratedCode("Microsoft.Extensions.Configuration.Binder.SourceGeneration", "42.42.42.42")] + [AttributeUsage(AttributeTargets.Method, AllowMultiple = true)] + file sealed class InterceptsLocationAttribute : Attribute + { + public InterceptsLocationAttribute(string filePath, int line, int column) + { + } + } +} + +namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration +{ + using Microsoft.Extensions.Configuration; + using System; + using System.CodeDom.Compiler; + using System.Globalization; + using System.Runtime.CompilerServices; + + [GeneratedCode("Microsoft.Extensions.Configuration.Binder.SourceGeneration", "42.42.42.42")] + file static class BindingExtensions + { + #region IConfiguration extensions. + /// Attempts to bind the configuration instance to a new instance of type T. + [InterceptsLocation(@"src-0.cs", 10, 16)] + public static T? Get(this IConfiguration configuration) => (T?)(GetCore(configuration, typeof(T), configureOptions: null) ?? default(T)); + + /// Attempts to bind the configuration instance to a new instance of type T. + [InterceptsLocation(@"src-0.cs", 12, 16)] + public static T? Get(this IConfiguration configuration, Action? configureOptions) => (T?)(GetCore(configuration, typeof(T), configureOptions) ?? default(T)); + + /// Attempts to bind the configuration instance to a new instance of type T. + [InterceptsLocation(@"src-0.cs", 11, 16)] + public static object? Get(this IConfiguration configuration, Type type) => GetCore(configuration, type, configureOptions: null); + + /// Attempts to bind the configuration instance to a new instance of type T. + [InterceptsLocation(@"src-0.cs", 13, 16)] + public static object? Get(this IConfiguration configuration, Type type, Action? 
configureOptions) => GetCore(configuration, type, configureOptions); + #endregion IConfiguration extensions. + + #region Core binding extensions. + public static object? GetCore(this IConfiguration configuration, Type type, Action? configureOptions) + { + if (configuration is null) + { + throw new ArgumentNullException(nameof(configuration)); + } + + BinderOptions? binderOptions = GetBinderOptions(configureOptions); + + if (!HasValueOrChildren(configuration)) + { + return null; + } + + if (type == typeof(int)) + { + if (configuration is not IConfigurationSection section) + { + throw new InvalidOperationException(); + } + if (section.Value is string value) + { + return ParseInt(value, () => section.Path); + } + } + else if (type == typeof(string)) + { + if (configuration is not IConfigurationSection section) + { + throw new InvalidOperationException(); + } + return section.Value; + } + else if (type == typeof(float)) + { + if (configuration is not IConfigurationSection section) + { + throw new InvalidOperationException(); + } + if (section.Value is string value) + { + return ParseFloat(value, () => section.Path); + } + } + else if (type == typeof(double)) + { + if (configuration is not IConfigurationSection section) + { + throw new InvalidOperationException(); + } + if (section.Value is string value) + { + return ParseDouble(value, () => section.Path); + } + } + + throw new NotSupportedException($"Unable to bind to type '{type}': generator did not detect the type as input."); + } + + public static bool HasValueOrChildren(IConfiguration configuration) + { + if ((configuration as IConfigurationSection)?.Value is not null) + { + return true; + } + return AsConfigWithChildren(configuration) is not null; + } + + public static IConfiguration? AsConfigWithChildren(IConfiguration configuration) + { + foreach (IConfigurationSection _ in configuration.GetChildren()) + { + return configuration; + } + return null; + } + + public static BinderOptions? GetBinderOptions(Action? 
configureOptions) + { + if (configureOptions is null) + { + return null; + } + + BinderOptions binderOptions = new(); + configureOptions(binderOptions); + + if (binderOptions.BindNonPublicProperties) + { + throw new NotSupportedException($"The configuration binding source generator does not support 'BinderOptions.BindNonPublicProperties'."); + } + + return binderOptions; + } + + public static int ParseInt(string value, Func getPath) + { + try + { + return int.Parse(value, NumberStyles.Integer, CultureInfo.InvariantCulture); + } + catch (Exception exception) + { + throw new InvalidOperationException($"Failed to convert configuration value at '{getPath()}' to type '{typeof(int)}'.", exception); + } + } + + public static float ParseFloat(string value, Func getPath) + { + try + { + return float.Parse(value, NumberStyles.Float, CultureInfo.InvariantCulture); + } + catch (Exception exception) + { + throw new InvalidOperationException($"Failed to convert configuration value at '{getPath()}' to type '{typeof(float)}'.", exception); + } + } + + public static double ParseDouble(string value, Func getPath) + { + try + { + return double.Parse(value, NumberStyles.Float, CultureInfo.InvariantCulture); + } + catch (Exception exception) + { + throw new InvalidOperationException($"Failed to convert configuration value at '{getPath()}' to type '{typeof(double)}'.", exception); + } + } + #endregion Core binding extensions. 
+ } +} diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/ConfigurationBinder/Get_T.generated.txt b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/ConfigurationBinder/Get_T.generated.txt index 3fc5176bf50f0..c2e8f167bb475 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/ConfigurationBinder/Get_T.generated.txt +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/ConfigurationBinder/Get_T.generated.txt @@ -76,7 +76,15 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration public static void BindCore(IConfiguration configuration, ref int[] instance, bool defaultValueIfNotFound, BinderOptions? binderOptions) { var temp1 = new List(); - BindCore(configuration, ref temp1, defaultValueIfNotFound: false, binderOptions); + + foreach (IConfigurationSection section in configuration.GetChildren()) + { + if (section.Value is string value) + { + temp1.Add(ParseInt(value, () => section.Path)); + } + } + int originalCount = instance.Length; Array.Resize(ref instance, originalCount + temp1.Count); temp1.CopyTo(instance, originalCount); @@ -97,42 +105,42 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { ValidateConfigurationKeys(typeof(Program.MyClass), s_configKeys_ProgramMyClass, configuration, binderOptions); - if (configuration["MyString"] is string value3) + if (configuration["MyString"] is string value2) { - instance.MyString = value3; + instance.MyString = value2; } - if (configuration["MyInt"] is string value4) + if (configuration["MyInt"] is string value3) { - instance.MyInt = ParseInt(value4, () => configuration.GetSection("MyInt").Path); + instance.MyInt = ParseInt(value3, () => configuration.GetSection("MyInt").Path); } else if (defaultValueIfNotFound) { instance.MyInt = default; } - if 
(AsConfigWithChildren(configuration.GetSection("MyList")) is IConfigurationSection section5) + if (AsConfigWithChildren(configuration.GetSection("MyList")) is IConfigurationSection section4) { - List? temp7 = instance.MyList; - temp7 ??= new List(); - BindCore(section5, ref temp7, defaultValueIfNotFound: false, binderOptions); - instance.MyList = temp7; + List? temp6 = instance.MyList; + temp6 ??= new List(); + BindCore(section4, ref temp6, defaultValueIfNotFound: false, binderOptions); + instance.MyList = temp6; } - if (AsConfigWithChildren(configuration.GetSection("MyArray")) is IConfigurationSection section8) + if (AsConfigWithChildren(configuration.GetSection("MyArray")) is IConfigurationSection section7) { - int[]? temp10 = instance.MyArray; - temp10 ??= new int[0]; - BindCore(section8, ref temp10, defaultValueIfNotFound: false, binderOptions); - instance.MyArray = temp10; + int[]? temp9 = instance.MyArray; + temp9 ??= new int[0]; + BindCore(section7, ref temp9, defaultValueIfNotFound: false, binderOptions); + instance.MyArray = temp9; } - if (AsConfigWithChildren(configuration.GetSection("MyDictionary")) is IConfigurationSection section11) + if (AsConfigWithChildren(configuration.GetSection("MyDictionary")) is IConfigurationSection section10) { - Dictionary? temp13 = instance.MyDictionary; - temp13 ??= new Dictionary(); - BindCore(section11, ref temp13, defaultValueIfNotFound: false, binderOptions); - instance.MyDictionary = temp13; + Dictionary? 
temp12 = instance.MyDictionary; + temp12 ??= new Dictionary(); + BindCore(section10, ref temp12, defaultValueIfNotFound: false, binderOptions); + instance.MyDictionary = temp12; } } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/ConfigurationBinder/Get_T_BinderOptions.generated.txt b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/ConfigurationBinder/Get_T_BinderOptions.generated.txt index 81c23d7ceea65..cd3f237917d4e 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/ConfigurationBinder/Get_T_BinderOptions.generated.txt +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Baselines/ConfigurationBinder/Get_T_BinderOptions.generated.txt @@ -76,7 +76,15 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration public static void BindCore(IConfiguration configuration, ref int[] instance, bool defaultValueIfNotFound, BinderOptions? 
binderOptions) { var temp1 = new List(); - BindCore(configuration, ref temp1, defaultValueIfNotFound: false, binderOptions); + + foreach (IConfigurationSection section in configuration.GetChildren()) + { + if (section.Value is string value) + { + temp1.Add(ParseInt(value, () => section.Path)); + } + } + int originalCount = instance.Length; Array.Resize(ref instance, originalCount + temp1.Count); temp1.CopyTo(instance, originalCount); @@ -97,42 +105,42 @@ namespace Microsoft.Extensions.Configuration.Binder.SourceGeneration { ValidateConfigurationKeys(typeof(Program.MyClass), s_configKeys_ProgramMyClass, configuration, binderOptions); - if (configuration["MyString"] is string value3) + if (configuration["MyString"] is string value2) { - instance.MyString = value3; + instance.MyString = value2; } - if (configuration["MyInt"] is string value4) + if (configuration["MyInt"] is string value3) { - instance.MyInt = ParseInt(value4, () => configuration.GetSection("MyInt").Path); + instance.MyInt = ParseInt(value3, () => configuration.GetSection("MyInt").Path); } else if (defaultValueIfNotFound) { instance.MyInt = default; } - if (AsConfigWithChildren(configuration.GetSection("MyList")) is IConfigurationSection section5) + if (AsConfigWithChildren(configuration.GetSection("MyList")) is IConfigurationSection section4) { - List? temp7 = instance.MyList; - temp7 ??= new List(); - BindCore(section5, ref temp7, defaultValueIfNotFound: false, binderOptions); - instance.MyList = temp7; + List? temp6 = instance.MyList; + temp6 ??= new List(); + BindCore(section4, ref temp6, defaultValueIfNotFound: false, binderOptions); + instance.MyList = temp6; } - if (AsConfigWithChildren(configuration.GetSection("MyArray")) is IConfigurationSection section8) + if (AsConfigWithChildren(configuration.GetSection("MyArray")) is IConfigurationSection section7) { - int[]? 
temp10 = instance.MyArray; - temp10 ??= new int[0]; - BindCore(section8, ref temp10, defaultValueIfNotFound: false, binderOptions); - instance.MyArray = temp10; + int[]? temp9 = instance.MyArray; + temp9 ??= new int[0]; + BindCore(section7, ref temp9, defaultValueIfNotFound: false, binderOptions); + instance.MyArray = temp9; } - if (AsConfigWithChildren(configuration.GetSection("MyDictionary")) is IConfigurationSection section11) + if (AsConfigWithChildren(configuration.GetSection("MyDictionary")) is IConfigurationSection section10) { - Dictionary? temp13 = instance.MyDictionary; - temp13 ??= new Dictionary(); - BindCore(section11, ref temp13, defaultValueIfNotFound: false, binderOptions); - instance.MyDictionary = temp13; + Dictionary? temp12 = instance.MyDictionary; + temp12 ??= new Dictionary(); + BindCore(section10, ref temp12, defaultValueIfNotFound: false, binderOptions); + instance.MyDictionary = temp12; } } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/ConfigBindingGenTestDriver.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/ConfigBindingGenTestDriver.cs new file mode 100644 index 0000000000000..4373b404fc67f --- /dev/null +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/ConfigBindingGenTestDriver.cs @@ -0,0 +1,156 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
+ +using System.Collections.Generic; +using System.Collections.Immutable; +using System.Diagnostics; +using System.Globalization; +using System.Linq; +using System.Reflection; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.CodeAnalysis; +using Microsoft.CodeAnalysis.CSharp; +using Microsoft.Extensions.Configuration.Binder.SourceGeneration; +using SourceGenerators.Tests; +using Xunit; + +namespace Microsoft.Extensions.SourceGeneration.Configuration.Binder.Tests +{ + [ActiveIssue("https://github.com/dotnet/runtime/issues/52062", TestPlatforms.Browser)] + public partial class ConfigurationBindingGeneratorTests : ConfigurationBinderTestsBase + { + internal sealed class ConfigBindingGenTestDriver + { + private readonly CSharpParseOptions _parseOptions; + private GeneratorDriver _generatorDriver; + private SourceGenerationSpec? _genSpec; + + private readonly LanguageVersion _langVersion; + private readonly IEnumerable? _assemblyReferences; + private Compilation _compilation = null; + + public ConfigBindingGenTestDriver( + LanguageVersion langVersion = LanguageVersion.LatestMajor, + IEnumerable? assemblyReferences = null) + { + _langVersion = langVersion; + + _assemblyReferences = assemblyReferences ?? s_compilationAssemblyRefs; + + _parseOptions = new CSharpParseOptions(langVersion).WithFeatures(new[] { + new KeyValuePair("InterceptorsPreview", "") , + new KeyValuePair("InterceptorsPreviewNamespaces", "Microsoft.Extensions.Configuration.Binder.SourceGeneration") + }); + + ConfigurationBindingGenerator generator = new() { OnSourceEmitting = spec => _genSpec = spec }; + _generatorDriver = CSharpGeneratorDriver.Create( + new ISourceGenerator[] { generator.AsSourceGenerator() }, + parseOptions: _parseOptions, + driverOptions: new GeneratorDriverOptions( + disabledOutputs: IncrementalGeneratorOutputKind.None, + trackIncrementalGeneratorSteps: true)); + } + + public async Task RunGeneratorAndUpdateCompilation(string? 
source = null) + { + await UpdateCompilationWithSource(source); + Assert.NotNull(_compilation); + + _generatorDriver = _generatorDriver.RunGeneratorsAndUpdateCompilation(_compilation, out Compilation outputCompilation, out _, CancellationToken.None); + GeneratorDriverRunResult runResult = _generatorDriver.GetRunResult(); + + return new ConfigBindingGenRunResult + { + OutputCompilation = outputCompilation, + Diagnostics = runResult.Diagnostics, + GeneratedSource = runResult.Results[0].GeneratedSources is { Length: not 0 } sources ? sources[0] : null, + TrackedSteps = runResult.Results[0].TrackedSteps[ConfigurationBindingGenerator.GenSpecTrackingName], + GenerationSpec = _genSpec + }; + } + + private async Task UpdateCompilationWithSource(string? source = null) + { + if (_compilation is not null && source is not null) + { + SyntaxTree newTree = CSharpSyntaxTree.ParseText(source, _parseOptions); + _compilation = _compilation.ReplaceSyntaxTree(_compilation.SyntaxTrees.First(), newTree); + } + else if (_compilation is null) + { + Assert.True(source is not null, "Generator test requires input source."); + using AdhocWorkspace workspace = RoslynTestUtils.CreateTestWorkspace(); + + Project project = RoslynTestUtils.CreateTestProject(workspace, _assemblyReferences, langVersion: _langVersion) + .WithCompilationOptions(new CSharpCompilationOptions(OutputKind.DynamicallyLinkedLibrary).WithNullableContextOptions(NullableContextOptions.Annotations)) + .WithParseOptions(_parseOptions) + .WithDocuments(new string[] { source }); + Assert.True(project.Solution.Workspace.TryApplyChanges(project.Solution)); + + _compilation = (await project.GetCompilationAsync(CancellationToken.None).ConfigureAwait(false))!; + } + } + } + } + + internal struct ConfigBindingGenRunResult + { + public required Compilation OutputCompilation { get; init; } + + public required GeneratedSourceResult? GeneratedSource { get; init; } + + /// + /// Diagnostics produced by the generator alone. 
Doesn't include any from other build participants. + /// + public required ImmutableArray Diagnostics { get; init; } + + public required ImmutableArray TrackedSteps { get; init; } + + public required SourceGenerationSpec? GenerationSpec { get; init; } + } + + internal enum ExpectedDiagnostics + { + None, + FromGeneratorOnly, + } + + internal static class ConfigBindingGenTestDriverExtensions + { + public static void ValidateIncrementalResult(this ConfigBindingGenRunResult result, + IncrementalStepRunReason inputReason, + IncrementalStepRunReason outputReason) + { + Assert.Collection(result.TrackedSteps, step => + { + Assert.Collection(step.Inputs, source => Assert.Equal(inputReason, source.Source.Outputs[source.OutputIndex].Reason)); + Assert.Collection(step.Outputs, output => Assert.Equal(outputReason, output.Reason)); + }); + } + + public static void ValidateDiagnostics(this ConfigBindingGenRunResult result, ExpectedDiagnostics expectedDiags) + { + ImmutableArray outputDiagnostics = result.OutputCompilation.GetDiagnostics(); + + if (expectedDiags is ExpectedDiagnostics.None) + { + foreach (Diagnostic diagnostic in outputDiagnostics) + { + Assert.True( + IsPermitted(diagnostic), + $"Generator caused diagnostic in output compilation: {diagnostic.GetMessage(CultureInfo.InvariantCulture)}."); + } + } + else + { + Debug.Assert(expectedDiags is ExpectedDiagnostics.FromGeneratorOnly); + + Assert.NotEmpty(result.Diagnostics); + Assert.False(outputDiagnostics.Any(diag => !IsPermitted(diag))); + } + + static bool IsPermitted(Diagnostic diagnostic) => diagnostic.Severity <= DiagnosticSeverity.Info; + } + } +} diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.Baselines.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.Baselines.cs index 3c46f5f99818b..e05a773713712 100644 ---
a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.Baselines.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.Baselines.cs @@ -2,6 +2,7 @@ // The .NET Foundation licenses this file to you under the MIT license. using System; +using System.Collections.Immutable; using System.Linq; using System.Threading.Tasks; using Microsoft.CodeAnalysis; @@ -141,7 +142,7 @@ public static void Main() public class MyClass { - public string MyString { get; set; } + public string? MyString { get; set; } public int MyInt { get; set; } public List MyList { get; set; } public Dictionary MyDictionary { get; set; } @@ -314,6 +315,30 @@ public class MyClass4 await VerifyAgainstBaselineUsingFile("Get.generated.txt", source, extType: ExtensionClassType.ConfigurationBinder); } + [Fact] + public async Task Get_PrimitivesOnly() + { + string source = """ + using Microsoft.Extensions.Configuration; + + public class Program + { + public static void Main() + { + ConfigurationBuilder configurationBuilder = new(); + IConfigurationRoot config = configurationBuilder.Build(); + + config.Get(); + config.Get(typeof(string)); + config.Get(binderOptions => { }); + config.Get(typeof(double), binderOptions => { }); + } + } + """; + + await VerifyAgainstBaselineUsingFile("Get_PrimitivesOnly.generated.txt", source, extType: ExtensionClassType.ConfigurationBinder); + } + [Fact] public async Task Get_T() { @@ -654,9 +679,9 @@ public class MyClass2 }" ; - var (d, r) = await RunGenerator(source); - Assert.Empty(r); - Assert.Empty(d); + ConfigBindingGenRunResult result = await RunGeneratorAndUpdateCompilation(source); + Assert.False(result.GeneratedSource.HasValue); + Assert.Empty(result.Diagnostics); } [Fact] @@ -736,6 +761,7 @@ public static void Main() section.Get(); } + // Diagnostic warning because we don't know how to instantiate two properties on this type. 
public class MyClassWithCustomCollections { public CustomDictionary CustomDictionary { get; set; } @@ -743,6 +769,7 @@ public class MyClassWithCustomCollections public ICustomDictionary ICustomDictionary { get; set; } public ICustomSet ICustomCollection { get; set; } public IReadOnlyList IReadOnlyList { get; set; } + // Diagnostic warning because we don't know how to instantiate the property type. public IReadOnlyDictionary UnsupportedIReadOnlyDictionaryUnsupported { get; set; } public IReadOnlyDictionary IReadOnlyDictionary { get; set; } } @@ -755,21 +782,26 @@ public class CustomList : List { } + // Diagnostic warning because we don't know how to instantiate this type. public interface ICustomDictionary : IDictionary { } + // Diagnostic warning because we don't know how to instantiate this type. public interface ICustomSet : ISet { } } """; - await VerifyAgainstBaselineUsingFile("Collections.generated.txt", source, validateOutputCompDiags: false, assessDiagnostics: (d) => - { - Assert.Equal(3, d.Where(diag => diag.Id == Diagnostics.TypeNotSupported.Id).Count()); - Assert.Equal(6, d.Where(diag => diag.Id == Diagnostics.PropertyNotSupported.Id).Count()); - }); + ConfigBindingGenRunResult result = await VerifyAgainstBaselineUsingFile( + "Collections.generated.txt", + source, + expectedDiags: ExpectedDiagnostics.FromGeneratorOnly); + + ImmutableArray diagnostics = result.Diagnostics; + Assert.Equal(3, diagnostics.Where(diag => diag.Id == Diagnostics.TypeNotSupported.Id).Count()); + Assert.Equal(3, diagnostics.Where(diag => diag.Id == Diagnostics.PropertyNotSupported.Id).Count()); } [Fact] @@ -811,14 +843,12 @@ public abstract class AbstractType_CannotInit } """; - await VerifyAgainstBaselineUsingFile( + ConfigBindingGenRunResult result = await VerifyAgainstBaselineUsingFile( "EmptyConfigType.generated.txt", source, - assessDiagnostics: (d) => - { - Assert.Equal(2, d.Where(diag => diag.Id == Diagnostics.TypeNotSupported.Id).Count()); - }, - validateOutputCompDiags: 
false); + expectedDiags: ExpectedDiagnostics.FromGeneratorOnly); + + Assert.Equal(2, result.Diagnostics.Where(diag => diag.Id == Diagnostics.TypeNotSupported.Id).Count()); } } } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.Helpers.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.Helpers.cs index 9d7c1b7d9408f..cbbd34e7fc41d 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.Helpers.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.Helpers.cs @@ -9,10 +9,10 @@ using System.IO; using System.Linq; using System.Reflection; -using System.Threading; using System.Threading.Tasks; using Microsoft.CodeAnalysis; using Microsoft.CodeAnalysis.CSharp; +using Microsoft.CodeAnalysis.Text; using Microsoft.Extensions.Configuration; using Microsoft.Extensions.Configuration.Binder.SourceGeneration; using Microsoft.Extensions.DependencyInjection; @@ -24,6 +24,9 @@ namespace Microsoft.Extensions.SourceGeneration.Configuration.Binder.Tests { public partial class ConfigurationBindingGeneratorTests { + /// + /// Keep in sync with variants, e.g. . 
+ /// private const string BindCallSampleCode = """ using System.Collections.Generic; using Microsoft.Extensions.Configuration; @@ -63,6 +66,7 @@ private static class Diagnostics } private static readonly Assembly[] s_compilationAssemblyRefs = new[] { + typeof(BitArray).Assembly, typeof(ConfigurationBinder).Assembly, typeof(ConfigurationBuilder).Assembly, typeof(CultureInfo).Assembly, @@ -87,18 +91,19 @@ private enum ExtensionClassType private static async Task VerifyThatSourceIsGenerated(string testSourceCode) { - var (d, r) = await RunGenerator(testSourceCode); - Assert.Equal(1, r.Length); - Assert.Empty(d); - Assert.True(r[0].SourceText.Lines.Count > 10); + ConfigBindingGenRunResult result = await RunGeneratorAndUpdateCompilation(testSourceCode); + GeneratedSourceResult? source = result.GeneratedSource; + + Assert.NotNull(source); + Assert.Empty(result.Diagnostics); + Assert.True(source.Value.SourceText.Lines.Count > 10); } - private static async Task VerifyAgainstBaselineUsingFile( + private static async Task VerifyAgainstBaselineUsingFile( string filename, string testSourceCode, - Action>? assessDiagnostics = null, ExtensionClassType extType = ExtensionClassType.None, - bool validateOutputCompDiags = true) + ExpectedDiagnostics expectedDiags = ExpectedDiagnostics.None) { string path = extType is ExtensionClassType.None ? 
Path.Combine("Baselines", filename) @@ -107,73 +112,52 @@ private static async Task VerifyAgainstBaselineUsingFile( string[] expectedLines = baseline.Replace("%VERSION%", typeof(ConfigurationBindingGenerator).Assembly.GetName().Version?.ToString()) .Split(Environment.NewLine); - var (d, r) = await RunGenerator(testSourceCode, validateOutputCompDiags); - bool success = RoslynTestUtils.CompareLines(expectedLines, r[0].SourceText, out string errorMessage); + ConfigBindingGenRunResult result = await RunGeneratorAndUpdateCompilation(testSourceCode); + result.ValidateDiagnostics(expectedDiags); + + SourceText resultSourceText = result.GeneratedSource.Value.SourceText; + bool resultEqualsBaseline = RoslynTestUtils.CompareLines(expectedLines, resultSourceText, out string errorMessage); #if UPDATE_BASELINES - if (!success) + if (!resultEqualsBaseline) { - const string envVarName = "RepoRootDir" - string errMessage = $"To update baselines, specify a '{envVarName}' environment variable. See this assembly's README.md doc for more details." + const string envVarName = "RepoRootDir"; + string errMessage = $"To update baselines, specify a '{envVarName}' environment variable. See this assembly's README.md doc for more details."; string? repoRootDir = Environment.GetEnvironmentVariable(envVarName); Assert.True(repoRootDir is not null, errMessage); - IEnumerable lines = r[0].SourceText.Lines.Select(l => l.ToString()); + IEnumerable lines = resultSourceText.Lines.Select(l => l.ToString()); string source = string.Join(Environment.NewLine, lines).TrimEnd(Environment.NewLine.ToCharArray()) + Environment.NewLine; path = Path.Combine($"{repoRootDir}\\src\\libraries\\Microsoft.Extensions.Configuration.Binder\\tests\\SourceGenerationTests\\", path); await File.WriteAllTextAsync(path, source).ConfigureAwait(false); - success = true; + resultEqualsBaseline = true; } #endif - Assert.Single(r); - (assessDiagnostics ?? 
((d) => Assert.Empty(d))).Invoke(d); - Assert.True(success, errorMessage); + Assert.True(resultEqualsBaseline, errorMessage); + + return result; } - private static async Task<(ImmutableArray, ImmutableArray)> RunGenerator( - string testSourceCode, - bool validateOutputCompDiags = false, + private static async Task RunGeneratorAndUpdateCompilation( + string source, LanguageVersion langVersion = LanguageVersion.CSharp12, - IEnumerable? references = null) + IEnumerable? assemblyReferences = null) { - using var workspace = RoslynTestUtils.CreateTestWorkspace(); - CSharpParseOptions parseOptions = new CSharpParseOptions(langVersion).WithFeatures(new[] { - new KeyValuePair("InterceptorsPreview", ""), - new KeyValuePair("InterceptorsPreviewNamespaces", "Microsoft.Extensions.Configuration.Binder.SourceGeneration") - }); - - Project proj = RoslynTestUtils.CreateTestProject(workspace, references ?? s_compilationAssemblyRefs, langVersion: langVersion) - .WithCompilationOptions(new CSharpCompilationOptions(OutputKind.DynamicallyLinkedLibrary).WithNullableContextOptions(NullableContextOptions.Annotations)) - .WithDocuments(new string[] { testSourceCode }) - .WithParseOptions(parseOptions); - - Assert.True(proj.Solution.Workspace.TryApplyChanges(proj.Solution)); - - Compilation comp = await proj.GetCompilationAsync(CancellationToken.None).ConfigureAwait(false); - CSharpGeneratorDriver cgd = CSharpGeneratorDriver.Create(new[] { new ConfigurationBindingGenerator().AsSourceGenerator() }, parseOptions: parseOptions); - GeneratorDriver gd = cgd.RunGeneratorsAndUpdateCompilation(comp, out Compilation outputCompilation, out _, CancellationToken.None); - GeneratorDriverRunResult runResult = gd.GetRunResult(); - - if (validateOutputCompDiags) - { - ImmutableArray diagnostics = outputCompilation.GetDiagnostics(); - Assert.False(diagnostics.Any(d => d.Severity > DiagnosticSeverity.Info)); - } - - return (runResult.Results[0].Diagnostics, runResult.Results[0].GeneratedSources); + 
ConfigBindingGenTestDriver driver = new ConfigBindingGenTestDriver(langVersion, assemblyReferences); + return await driver.RunGeneratorAndUpdateCompilation(source); } - public static List GetAssemblyRefsWithAdditional(params Type[] additional) + private static List GetAssemblyRefsWithAdditional(params Type[] additional) { List assemblies = new(s_compilationAssemblyRefs); assemblies.AddRange(additional.Select(t => t.Assembly)); return assemblies; } - public static HashSet GetFilteredAssemblyRefs(IEnumerable exclusions) + private static HashSet GetFilteredAssemblyRefs(IEnumerable exclusions) { HashSet assemblies = new(s_compilationAssemblyRefs); foreach (Type exclusion in exclusions) diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.Incremental.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.Incremental.cs new file mode 100644 index 0000000000000..aff9a0c20364c --- /dev/null +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.Incremental.cs @@ -0,0 +1,362 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
+ +using System.Threading.Tasks; +using Microsoft.CodeAnalysis; +using Microsoft.Extensions.Configuration.Binder.SourceGeneration; +using SourceGenerators.Tests; +using Xunit; + +namespace Microsoft.Extensions.SourceGeneration.Configuration.Binder.Tests +{ + public partial class ConfigurationBindingGeneratorTests : ConfigurationBinderTestsBase + { + [ActiveIssue("https://github.com/dotnet/runtime/issues/52062", TestPlatforms.Browser)] + public sealed class IncrementalTests + { + [Fact] + public async Task CompilingTheSameSourceResultsInEqualModels() + { + SourceGenerationSpec spec1 = (await new ConfigBindingGenTestDriver().RunGeneratorAndUpdateCompilation(BindCallSampleCode)).GenerationSpec; + SourceGenerationSpec spec2 = (await new ConfigBindingGenTestDriver().RunGeneratorAndUpdateCompilation(BindCallSampleCode)).GenerationSpec; + + Assert.NotSame(spec1, spec2); + GeneratorTestHelpers.AssertStructurallyEqual(spec1, spec2); + + Assert.Equal(spec1, spec2); + Assert.Equal(spec1.GetHashCode(), spec2.GetHashCode()); + } + + [Fact] + public async Task RunWithNoDiags_Then_NoEdit() + { + ConfigBindingGenTestDriver driver = new ConfigBindingGenTestDriver(); + + ConfigBindingGenRunResult result = await driver.RunGeneratorAndUpdateCompilation(BindCallSampleCode); + result.ValidateIncrementalResult(IncrementalStepRunReason.New, IncrementalStepRunReason.New); + + result = await driver.RunGeneratorAndUpdateCompilation(); + result.ValidateIncrementalResult(IncrementalStepRunReason.Modified, IncrementalStepRunReason.Unchanged); + } + + [Fact] + public async Task RunWithNoDiags_Then_ChangeInputOrder() + { + ConfigBindingGenTestDriver driver = new ConfigBindingGenTestDriver(); + + ConfigBindingGenRunResult result = await driver.RunGeneratorAndUpdateCompilation(BindCallSampleCode); + result.ValidateIncrementalResult(IncrementalStepRunReason.New, IncrementalStepRunReason.New); + + // We expect different spec because diag locations are different. 
+ result = await driver.RunGeneratorAndUpdateCompilation(BindCallSampleCodeVariant_ReorderedInvocations); + result.ValidateIncrementalResult(IncrementalStepRunReason.Modified, IncrementalStepRunReason.Modified); + + // We expect different spec because members are reordered. + result = await driver.RunGeneratorAndUpdateCompilation(BindCallSampleCodeVariant_ReorderedConfigTypeMembers); + result.ValidateIncrementalResult(IncrementalStepRunReason.Modified, IncrementalStepRunReason.Modified); + } + + [Fact] + public async Task RunWithNoDiags_Then_EditWithNoDiags() + { + ConfigBindingGenTestDriver driver = new ConfigBindingGenTestDriver(); + + ConfigBindingGenRunResult result = await driver.RunGeneratorAndUpdateCompilation(BindCallSampleCode); + result.ValidateIncrementalResult(IncrementalStepRunReason.New, IncrementalStepRunReason.New); + + result = await driver.RunGeneratorAndUpdateCompilation(BindCallSampleCodeVariant_WithDifferentConfigTypeName); + result.ValidateIncrementalResult(IncrementalStepRunReason.Modified, IncrementalStepRunReason.Modified); + } + + [Fact] + public async Task RunWithNoDiags_Then_EditWithDiags() + { + ConfigBindingGenTestDriver driver = new ConfigBindingGenTestDriver(); + + ConfigBindingGenRunResult result = await driver.RunGeneratorAndUpdateCompilation(BindCallSampleCode); + result.ValidateIncrementalResult(IncrementalStepRunReason.New, IncrementalStepRunReason.New); + + result = await driver.RunGeneratorAndUpdateCompilation(BindCallSampleCodeVariant_WithUnsupportedMember); + result.ValidateIncrementalResult(IncrementalStepRunReason.Modified, IncrementalStepRunReason.Modified); + } + + [Fact] + public async Task RunWithDiags_Then_NoEdit() + { + ConfigBindingGenTestDriver driver = new ConfigBindingGenTestDriver(); + + ConfigBindingGenRunResult result = await driver.RunGeneratorAndUpdateCompilation(BindCallSampleCodeVariant_WithUnsupportedMember); + result.ValidateIncrementalResult(IncrementalStepRunReason.New, IncrementalStepRunReason.New); + 
+ result = await driver.RunGeneratorAndUpdateCompilation(); + result.ValidateIncrementalResult(IncrementalStepRunReason.Modified, IncrementalStepRunReason.Unchanged); + } + + [Fact] + public async Task RunWithDiags_Then_ChangeInputOrder() + { + ConfigBindingGenTestDriver driver = new ConfigBindingGenTestDriver(); + + ConfigBindingGenRunResult result = await driver.RunGeneratorAndUpdateCompilation(BindCallSampleCodeVariant_WithUnsupportedMember); + result.ValidateIncrementalResult(IncrementalStepRunReason.New, IncrementalStepRunReason.New); + + // We expect different spec because diag locations are different. + result = await driver.RunGeneratorAndUpdateCompilation(BindCallSampleCodeVariant_WithUnsupportedMember_ReorderedInvocations); + result.ValidateIncrementalResult(IncrementalStepRunReason.Modified, IncrementalStepRunReason.Modified); + + // We expect different spec because members are reordered. + result = await driver.RunGeneratorAndUpdateCompilation(BindCallSampleCodeVariant_WithUnsupportedMember_ReorderedConfigTypeMembers); + result.ValidateIncrementalResult(IncrementalStepRunReason.Modified, IncrementalStepRunReason.Modified); + } + + [Fact] + public async Task RunWithDiags_Then_EditWithNoDiags() + { + ConfigBindingGenTestDriver driver = new ConfigBindingGenTestDriver(); + + ConfigBindingGenRunResult result = await driver.RunGeneratorAndUpdateCompilation(BindCallSampleCodeVariant_WithUnsupportedMember); + result.ValidateIncrementalResult(IncrementalStepRunReason.New, IncrementalStepRunReason.New); + + result = await driver.RunGeneratorAndUpdateCompilation(BindCallSampleCode); + result.ValidateIncrementalResult(IncrementalStepRunReason.Modified, IncrementalStepRunReason.Modified); + } + + [Fact] + public async Task RunWithDiags_Then_EditWithDiags() + { + ConfigBindingGenTestDriver driver = new ConfigBindingGenTestDriver(); + + ConfigBindingGenRunResult result = await driver.RunGeneratorAndUpdateCompilation(BindCallSampleCodeVariant_WithUnsupportedMember); + 
result.ValidateIncrementalResult(IncrementalStepRunReason.New, IncrementalStepRunReason.New); + + result = await driver.RunGeneratorAndUpdateCompilation(BindCallSampleCodeVariant_WithUnsupportedMember_WithDiffMemberName); + result.ValidateIncrementalResult(IncrementalStepRunReason.Modified, IncrementalStepRunReason.Modified); + } + } + + #region Incremental test sources. + /// + /// Keep in sync with . + /// + private const string BindCallSampleCodeVariant_ReorderedInvocations = """ + using System.Collections.Generic; + using Microsoft.Extensions.Configuration; + + public class Program + { + public static void Main() + { + ConfigurationBuilder configurationBuilder = new(); + IConfigurationRoot config = configurationBuilder.Build(); + + MyClass configObj = new(); + config.Bind(configObj, options => { }); + config.Bind("key", configObj); + config.Bind(configObj); + } + + public class MyClass + { + public string MyString { get; set; } + public int MyInt { get; set; } + public List MyList { get; set; } + public Dictionary MyDictionary { get; set; } + public Dictionary MyComplexDictionary { get; set; } + } + + public class MyClass2 { } + } + """; + + /// + /// Keep in sync with . + /// + private const string BindCallSampleCodeVariant_ReorderedConfigTypeMembers = """ + using System.Collections.Generic; + using Microsoft.Extensions.Configuration; + + public class Program + { + public static void Main() + { + ConfigurationBuilder configurationBuilder = new(); + IConfigurationRoot config = configurationBuilder.Build(); + + MyClass configObj = new(); + config.Bind(configObj, options => { }); + config.Bind("key", configObj); + config.Bind(configObj); + } + + public class MyClass + { + public List MyList { get; set; } + public Dictionary MyDictionary { get; set; } + public string MyString { get; set; } + public int MyInt { get; set; } + public Dictionary MyComplexDictionary { get; set; } + } + + public class MyClass2 { } + } + """; + + /// + /// Keep in sync with . 
+ /// + private const string BindCallSampleCodeVariant_WithDifferentConfigTypeName = """ + using System.Collections.Generic; + using Microsoft.Extensions.Configuration; + + public class Program + { + public static void Main() + { + ConfigurationBuilder configurationBuilder = new(); + IConfigurationRoot config = configurationBuilder.Build(); + + MyClass0 configObj = new(); + config.Bind(configObj, options => { }); + config.Bind("key", configObj); + config.Bind(configObj); + } + + public class MyClass0 + { + public List MyList { get; set; } + public Dictionary MyDictionary { get; set; } + public string MyString { get; set; } + public int MyInt { get; set; } + public Dictionary MyComplexDictionary { get; set; } + } + + public class MyClass2 { } + } + """; + + private const string BindCallSampleCodeVariant_WithUnsupportedMember = """ + using System.Collections.Generic; + using Microsoft.Extensions.Configuration; + + public class Program + { + public static void Main() + { + ConfigurationBuilder configurationBuilder = new(); + IConfigurationRoot config = configurationBuilder.Build(); + + MyClass configObj = new(); + config.Bind(configObj); + config.Bind(configObj, options => { }); + config.Bind("key", configObj); + } + + public class MyClass + { + public string MyString { get; set; } + public int MyInt { get; set; } + public List MyList { get; set; } + public Dictionary MyDictionary { get; set; } + public Dictionary MyComplexDictionary { get; set; } + public int[,] UnsupportedMember { get; set; } + } + + public class MyClass2 { } + } + """; + + private const string BindCallSampleCodeVariant_WithUnsupportedMember_ReorderedInvocations = """ + using System.Collections.Generic; + using Microsoft.Extensions.Configuration; + + public class Program + { + public static void Main() + { + ConfigurationBuilder configurationBuilder = new(); + IConfigurationRoot config = configurationBuilder.Build(); + + MyClass configObj = new(); + config.Bind("key", configObj); + 
config.Bind(configObj); + config.Bind(configObj, options => { }); + } + + public class MyClass + { + public string MyString { get; set; } + public int MyInt { get; set; } + public List MyList { get; set; } + public Dictionary MyDictionary { get; set; } + public Dictionary MyComplexDictionary { get; set; } + public int[,] UnsupportedMember { get; set; } + } + + public class MyClass2 { } + } + """; + + private const string BindCallSampleCodeVariant_WithUnsupportedMember_ReorderedConfigTypeMembers = """ + using System.Collections.Generic; + using Microsoft.Extensions.Configuration; + + public class Program + { + public static void Main() + { + ConfigurationBuilder configurationBuilder = new(); + IConfigurationRoot config = configurationBuilder.Build(); + + MyClass configObj = new(); + config.Bind("key", configObj); + config.Bind(configObj); + config.Bind(configObj, options => { }); + } + + public class MyClass + { + public string MyString { get; set; } + public int MyInt { get; set; } + public int[,] UnsupportedMember { get; set; } + public Dictionary MyDictionary { get; set; } + public Dictionary MyComplexDictionary { get; set; } + public List MyList { get; set; } + } + + public class MyClass2 { } + } + """; + + private const string BindCallSampleCodeVariant_WithUnsupportedMember_WithDiffMemberName = """ + using System.Collections.Generic; + using Microsoft.Extensions.Configuration; + + public class Program + { + public static void Main() + { + ConfigurationBuilder configurationBuilder = new(); + IConfigurationRoot config = configurationBuilder.Build(); + + MyClass configObj = new(); + config.Bind(configObj); + config.Bind(configObj, options => { }); + config.Bind("key", configObj); + } + + public class MyClass + { + public string MyString { get; set; } + public int MyInt { get; set; } + public List MyList { get; set; } + public Dictionary MyDictionary { get; set; } + public Dictionary MyComplexDictionary { get; set; } + public int[,] UnsupportedMember_DiffMemberName 
{ get; set; } + } + + public class MyClass2 { } + } + """; + #endregion Incremental test sources. + } +} diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.cs b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.cs index 846e64d904d53..d93607d376399 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.cs +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/GeneratorTests.cs @@ -27,10 +27,10 @@ public partial class ConfigurationBindingGeneratorTests : ConfigurationBinderTes [InlineData(LanguageVersion.CSharp10)] public async Task LangVersionMustBeCharp12OrHigher(LanguageVersion langVersion) { - var (d, r) = await RunGenerator(BindCallSampleCode, langVersion: langVersion); - Assert.Empty(r); + ConfigBindingGenRunResult result = await RunGeneratorAndUpdateCompilation(BindCallSampleCode, langVersion: langVersion); + Assert.False(result.GeneratedSource.HasValue); - Diagnostic diagnostic = Assert.Single(d); + Diagnostic diagnostic = Assert.Single(result.Diagnostics); Assert.True(diagnostic.Id == "SYSLIB1102"); Assert.Contains("C# 12", diagnostic.Descriptor.Title.ToString(CultureInfo.InvariantCulture)); Assert.Equal(DiagnosticSeverity.Error, diagnostic.Severity); @@ -75,11 +75,11 @@ public record struct MyRecordStruct { } } """; - var (d, r) = await RunGenerator(source); - Assert.Empty(r); - Assert.Equal(7, d.Count()); + ConfigBindingGenRunResult result = await RunGeneratorAndUpdateCompilation(source); + Assert.False(result.GeneratedSource.HasValue); + Assert.Equal(7, result.Diagnostics.Count()); - foreach (Diagnostic diagnostic in d) + foreach (Diagnostic diagnostic in result.Diagnostics) { Assert.True(diagnostic.Id == Diagnostics.ValueTypesInvalidForBind.Id); Assert.Contains(Diagnostics.ValueTypesInvalidForBind.Title, 
diagnostic.Descriptor.Title.ToString(CultureInfo.InvariantCulture)); @@ -111,11 +111,11 @@ public record struct MyRecordStruct { } } """; - var (d, r) = await RunGenerator(source); - Assert.Empty(r); - Assert.Equal(2, d.Count()); + ConfigBindingGenRunResult result = await RunGeneratorAndUpdateCompilation(source); + Assert.False(result.GeneratedSource.HasValue); + Assert.Equal(2, result.Diagnostics.Count()); - foreach (Diagnostic diagnostic in d) + foreach (Diagnostic diagnostic in result.Diagnostics) { Assert.True(diagnostic.Id == Diagnostics.CouldNotDetermineTypeInfo.Id); Assert.Contains(Diagnostics.CouldNotDetermineTypeInfo.Title, diagnostic.Descriptor.Title.ToString(CultureInfo.InvariantCulture)); @@ -163,11 +163,11 @@ public class MyClass { } } """; - var (d, r) = await RunGenerator(source); - Assert.Empty(r); - Assert.Equal(6, d.Count()); + ConfigBindingGenRunResult result = await RunGeneratorAndUpdateCompilation(source); + Assert.False(result.GeneratedSource.HasValue); + Assert.Equal(6, result.Diagnostics.Count()); - foreach (Diagnostic diagnostic in d) + foreach (Diagnostic diagnostic in result.Diagnostics) { Assert.True(diagnostic.Id == Diagnostics.CouldNotDetermineTypeInfo.Id); Assert.Contains(Diagnostics.CouldNotDetermineTypeInfo.Title, diagnostic.Descriptor.Title.ToString(CultureInfo.InvariantCulture)); @@ -218,22 +218,15 @@ public class MyClass0 { } async Task Test(bool expectOutput) { - var (d, r) = await RunGenerator(source, references: GetFilteredAssemblyRefs(exclusions)); - - Assert.Empty(d); - - if (expectOutput) - { - Assert.Single(r); - } - else - { - Assert.Empty(r); - } + ConfigBindingGenRunResult result = await RunGeneratorAndUpdateCompilation(source, assemblyReferences: GetFilteredAssemblyRefs(exclusions)); + Assert.Empty(result.Diagnostics); + Action ValidateSourceResult = expectOutput ? 
() => Assert.NotNull(result.GeneratedSource) : () => Assert.False(result.GeneratedSource.HasValue); + ValidateSourceResult(); } } [Fact] + [ActiveIssue("Work out why we aren't getting all the expected diagnostics.")] public async Task IssueDiagnosticsForAllOffendingCallsites() { string source = """ @@ -282,10 +275,10 @@ public class AnotherGraphWithUnsupportedMembers } """; - var (d, r) = await RunGenerator(source, references: GetAssemblyRefsWithAdditional(typeof(ImmutableArray<>), typeof(Encoding), typeof(JsonSerializer))); - Assert.Single(r); - Assert.Equal(47, d.Where(diag => diag.Id == Diagnostics.TypeNotSupported.Id).Count()); - Assert.Equal(44, d.Where(diag => diag.Id == Diagnostics.PropertyNotSupported.Id).Count()); + ConfigBindingGenRunResult result = await RunGeneratorAndUpdateCompilation(source, assemblyReferences: GetAssemblyRefsWithAdditional(typeof(ImmutableArray<>), typeof(Encoding), typeof(JsonSerializer))); + Assert.NotNull(result.GeneratedSource); + Assert.True(result.Diagnostics.Any(diag => diag.Id == Diagnostics.TypeNotSupported.Id)); + Assert.True(result.Diagnostics.Any(diag => diag.Id == Diagnostics.PropertyNotSupported.Id)); } } } diff --git a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Microsoft.Extensions.Configuration.Binder.SourceGeneration.Tests.csproj b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Microsoft.Extensions.Configuration.Binder.SourceGeneration.Tests.csproj index fc8db157eddee..848d93b32a475 100644 --- a/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Microsoft.Extensions.Configuration.Binder.SourceGeneration.Tests.csproj +++ b/src/libraries/Microsoft.Extensions.Configuration.Binder/tests/SourceGenerationTests/Microsoft.Extensions.Configuration.Binder.SourceGeneration.Tests.csproj @@ -2,8 +2,10 @@ $(NetCoreAppCurrent);$(NetFrameworkMinimum) true - - SYSLIB1100,SYSLIB1101 + + $(NoWarn);SYSLIB1100,SYSLIB1101 + + 
$(NoWarn);SYSLIB1103,SYSLIB1104 $(Features);InterceptorsPreview $(InterceptorsPreviewNamespaces);Microsoft.Extensions.Configuration.Binder.SourceGeneration @@ -22,6 +24,7 @@ + @@ -46,17 +49,16 @@ - + PreserveNewest + + diff --git a/src/libraries/Microsoft.Extensions.Hosting.WindowsServices/tests/Microsoft.Extensions.Hosting.WindowsServices.Tests.csproj b/src/libraries/Microsoft.Extensions.Hosting.WindowsServices/tests/Microsoft.Extensions.Hosting.WindowsServices.Tests.csproj index ee433d9207d1d..4f2edef0f83b7 100644 --- a/src/libraries/Microsoft.Extensions.Hosting.WindowsServices/tests/Microsoft.Extensions.Hosting.WindowsServices.Tests.csproj +++ b/src/libraries/Microsoft.Extensions.Hosting.WindowsServices/tests/Microsoft.Extensions.Hosting.WindowsServices.Tests.csproj @@ -2,9 +2,8 @@ - $(NetCoreAppCurrent)-windows;$(NetFrameworkMinimum) + $(NetCoreAppCurrent)-windows;$(NetFrameworkMinimum) true - true true true diff --git a/src/libraries/Microsoft.Extensions.Logging.Console/src/Microsoft.Extensions.Logging.Console.csproj b/src/libraries/Microsoft.Extensions.Logging.Console/src/Microsoft.Extensions.Logging.Console.csproj index 8ae9d3eaa61cf..ee8bc96621436 100644 --- a/src/libraries/Microsoft.Extensions.Logging.Console/src/Microsoft.Extensions.Logging.Console.csproj +++ b/src/libraries/Microsoft.Extensions.Logging.Console/src/Microsoft.Extensions.Logging.Console.csproj @@ -11,7 +11,8 @@ $(InterceptorsPreviewNamespaces);Microsoft.Extensions.Configuration.Binder.SourceGeneration true - true + + $(NoWarn);SYSLIB1100;SYSLIB1101 Console logger provider implementation for Microsoft.Extensions.Logging. 
diff --git a/src/libraries/Microsoft.Win32.Registry.AccessControl/src/Microsoft.Win32.Registry.AccessControl.csproj b/src/libraries/Microsoft.Win32.Registry.AccessControl/src/Microsoft.Win32.Registry.AccessControl.csproj index 93bef46c41d5a..7c5c26f01d893 100644 --- a/src/libraries/Microsoft.Win32.Registry.AccessControl/src/Microsoft.Win32.Registry.AccessControl.csproj +++ b/src/libraries/Microsoft.Win32.Registry.AccessControl/src/Microsoft.Win32.Registry.AccessControl.csproj @@ -1,6 +1,7 @@ - $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum) + $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum) + $(TargetFrameworks);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious) true true Provides support for managing access and audit control lists for Microsoft.Win32.RegistryKey. 
diff --git a/src/libraries/Microsoft.Win32.Registry/tests/Microsoft.Win32.Registry.Tests.csproj b/src/libraries/Microsoft.Win32.Registry/tests/Microsoft.Win32.Registry.Tests.csproj index 1feb97ef8419b..337fc6392ad1e 100644 --- a/src/libraries/Microsoft.Win32.Registry/tests/Microsoft.Win32.Registry.Tests.csproj +++ b/src/libraries/Microsoft.Win32.Registry/tests/Microsoft.Win32.Registry.Tests.csproj @@ -2,7 +2,6 @@ $(DefineConstants);REGISTRY_ASSEMBLY $(NetCoreAppCurrent)-windows - true true diff --git a/src/libraries/Microsoft.Win32.SystemEvents/src/Microsoft.Win32.SystemEvents.csproj b/src/libraries/Microsoft.Win32.SystemEvents/src/Microsoft.Win32.SystemEvents.csproj index 8a6fa8533f9f0..e5586e668d250 100644 --- a/src/libraries/Microsoft.Win32.SystemEvents/src/Microsoft.Win32.SystemEvents.csproj +++ b/src/libraries/Microsoft.Win32.SystemEvents/src/Microsoft.Win32.SystemEvents.csproj @@ -1,7 +1,8 @@ - $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum) + $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum) + $(TargetFrameworks);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious) true true Provides access to Windows system event notifications. 
diff --git a/src/libraries/Microsoft.Win32.SystemEvents/tests/Microsoft.Win32.SystemEvents.Tests.csproj b/src/libraries/Microsoft.Win32.SystemEvents/tests/Microsoft.Win32.SystemEvents.Tests.csproj index 1a8e130bee427..049bcbb6157a2 100644 --- a/src/libraries/Microsoft.Win32.SystemEvents/tests/Microsoft.Win32.SystemEvents.Tests.csproj +++ b/src/libraries/Microsoft.Win32.SystemEvents/tests/Microsoft.Win32.SystemEvents.Tests.csproj @@ -3,7 +3,6 @@ $(NetCoreAppCurrent)-windows;$(NetFrameworkMinimum) true true - true true true $(NetCoreAppCurrent);$(NetCoreAppCurrent)-windows - true ..\src\Resources\Strings.resx diff --git a/src/libraries/System.Data.Odbc/src/System.Data.Odbc.csproj b/src/libraries/System.Data.Odbc/src/System.Data.Odbc.csproj index 30399b2f3b692..d6cd9ee16a09e 100644 --- a/src/libraries/System.Data.Odbc/src/System.Data.Odbc.csproj +++ b/src/libraries/System.Data.Odbc/src/System.Data.Odbc.csproj @@ -1,7 +1,8 @@ - $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-unix;$(NetCoreAppCurrent);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious)-unix;$(NetCoreAppPrevious);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum)-unix;netstandard2.0;$(NetFrameworkMinimum) + $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-unix;$(NetCoreAppCurrent);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum)-unix;$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum) + $(TargetFrameworks);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious)-unix;$(NetCoreAppPrevious) true $(NoWarn);CA2249;CA1838 false diff --git a/src/libraries/System.Data.OleDb/src/System.Data.OleDb.csproj b/src/libraries/System.Data.OleDb/src/System.Data.OleDb.csproj index bb549009216e3..8d4bee087440e 100644 --- a/src/libraries/System.Data.OleDb/src/System.Data.OleDb.csproj +++ b/src/libraries/System.Data.OleDb/src/System.Data.OleDb.csproj @@ -1,6 +1,7 @@ - 
$(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum) + $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum) + $(TargetFrameworks);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious) true $(NoWarn);CA2249 diff --git a/src/libraries/System.Diagnostics.DiagnosticSource/src/System/Diagnostics/Metrics/Instrument.common.cs b/src/libraries/System.Diagnostics.DiagnosticSource/src/System/Diagnostics/Metrics/Instrument.common.cs index 6129c0686b250..bc813c0d4cfe4 100644 --- a/src/libraries/System.Diagnostics.DiagnosticSource/src/System/Diagnostics/Metrics/Instrument.common.cs +++ b/src/libraries/System.Diagnostics.DiagnosticSource/src/System/Diagnostics/Metrics/Instrument.common.cs @@ -11,6 +11,7 @@ namespace System.Diagnostics.Metrics /// /// This class supports only the following generic parameter types: , , , , , , and /// + [DebuggerDisplay("Name = {Name}, Meter = {Meter.Name}")] public abstract partial class Instrument : Instrument where T : struct { /// diff --git a/src/libraries/System.Diagnostics.DiagnosticSource/src/System/Diagnostics/Metrics/Meter.cs b/src/libraries/System.Diagnostics.DiagnosticSource/src/System/Diagnostics/Metrics/Meter.cs index 60314b1d5b4c0..a6ce8e93f4816 100644 --- a/src/libraries/System.Diagnostics.DiagnosticSource/src/System/Diagnostics/Metrics/Meter.cs +++ b/src/libraries/System.Diagnostics.DiagnosticSource/src/System/Diagnostics/Metrics/Meter.cs @@ -10,6 +10,7 @@ namespace System.Diagnostics.Metrics /// /// Meter is the class responsible for creating and tracking the Instruments. 
/// + [DebuggerDisplay("Name = {Name}, Instruments = {_instruments.Count}")] public class Meter : IDisposable { private static readonly List s_allMeters = new List(); diff --git a/src/libraries/System.Diagnostics.EventLog/src/System.Diagnostics.EventLog.csproj b/src/libraries/System.Diagnostics.EventLog/src/System.Diagnostics.EventLog.csproj index cc73f2a6daa4e..a03e6ca7e49f8 100644 --- a/src/libraries/System.Diagnostics.EventLog/src/System.Diagnostics.EventLog.csproj +++ b/src/libraries/System.Diagnostics.EventLog/src/System.Diagnostics.EventLog.csproj @@ -1,6 +1,7 @@ - $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum) + $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum) + $(TargetFrameworks);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious) true true Provides the System.Diagnostics.EventLog class, which allows the applications to use the Windows event log service. 
diff --git a/src/libraries/System.Diagnostics.EventLog/tests/System.Diagnostics.EventLog.Tests.csproj b/src/libraries/System.Diagnostics.EventLog/tests/System.Diagnostics.EventLog.Tests.csproj index e3fb65148f8bb..3ce9ebf7bfb92 100644 --- a/src/libraries/System.Diagnostics.EventLog/tests/System.Diagnostics.EventLog.Tests.csproj +++ b/src/libraries/System.Diagnostics.EventLog/tests/System.Diagnostics.EventLog.Tests.csproj @@ -2,7 +2,6 @@ $(NetCoreAppCurrent)-windows;$(NetFrameworkMinimum) true - true diff --git a/src/libraries/System.Diagnostics.FileVersionInfo/tests/System.Diagnostics.FileVersionInfo.Tests/System.Diagnostics.FileVersionInfo.Tests.csproj b/src/libraries/System.Diagnostics.FileVersionInfo/tests/System.Diagnostics.FileVersionInfo.Tests/System.Diagnostics.FileVersionInfo.Tests.csproj index 12807ef4fc413..41cc56f94fdf7 100644 --- a/src/libraries/System.Diagnostics.FileVersionInfo/tests/System.Diagnostics.FileVersionInfo.Tests/System.Diagnostics.FileVersionInfo.Tests.csproj +++ b/src/libraries/System.Diagnostics.FileVersionInfo/tests/System.Diagnostics.FileVersionInfo.Tests/System.Diagnostics.FileVersionInfo.Tests.csproj @@ -3,7 +3,6 @@ $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-unix;$(NetCoreAppCurrent)-browser true true - true @@ -28,7 +27,7 @@ - diff --git a/src/libraries/System.Diagnostics.PerformanceCounter/src/System.Diagnostics.PerformanceCounter.csproj b/src/libraries/System.Diagnostics.PerformanceCounter/src/System.Diagnostics.PerformanceCounter.csproj index c0435f2293bb1..93f9a48b7857a 100644 --- a/src/libraries/System.Diagnostics.PerformanceCounter/src/System.Diagnostics.PerformanceCounter.csproj +++ b/src/libraries/System.Diagnostics.PerformanceCounter/src/System.Diagnostics.PerformanceCounter.csproj @@ -1,7 +1,8 @@ - $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum) + 
$(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum) + $(TargetFrameworks);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious) true true Provides the System.Diagnostics.PerformanceCounter class, which allows access to Windows performance counters. diff --git a/src/libraries/System.Diagnostics.Process/tests/System.Diagnostics.Process.Tests.csproj b/src/libraries/System.Diagnostics.Process/tests/System.Diagnostics.Process.Tests.csproj index c3f01adc3bc67..bac367d5b37fc 100644 --- a/src/libraries/System.Diagnostics.Process/tests/System.Diagnostics.Process.Tests.csproj +++ b/src/libraries/System.Diagnostics.Process/tests/System.Diagnostics.Process.Tests.csproj @@ -4,7 +4,6 @@ true $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-unix;$(NetCoreAppCurrent)-browser true - true diff --git a/src/libraries/System.DirectoryServices.AccountManagement/src/System.DirectoryServices.AccountManagement.csproj b/src/libraries/System.DirectoryServices.AccountManagement/src/System.DirectoryServices.AccountManagement.csproj index ce36658a6eaac..0cced36afcebe 100644 --- a/src/libraries/System.DirectoryServices.AccountManagement/src/System.DirectoryServices.AccountManagement.csproj +++ b/src/libraries/System.DirectoryServices.AccountManagement/src/System.DirectoryServices.AccountManagement.csproj @@ -1,7 +1,8 @@ - $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.0 + $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.0 + $(TargetFrameworks);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious) true true $(NoWarn);CA2249 diff --git a/src/libraries/System.DirectoryServices.Protocols/src/System.DirectoryServices.Protocols.csproj b/src/libraries/System.DirectoryServices.Protocols/src/System.DirectoryServices.Protocols.csproj index 
11f18688ec399..5ba775da24ae5 100644 --- a/src/libraries/System.DirectoryServices.Protocols/src/System.DirectoryServices.Protocols.csproj +++ b/src/libraries/System.DirectoryServices.Protocols/src/System.DirectoryServices.Protocols.csproj @@ -1,7 +1,8 @@ - $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-osx;$(NetCoreAppCurrent)-linux;$(NetCoreAppCurrent);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious)-osx;$(NetCoreAppPrevious)-linux;$(NetCoreAppPrevious);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum)-osx;$(NetCoreAppMinimum)-linux;$(NetCoreAppMinimum);netstandard2.0 + $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-osx;$(NetCoreAppCurrent)-linux;$(NetCoreAppCurrent);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum)-osx;$(NetCoreAppMinimum)-linux;$(NetCoreAppMinimum);netstandard2.0 + $(TargetFrameworks);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious)-osx;$(NetCoreAppPrevious)-linux;$(NetCoreAppPrevious) true true true diff --git a/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/ldap/LdapConnection.Linux.cs b/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/ldap/LdapConnection.Linux.cs index 0ce464ce2084d..3993da251428a 100644 --- a/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/ldap/LdapConnection.Linux.cs +++ b/src/libraries/System.DirectoryServices.Protocols/src/System/DirectoryServices/Protocols/ldap/LdapConnection.Linux.cs @@ -60,8 +60,11 @@ private int InternalConnectToServer() } temp.Append(scheme); temp.Append(servers[i]); - temp.Append(':'); - temp.Append(directoryIdentifier.PortNumber); + if (!servers[i].Contains(':')) + { + temp.Append(':'); + temp.Append(directoryIdentifier.PortNumber); + } } if (temp.Length != 0) { diff --git a/src/libraries/System.DirectoryServices.Protocols/tests/DirectoryServicesProtocolsTests.cs b/src/libraries/System.DirectoryServices.Protocols/tests/DirectoryServicesProtocolsTests.cs index 
b31cf294f4d21..00433bae9875c 100644 --- a/src/libraries/System.DirectoryServices.Protocols/tests/DirectoryServicesProtocolsTests.cs +++ b/src/libraries/System.DirectoryServices.Protocols/tests/DirectoryServicesProtocolsTests.cs @@ -14,12 +14,12 @@ namespace System.DirectoryServices.Protocols.Tests { public partial class DirectoryServicesProtocolsTests { - internal static bool IsLdapConfigurationExist => LdapConfiguration.Configuration != null; - internal static bool IsActiveDirectoryServer => IsLdapConfigurationExist && LdapConfiguration.Configuration.IsActiveDirectoryServer; + internal static bool LdapConfigurationExists => LdapConfiguration.Configuration != null; + internal static bool IsActiveDirectoryServer => LdapConfigurationExists && LdapConfiguration.Configuration.IsActiveDirectoryServer; - internal static bool IsServerSideSortSupported => IsLdapConfigurationExist && LdapConfiguration.Configuration.SupportsServerSideSort; + internal static bool IsServerSideSortSupported => LdapConfigurationExists && LdapConfiguration.Configuration.SupportsServerSideSort; - [ConditionalFact(nameof(IsLdapConfigurationExist))] + [ConditionalFact(nameof(LdapConfigurationExists))] public void TestInvalidFilter() { using LdapConnection connection = GetConnection(); @@ -33,7 +33,7 @@ public void TestInvalidFilter() Assert.Equal(/* LdapError.FilterError */ 0x57, ex.ErrorCode); } - [ConditionalFact(nameof(IsLdapConfigurationExist))] + [ConditionalFact(nameof(LdapConfigurationExists))] public void TestInvalidSearchDn() { using LdapConnection connection = GetConnection(); @@ -47,7 +47,7 @@ public void TestInvalidSearchDn() Assert.Equal(ResultCode.InvalidDNSyntax, ex.Response.ResultCode); } - [ConditionalFact(nameof(IsLdapConfigurationExist))] + [ConditionalFact(nameof(LdapConfigurationExists))] public void TestUnavailableCriticalExtension() { using LdapConnection connection = GetConnection(); @@ -63,7 +63,7 @@ public void TestUnavailableCriticalExtension() 
Assert.Equal(ResultCode.UnavailableCriticalExtension, ex.Response.ResultCode); } - [ConditionalFact(nameof(IsLdapConfigurationExist))] + [ConditionalFact(nameof(LdapConfigurationExists))] public void TestUnavailableNonCriticalExtension() { using LdapConnection connection = GetConnection(); @@ -74,10 +74,22 @@ public void TestUnavailableNonCriticalExtension() _ = (SearchResponse) connection.SendRequest(searchRequest); // Does not throw } + + [ConditionalFact(nameof(LdapConfigurationExists))] + public void TestServerWithPortNumber() + { + using LdapConnection connection = GetConnection($"{LdapConfiguration.Configuration.ServerName}:{LdapConfiguration.Configuration.Port}"); + + var searchRequest = new SearchRequest(LdapConfiguration.Configuration.SearchDn, "(objectClass=*)", SearchScope.Subtree); + + _ = (SearchResponse)connection.SendRequest(searchRequest); + // Shall succeed + } + [InlineData(60)] [InlineData(0)] [InlineData(-60)] - [ConditionalTheory(nameof(IsLdapConfigurationExist))] + [ConditionalTheory(nameof(LdapConfigurationExists))] public void TestSearchWithTimeLimit(int timeLimit) { using LdapConnection connection = GetConnection(); @@ -95,7 +107,7 @@ public void TestSearchWithTimeLimit(int timeLimit) } } - [ConditionalFact(nameof(IsLdapConfigurationExist))] + [ConditionalFact(nameof(LdapConfigurationExists))] public void TestAddingOU() { using (LdapConnection connection = GetConnection()) @@ -117,7 +129,7 @@ public void TestAddingOU() } } - [ConditionalFact(nameof(IsLdapConfigurationExist))] + [ConditionalFact(nameof(LdapConfigurationExists))] public void TestDeleteOU() { using (LdapConnection connection = GetConnection()) @@ -142,7 +154,7 @@ public void TestDeleteOU() } } - [ConditionalFact(nameof(IsLdapConfigurationExist))] + [ConditionalFact(nameof(LdapConfigurationExists))] public void TestAddAndModifyAttribute() { using (LdapConnection connection = GetConnection()) @@ -177,7 +189,7 @@ public void TestAddAndModifyAttribute() } } - 
[ConditionalFact(nameof(IsLdapConfigurationExist))] + [ConditionalFact(nameof(LdapConfigurationExists))] public void TestNestedOUs() { using (LdapConnection connection = GetConnection()) @@ -208,7 +220,7 @@ public void TestNestedOUs() } } - [ConditionalFact(nameof(IsLdapConfigurationExist))] + [ConditionalFact(nameof(LdapConfigurationExists))] public void TestAddUser() { using (LdapConnection connection = GetConnection()) @@ -260,7 +272,7 @@ public void TestAddUser() } } - [ConditionalFact(nameof(IsLdapConfigurationExist))] + [ConditionalFact(nameof(LdapConfigurationExists))] public void TestAddingMultipleAttributes() { using (LdapConnection connection = GetConnection()) @@ -343,7 +355,7 @@ public void TestAddingMultipleAttributes() } } - [ConditionalFact(nameof(IsLdapConfigurationExist))] + [ConditionalFact(nameof(LdapConfigurationExists))] public void TestMoveAndRenameUser() { using (LdapConnection connection = GetConnection()) @@ -402,7 +414,7 @@ public void TestMoveAndRenameUser() } } - [ConditionalFact(nameof(IsLdapConfigurationExist))] + [ConditionalFact(nameof(LdapConfigurationExists))] public void TestAsyncSearch() { using (LdapConnection connection = GetConnection()) @@ -513,7 +525,7 @@ public static IEnumerable TestCompareRequestTheory_TestData() yield return new object[] { "http://example.com/", "http://false/"u8.ToArray(), ResultCode.CompareFalse }; } - [ConditionalTheory(nameof(IsLdapConfigurationExist))] + [ConditionalTheory(nameof(LdapConfigurationExists))] [MemberData(nameof(TestCompareRequestTheory_TestData))] public void TestCompareRequestTheory(object value, object assertion, ResultCode compareResult) { @@ -546,7 +558,7 @@ public void TestCompareRequestTheory(object value, object assertion, ResultCode } } - [ConditionalFact(nameof(IsLdapConfigurationExist))] + [ConditionalFact(nameof(LdapConfigurationExists))] public void TestCompareRequest() { using (LdapConnection connection = GetConnection()) @@ -670,7 +682,7 @@ public void TestSortedSearch() 
} } - [ConditionalFact(nameof(IsLdapConfigurationExist))] + [ConditionalFact(nameof(LdapConfigurationExists))] public void TestMultipleServerBind() { LdapDirectoryIdentifier directoryIdentifier = string.IsNullOrEmpty(LdapConfiguration.Configuration.Port) ? @@ -774,13 +786,25 @@ private SearchResultEntry SearchUser(LdapConnection connection, string rootDn, s return null; } + private LdapConnection GetConnection(string server) + { + LdapDirectoryIdentifier directoryIdentifier = new LdapDirectoryIdentifier(server, fullyQualifiedDnsHostName: true, connectionless: false); + + return GetConnection(directoryIdentifier); + } + private LdapConnection GetConnection() { LdapDirectoryIdentifier directoryIdentifier = string.IsNullOrEmpty(LdapConfiguration.Configuration.Port) ? - new LdapDirectoryIdentifier(LdapConfiguration.Configuration.ServerName, true, false) : + new LdapDirectoryIdentifier(LdapConfiguration.Configuration.ServerName, fullyQualifiedDnsHostName: true, connectionless: false) : new LdapDirectoryIdentifier(LdapConfiguration.Configuration.ServerName, int.Parse(LdapConfiguration.Configuration.Port, NumberStyles.None, CultureInfo.InvariantCulture), - true, false); + fullyQualifiedDnsHostName: true, connectionless: false); + return GetConnection(directoryIdentifier); + } + + private static LdapConnection GetConnection(LdapDirectoryIdentifier directoryIdentifier) + { NetworkCredential credential = new NetworkCredential(LdapConfiguration.Configuration.UserName, LdapConfiguration.Configuration.Password); LdapConnection connection = new LdapConnection(directoryIdentifier, credential) diff --git a/src/libraries/System.DirectoryServices/src/System.DirectoryServices.csproj b/src/libraries/System.DirectoryServices/src/System.DirectoryServices.csproj index 943cc1bc374ec..8bd18417f83a2 100644 --- a/src/libraries/System.DirectoryServices/src/System.DirectoryServices.csproj +++ b/src/libraries/System.DirectoryServices/src/System.DirectoryServices.csproj @@ -1,7 +1,8 @@ - 
$(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.0 + $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.0 + $(TargetFrameworks);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious) true true $(NoWarn);IDE0059;IDE0060;CA1822 diff --git a/src/libraries/System.Formats.Cbor/tests/Reader/CborReaderTests.Tag.cs b/src/libraries/System.Formats.Cbor/tests/Reader/CborReaderTests.Tag.cs index af9adfbe67b50..85535aec0fbdb 100644 --- a/src/libraries/System.Formats.Cbor/tests/Reader/CborReaderTests.Tag.cs +++ b/src/libraries/System.Formats.Cbor/tests/Reader/CborReaderTests.Tag.cs @@ -2,8 +2,11 @@ // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; +using System.Globalization; using System.Linq; using System.Numerics; +using System.Threading; +using Microsoft.DotNet.RemoteExecutor; using Test.Cryptography; using Xunit; @@ -192,6 +195,30 @@ public static void ReadDateTimeOffset_SingleValue_HappyPath(string expectedValue Assert.Equal(expectedValue.Offset, result.Offset); } + [ConditionalFact(typeof(RemoteExecutor), nameof(RemoteExecutor.IsSupported))] + public static void ReadDateTimeOffset_IsCultureInvariant() + { + // Regression test for https://github.com/dotnet/runtime/pull/92539 + RemoteExecutor.Invoke(static () => + { + DateTimeOffset expectedValue = DateTimeOffset.Parse("2020-04-09T14:31:21.3535941+01:00", CultureInfo.InvariantCulture); + byte[] data = "c07821323032302d30342d30395431343a33313a32312e333533353934312b30313a3030".HexToByteArray(); + + // Install a non-Gregorian calendar + var culture = new CultureInfo("he-IL"); + culture.DateTimeFormat.Calendar = new HebrewCalendar(); + Thread.CurrentThread.CurrentCulture = culture; + + var reader = new CborReader(data); + + DateTimeOffset result = reader.ReadDateTimeOffset(); + + 
Assert.Equal(CborReaderState.Finished, reader.PeekState()); + Assert.Equal(expectedValue, result); + Assert.Equal(expectedValue.Offset, result.Offset); + }).Dispose(); + } + [Theory] [InlineData("c01a514b67b0")] // string datetime tag with unix time payload public static void ReadDateTimeOffset_InvalidTagPayload_ShouldThrowCborContentException(string hexEncoding) @@ -206,6 +233,7 @@ public static void ReadDateTimeOffset_InvalidTagPayload_ShouldThrowCborContentEx [Theory] [InlineData("c07330392f30342f323032302031393a35313a3530")] // 0("09/04/2020 19:51:50") [InlineData("c06e4c617374204368726973746d6173")] // 0("Last Christmas") + [InlineData("c07828d7aad7a922d7a42dd796272dd79822d7955431343a33313a32312e333533353934312b30313a3030")] // Non-Gregorian calendar date. public static void ReadDateTimeOffset_InvalidDateString_ShouldThrowCborContentException(string hexEncoding) { byte[] encoding = hexEncoding.HexToByteArray(); diff --git a/src/libraries/System.Formats.Cbor/tests/System.Formats.Cbor.Tests.csproj b/src/libraries/System.Formats.Cbor/tests/System.Formats.Cbor.Tests.csproj index 2ade4c628c7fb..bf7b2f2b4aac5 100644 --- a/src/libraries/System.Formats.Cbor/tests/System.Formats.Cbor.Tests.csproj +++ b/src/libraries/System.Formats.Cbor/tests/System.Formats.Cbor.Tests.csproj @@ -1,6 +1,7 @@ - + $(NetCoreAppCurrent);$(NetFrameworkCurrent) + true enable $(NoWarn);CS8002 diff --git a/src/libraries/System.Formats.Cbor/tests/Writer/CborWriterTests.Tag.cs b/src/libraries/System.Formats.Cbor/tests/Writer/CborWriterTests.Tag.cs index 3413eadc84cc3..a521f3c1de7ac 100644 --- a/src/libraries/System.Formats.Cbor/tests/Writer/CborWriterTests.Tag.cs +++ b/src/libraries/System.Formats.Cbor/tests/Writer/CborWriterTests.Tag.cs @@ -2,8 +2,11 @@ // The .NET Foundation licenses this file to you under the MIT license. 
using System.Collections.Generic; +using System.Globalization; using System.Linq; using System.Numerics; +using System.Threading; +using Microsoft.DotNet.RemoteExecutor; using Test.Cryptography; using Xunit; @@ -88,6 +91,29 @@ public static void WriteDateTimeOffset_SingleValue_HappyPath(string valueString, AssertHelper.HexEqual(expectedHexEncoding.HexToByteArray(), encoding); } + [ConditionalFact(typeof(RemoteExecutor), nameof(RemoteExecutor.IsSupported))] + public static void WriteDateTimeOffset_IsCultureInvariant() + { + // Regression test for https://github.com/dotnet/runtime/pull/92539 + RemoteExecutor.Invoke(static () => + { + DateTimeOffset value = DateTimeOffset.Parse("2020-04-09T14:31:21.3535941+01:00", CultureInfo.InvariantCulture); + string expectedHexEncoding = "c07821323032302d30342d30395431343a33313a32312e333533353934312b30313a3030"; + + // Install a non-Gregorian calendar + var culture = new CultureInfo("he-IL"); + culture.DateTimeFormat.Calendar = new HebrewCalendar(); + Thread.CurrentThread.CurrentCulture = culture; + + var writer = new CborWriter(); + + writer.WriteDateTimeOffset(value); + + byte[] encoding = writer.Encode(); + AssertHelper.HexEqual(expectedHexEncoding.HexToByteArray(), encoding); + }).Dispose(); + } + [Theory] [InlineData(1363896240, "c11a514b67b0")] [InlineData(1586439081, "c11a5e8f23a9")] diff --git a/src/libraries/System.Formats.Tar/tests/System.Formats.Tar.Tests.csproj b/src/libraries/System.Formats.Tar/tests/System.Formats.Tar.Tests.csproj index f3a341d4adc63..144749edded03 100644 --- a/src/libraries/System.Formats.Tar/tests/System.Formats.Tar.Tests.csproj +++ b/src/libraries/System.Formats.Tar/tests/System.Formats.Tar.Tests.csproj @@ -3,7 +3,6 @@ $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-unix true $(MSBuildProjectDirectory)\..\src\Resources\Strings.resx - true true diff --git a/src/libraries/System.IO.Compression.ZipFile/tests/System.IO.Compression.ZipFile.Tests.csproj 
b/src/libraries/System.IO.Compression.ZipFile/tests/System.IO.Compression.ZipFile.Tests.csproj index be0d1f463fad7..e07ea71438a4f 100644 --- a/src/libraries/System.IO.Compression.ZipFile/tests/System.IO.Compression.ZipFile.Tests.csproj +++ b/src/libraries/System.IO.Compression.ZipFile/tests/System.IO.Compression.ZipFile.Tests.csproj @@ -1,6 +1,5 @@ - true true true $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-unix;$(NetCoreAppCurrent)-browser diff --git a/src/libraries/System.IO.FileSystem.Watcher/tests/System.IO.FileSystem.Watcher.Tests.csproj b/src/libraries/System.IO.FileSystem.Watcher/tests/System.IO.FileSystem.Watcher.Tests.csproj index 25892f3a7d0b5..1e43fca4491b3 100644 --- a/src/libraries/System.IO.FileSystem.Watcher/tests/System.IO.FileSystem.Watcher.Tests.csproj +++ b/src/libraries/System.IO.FileSystem.Watcher/tests/System.IO.FileSystem.Watcher.Tests.csproj @@ -4,7 +4,6 @@ true $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-linux;$(NetCoreAppCurrent)-osx;$(NetCoreAppCurrent)-maccatalyst;$(NetCoreAppCurrent)-freebsd true - true diff --git a/src/libraries/System.IO.FileSystem/tests/DisabledFileLockingTests/System.IO.FileSystem.DisabledFileLocking.Tests.csproj b/src/libraries/System.IO.FileSystem/tests/DisabledFileLockingTests/System.IO.FileSystem.DisabledFileLocking.Tests.csproj index 2ade72e35e2ca..8f474ca378fc9 100644 --- a/src/libraries/System.IO.FileSystem/tests/DisabledFileLockingTests/System.IO.FileSystem.DisabledFileLocking.Tests.csproj +++ b/src/libraries/System.IO.FileSystem/tests/DisabledFileLockingTests/System.IO.FileSystem.DisabledFileLocking.Tests.csproj @@ -2,7 +2,6 @@ true true - true $(NetCoreAppCurrent)-unix diff --git a/src/libraries/System.IO.FileSystem/tests/System.IO.FileSystem.Tests.csproj b/src/libraries/System.IO.FileSystem/tests/System.IO.FileSystem.Tests.csproj index 7547ddc53b2bb..069f1cc08949a 100644 --- a/src/libraries/System.IO.FileSystem/tests/System.IO.FileSystem.Tests.csproj +++ 
b/src/libraries/System.IO.FileSystem/tests/System.IO.FileSystem.Tests.csproj @@ -5,7 +5,6 @@ $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-unix;$(NetCoreAppCurrent)-browser --working-dir=/test-dir - true diff --git a/src/libraries/System.IO.MemoryMappedFiles/tests/System.IO.MemoryMappedFiles.Tests.csproj b/src/libraries/System.IO.MemoryMappedFiles/tests/System.IO.MemoryMappedFiles.Tests.csproj index 7905e969060b9..15406d7bdf622 100644 --- a/src/libraries/System.IO.MemoryMappedFiles/tests/System.IO.MemoryMappedFiles.Tests.csproj +++ b/src/libraries/System.IO.MemoryMappedFiles/tests/System.IO.MemoryMappedFiles.Tests.csproj @@ -3,7 +3,6 @@ true true $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-unix;$(NetCoreAppCurrent)-browser - true diff --git a/src/libraries/System.IO.Pipes/tests/System.IO.Pipes.Tests.csproj b/src/libraries/System.IO.Pipes/tests/System.IO.Pipes.Tests.csproj index c1118486917b8..ef930d859fe19 100644 --- a/src/libraries/System.IO.Pipes/tests/System.IO.Pipes.Tests.csproj +++ b/src/libraries/System.IO.Pipes/tests/System.IO.Pipes.Tests.csproj @@ -4,7 +4,6 @@ true $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-unix;$(NetCoreAppCurrent) true - true diff --git a/src/libraries/System.IO.Ports/src/System.IO.Ports.csproj b/src/libraries/System.IO.Ports/src/System.IO.Ports.csproj index 7ef8f91d1f6cc..b40713ebe5805 100644 --- a/src/libraries/System.IO.Ports/src/System.IO.Ports.csproj +++ b/src/libraries/System.IO.Ports/src/System.IO.Ports.csproj @@ -1,6 +1,7 @@ - $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-unix;$(NetCoreAppCurrent);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious)-unix;$(NetCoreAppPrevious);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum)-unix;$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum) + $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-unix;$(NetCoreAppCurrent);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum)-unix;$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum) + 
$(TargetFrameworks);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious)-unix;$(NetCoreAppPrevious) true $(DefineConstants);SERIAL_PORTS true diff --git a/src/libraries/System.Management/src/System.Management.csproj b/src/libraries/System.Management/src/System.Management.csproj index d07fdf39c2d0a..b68827d868adc 100644 --- a/src/libraries/System.Management/src/System.Management.csproj +++ b/src/libraries/System.Management/src/System.Management.csproj @@ -1,6 +1,7 @@ - $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.0 + $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.0 + $(TargetFrameworks);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious) true $(NoWarn);0618 $(NoWarn);IDE0059;IDE0060;CA1822 diff --git a/src/libraries/System.Net.Http.WinHttpHandler/src/System.Net.Http.WinHttpHandler.csproj b/src/libraries/System.Net.Http.WinHttpHandler/src/System.Net.Http.WinHttpHandler.csproj index 4ae07fa2f55bc..1ba7ebc7eade7 100644 --- a/src/libraries/System.Net.Http.WinHttpHandler/src/System.Net.Http.WinHttpHandler.csproj +++ b/src/libraries/System.Net.Http.WinHttpHandler/src/System.Net.Http.WinHttpHandler.csproj @@ -1,6 +1,7 @@ - $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum) + $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent);$(NetCoreAppMinimum)-windows;$(NetCoreAppMinimum);netstandard2.0;$(NetFrameworkMinimum) + $(TargetFrameworks);$(NetCoreAppPrevious)-windows;$(NetCoreAppPrevious) true true true diff --git a/src/libraries/System.Net.Http.WinHttpHandler/tests/UnitTests/System.Net.Http.WinHttpHandler.Unit.Tests.csproj b/src/libraries/System.Net.Http.WinHttpHandler/tests/UnitTests/System.Net.Http.WinHttpHandler.Unit.Tests.csproj index 
b459c0e2c9545..4ef078b0efa9a 100644 --- a/src/libraries/System.Net.Http.WinHttpHandler/tests/UnitTests/System.Net.Http.WinHttpHandler.Unit.Tests.csproj +++ b/src/libraries/System.Net.Http.WinHttpHandler/tests/UnitTests/System.Net.Http.WinHttpHandler.Unit.Tests.csproj @@ -5,7 +5,6 @@ ../../src/Resources/Strings.resx $(NetCoreAppCurrent)-windows UNITTEST - true @@ -29,7 +28,7 @@ Link="Common\System\CharArrayHelpers.cs" /> - diff --git a/src/libraries/System.Net.Http/tests/FunctionalTests/System.Net.Http.Functional.Tests.csproj b/src/libraries/System.Net.Http/tests/FunctionalTests/System.Net.Http.Functional.Tests.csproj index cdbc9bff807bf..fdb94e9caff24 100644 --- a/src/libraries/System.Net.Http/tests/FunctionalTests/System.Net.Http.Functional.Tests.csproj +++ b/src/libraries/System.Net.Http/tests/FunctionalTests/System.Net.Http.Functional.Tests.csproj @@ -7,7 +7,6 @@ true true $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-linux;$(NetCoreAppCurrent)-browser;$(NetCoreAppCurrent)-osx - true true true diff --git a/src/libraries/System.Net.Http/tests/UnitTests/System.Net.Http.Unit.Tests.csproj b/src/libraries/System.Net.Http/tests/UnitTests/System.Net.Http.Unit.Tests.csproj index 10221bf21670c..b8767659ce00d 100755 --- a/src/libraries/System.Net.Http/tests/UnitTests/System.Net.Http.Unit.Tests.csproj +++ b/src/libraries/System.Net.Http/tests/UnitTests/System.Net.Http.Unit.Tests.csproj @@ -4,7 +4,6 @@ true true $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-unix;$(NetCoreAppCurrent)-browser;$(NetCoreAppCurrent)-osx;$(NetCoreAppCurrent)-maccatalyst;$(NetCoreAppCurrent)-ios;$(NetCoreAppCurrent)-tvos;$(NetCoreAppCurrent)-android - true diff --git a/src/libraries/System.Net.Mail/tests/Functional/System.Net.Mail.Functional.Tests.csproj b/src/libraries/System.Net.Mail/tests/Functional/System.Net.Mail.Functional.Tests.csproj index b2ce00275613a..6ca3cb08be0d9 100644 --- a/src/libraries/System.Net.Mail/tests/Functional/System.Net.Mail.Functional.Tests.csproj +++ 
b/src/libraries/System.Net.Mail/tests/Functional/System.Net.Mail.Functional.Tests.csproj @@ -2,7 +2,6 @@ true $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-unix;$(NetCoreAppCurrent)-browser;$(NetCoreAppCurrent)-osx;$(NetCoreAppCurrent)-ios;$(NetCoreAppCurrent)-android - true true true true diff --git a/src/libraries/System.Net.Mail/tests/Unit/System.Net.Mail.Unit.Tests.csproj b/src/libraries/System.Net.Mail/tests/Unit/System.Net.Mail.Unit.Tests.csproj index 9ea5763a4e514..78b8b0bcbc366 100644 --- a/src/libraries/System.Net.Mail/tests/Unit/System.Net.Mail.Unit.Tests.csproj +++ b/src/libraries/System.Net.Mail/tests/Unit/System.Net.Mail.Unit.Tests.csproj @@ -3,7 +3,6 @@ true ../../src/Resources/Strings.resx $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-unix;$(NetCoreAppCurrent)-browser - true @@ -141,7 +140,7 @@ Link="Common\System\Net\NegotiationInfoClass.cs" /> - diff --git a/src/libraries/System.Net.NameResolution/tests/PalTests/System.Net.NameResolution.Pal.Tests.csproj b/src/libraries/System.Net.NameResolution/tests/PalTests/System.Net.NameResolution.Pal.Tests.csproj index 5256dd8848232..6e08a5f973e09 100644 --- a/src/libraries/System.Net.NameResolution/tests/PalTests/System.Net.NameResolution.Pal.Tests.csproj +++ b/src/libraries/System.Net.NameResolution/tests/PalTests/System.Net.NameResolution.Pal.Tests.csproj @@ -4,7 +4,6 @@ ../../src/Resources/Strings.resx $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-unix;$(NetCoreAppCurrent)-browser true - true diff --git a/src/libraries/System.Net.NetworkInformation/src/System/Net/NetworkInformation/StringParsingHelpers.Statistics.cs b/src/libraries/System.Net.NetworkInformation/src/System/Net/NetworkInformation/StringParsingHelpers.Statistics.cs index 80badaef43542..315970f9fa11f 100644 --- a/src/libraries/System.Net.NetworkInformation/src/System/Net/NetworkInformation/StringParsingHelpers.Statistics.cs +++ 
b/src/libraries/System.Net.NetworkInformation/src/System/Net/NetworkInformation/StringParsingHelpers.Statistics.cs @@ -407,15 +407,25 @@ internal static IPInterfaceStatisticsTable ParseInterfaceStatisticsTableFromFile { sr.ReadLine(); sr.ReadLine(); - int index = 0; + Span pieces = stackalloc Range[18]; // [0]-[16] used, +1 to ensure any additional segment goes into [17] while (!sr.EndOfStream) { string line = sr.ReadLine()!; if (line.Contains(name)) { - Span pieces = stackalloc Range[18]; // [0] skipped, [1]-[16] used, +1 to ensure any additional segment goes into [17] ReadOnlySpan lineSpan = line; - pieces = pieces.Slice(0, lineSpan.SplitAny(pieces, " :", StringSplitOptions.RemoveEmptyEntries)); + int pieceCount = lineSpan.SplitAny(pieces, " :", StringSplitOptions.RemoveEmptyEntries); + + if (pieceCount < 17) + { + continue; + } + + if (!lineSpan[pieces[0]].SequenceEqual(name)) + { + // The adapter name doesn't exactly match. + continue; + } return new IPInterfaceStatisticsTable() { @@ -438,7 +448,6 @@ internal static IPInterfaceStatisticsTable ParseInterfaceStatisticsTableFromFile CompressedPacketsTransmitted = ParseUInt64AndClampToInt64(lineSpan[pieces[16]]), }; } - index += 1; } throw ExceptionHelper.CreateForParseFailure(); diff --git a/src/libraries/System.Net.NetworkInformation/tests/FunctionalTests/NetworkFiles/dev b/src/libraries/System.Net.NetworkInformation/tests/FunctionalTests/NetworkFiles/dev index d87ec31fead0b..922eae91096fc 100644 --- a/src/libraries/System.Net.NetworkInformation/tests/FunctionalTests/NetworkFiles/dev +++ b/src/libraries/System.Net.NetworkInformation/tests/FunctionalTests/NetworkFiles/dev @@ -1,4 +1,5 @@ Inter-| Receive | Transmit face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed +wlan0a: 20000 394 2 4 6 8 10 12 429496730000 208 1 2 3 4 5 6 wlan0: 26622 394 2 4 6 8 10 12 429496730000 208 1 2 3 4 5 6 lo: 18446744073709551615 302 0 0 0 0 0 0 30008 302 0 0 0 0 0 0 diff 
--git a/src/libraries/System.Net.Ping/tests/FunctionalTests/System.Net.Ping.Functional.Tests.csproj b/src/libraries/System.Net.Ping/tests/FunctionalTests/System.Net.Ping.Functional.Tests.csproj index 1187130b5aaa1..bdacb51543d8c 100644 --- a/src/libraries/System.Net.Ping/tests/FunctionalTests/System.Net.Ping.Functional.Tests.csproj +++ b/src/libraries/System.Net.Ping/tests/FunctionalTests/System.Net.Ping.Functional.Tests.csproj @@ -4,7 +4,6 @@ true true true - true diff --git a/src/libraries/System.Net.Primitives/tests/PalTests/System.Net.Primitives.Pal.Tests.csproj b/src/libraries/System.Net.Primitives/tests/PalTests/System.Net.Primitives.Pal.Tests.csproj index 03c17baf2ff96..90fbc1e3d31ba 100644 --- a/src/libraries/System.Net.Primitives/tests/PalTests/System.Net.Primitives.Pal.Tests.csproj +++ b/src/libraries/System.Net.Primitives/tests/PalTests/System.Net.Primitives.Pal.Tests.csproj @@ -3,7 +3,6 @@ true ../../src/Resources/Strings.resx $(NetCoreAppCurrent)-windows;$(NetCoreAppCurrent)-unix;$(NetCoreAppCurrent)-browser - true Input span arguments must all have the same length. - \ No newline at end of file + + The destination span may only overlap with an input span if the two spans start at the same memory location. + + diff --git a/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/TensorPrimitives.cs b/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/TensorPrimitives.cs index d28d4bacafdb8..b1b96e55bb44f 100644 --- a/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/TensorPrimitives.cs +++ b/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/TensorPrimitives.cs @@ -1,236 +1,152 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + namespace System.Numerics.Tensors { /// Performs primitive tensor operations over spans of memory. 
public static partial class TensorPrimitives { - /// Computes the element-wise result of: + . - /// The first tensor, represented as a span. - /// The second tensor, represented as a span. - /// The destination tensor, represented as a span. - /// Length of '' must be same as length of ''. - /// Destination is too short. - /// This method effectively does [i] = [i] + [i]. - public static unsafe void Add(ReadOnlySpan x, ReadOnlySpan y, Span destination) => - InvokeSpanSpanIntoSpan(x, y, destination); - - /// Computes the element-wise result of: + . - /// The first tensor, represented as a span. - /// The second tensor, represented as a scalar. - /// The destination tensor, represented as a span. - /// Destination is too short. - /// This method effectively does [i] = [i] + . - public static void Add(ReadOnlySpan x, float y, Span destination) => - InvokeSpanScalarIntoSpan(x, y, destination); - - /// Computes the element-wise result of: - . - /// The first tensor, represented as a span. - /// The second tensor, represented as a scalar. - /// The destination tensor, represented as a span. - /// Length of '' must be same as length of ''. - /// Destination is too short. - /// This method effectively does [i] = [i] - [i]. - public static void Subtract(ReadOnlySpan x, ReadOnlySpan y, Span destination) => - InvokeSpanSpanIntoSpan(x, y, destination); - - /// Computes the element-wise result of: - . - /// The first tensor, represented as a span. - /// The second tensor, represented as a scalar. - /// The destination tensor, represented as a span. - /// Destination is too short. - /// This method effectively does [i] = [i] - . - public static void Subtract(ReadOnlySpan x, float y, Span destination) => - InvokeSpanScalarIntoSpan(x, y, destination); - - /// Computes the element-wise result of: * . - /// The first tensor, represented as a span. - /// The second tensor, represented as a span. - /// The destination tensor, represented as a span. 
- /// Length of '' must be same as length of ''. - /// Destination is too short. - /// This method effectively does [i] = [i] * . - public static void Multiply(ReadOnlySpan x, ReadOnlySpan y, Span destination) => - InvokeSpanSpanIntoSpan(x, y, destination); - - /// Computes the element-wise result of: * . - /// The first tensor, represented as a span. - /// The second tensor, represented as a scalar. + /// Computes the element-wise absolute value of each single-precision floating-point number in the specified tensor. + /// The tensor, represented as a span. /// The destination tensor, represented as a span. /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. /// - /// This method effectively does [i] = [i] * . - /// This method corresponds to the scal method defined by BLAS1. + /// + /// This method effectively computes [i] = MathF.Abs([i]). + /// + /// + /// The absolute value of a is its numeric value without its sign. For example, the absolute value of both 1.2e-03 and -1.2e03 is 1.2e03. + /// + /// + /// If a value is equal to or , the result stored into the corresponding destination location is set to . + /// If a value is equal to , the result stored into the corresponding destination location is the original NaN value with the sign bit removed. + /// /// - public static void Multiply(ReadOnlySpan x, float y, Span destination) => - InvokeSpanScalarIntoSpan(x, y, destination); + public static void Abs(ReadOnlySpan x, Span destination) => + InvokeSpanIntoSpan(x, destination); - /// Computes the element-wise result of: / . + /// Computes the element-wise addition of single-precision floating-point numbers in the specified tensors. /// The first tensor, represented as a span. /// The second tensor, represented as a span. /// The destination tensor, represented as a span. - /// Length of '' must be same as length of ''. + /// Length of must be same as length of . /// Destination is too short. 
- /// This method effectively does [i] = [i] / . - public static void Divide(ReadOnlySpan x, ReadOnlySpan y, Span destination) => - InvokeSpanSpanIntoSpan(x, y, destination); + /// and reference overlapping memory locations and do not begin at the same location. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = [i] + [i]. + /// + /// + /// If either of the element-wise input values is equal to , the resulting element-wise value is also NaN. + /// + /// + public static unsafe void Add(ReadOnlySpan x, ReadOnlySpan y, Span destination) => + InvokeSpanSpanIntoSpan(x, y, destination); - /// Computes the element-wise result of: / . + /// Computes the element-wise addition of single-precision floating-point numbers in the specified tensors. /// The first tensor, represented as a span. /// The second tensor, represented as a scalar. /// The destination tensor, represented as a span. /// Destination is too short. - /// This method effectively does [i] = [i] / . - public static void Divide(ReadOnlySpan x, float y, Span destination) => - InvokeSpanScalarIntoSpan(x, y, destination); - - /// Computes the element-wise result of: -. - /// The tensor, represented as a span. - /// The destination tensor, represented as a span. - /// Destination is too short. - /// This method effectively does [i] = -[i]. - public static void Negate(ReadOnlySpan x, Span destination) => - InvokeSpanIntoSpan(x, destination); - - /// Computes the element-wise result of: MathF.Abs(). - /// The tensor, represented as a span. - /// The destination tensor, represented as a span. - /// Destination is too short. - /// This method effectively does [i] = MathF.Abs([i]). - public static void Abs(ReadOnlySpan x, Span destination) => - InvokeSpanIntoSpan(x, destination); + /// and reference overlapping memory locations and do not begin at the same location. 
+ /// + /// + /// This method effectively computes [i] = [i] + . + /// + /// + /// If either of the element-wise input values is equal to , the resulting element-wise value is also NaN. + /// + /// + public static void Add(ReadOnlySpan x, float y, Span destination) => + InvokeSpanScalarIntoSpan(x, y, destination); - /// Computes the element-wise result of: ( + ) * . + /// Computes the element-wise result of ( + ) * for the specified tensors. /// The first tensor, represented as a span. /// The second tensor, represented as a span. /// The third tensor, represented as a span. /// The destination tensor, represented as a span. - /// Length of '' must be same as length of ''. - /// Length of '' must be same as length of ''. + /// Length of must be same as length of and the length of . /// Destination is too short. - /// This method effectively does [i] = ([i] + [i]) * [i]. + /// and reference overlapping memory locations and do not begin at the same location. + /// and reference overlapping memory locations and do not begin at the same location. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = ([i] + [i]) * [i]. + /// + /// + /// If any of the element-wise input values is equal to , the resulting element-wise value is also NaN. + /// + /// public static void AddMultiply(ReadOnlySpan x, ReadOnlySpan y, ReadOnlySpan multiplier, Span destination) => InvokeSpanSpanSpanIntoSpan(x, y, multiplier, destination); - /// Computes the element-wise result of: ( + ) * . + /// Computes the element-wise result of ( + ) * for the specified tensors. /// The first tensor, represented as a span. /// The second tensor, represented as a span. /// The third tensor, represented as a scalar. /// The destination tensor, represented as a span. - /// Length of '' must be same as length of ''. + /// Length of must be same as length of . /// Destination is too short. 
- /// This method effectively does [i] = ([i] + [i]) * . + /// and reference overlapping memory locations and do not begin at the same location. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = ([i] + [i]) * . + /// + /// + /// If any of the element-wise input values is equal to , the resulting element-wise value is also NaN. + /// + /// public static void AddMultiply(ReadOnlySpan x, ReadOnlySpan y, float multiplier, Span destination) => InvokeSpanSpanScalarIntoSpan(x, y, multiplier, destination); - /// Computes the element-wise result of: ( + ) * . + /// Computes the element-wise result of ( + ) * for the specified tensors. /// The first tensor, represented as a span. /// The second tensor, represented as a scalar. /// The third tensor, represented as a span. /// The destination tensor, represented as a span. - /// Length of '' must be same as length of ''. + /// Length of must be same as length of . /// Destination is too short. - /// This method effectively does [i] = ([i] + ) * [i]. + /// and reference overlapping memory locations and do not begin at the same location. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = ([i] + ) * [i]. + /// + /// + /// If any of the element-wise input values is equal to , the resulting element-wise value is also NaN. + /// + /// public static void AddMultiply(ReadOnlySpan x, float y, ReadOnlySpan multiplier, Span destination) => InvokeSpanScalarSpanIntoSpan(x, y, multiplier, destination); - /// Computes the element-wise result of: ( * ) + . - /// The first tensor, represented as a span. - /// The second tensor, represented as a span. - /// The third tensor, represented as a span. - /// The destination tensor, represented as a span. - /// Length of '' must be same as length of ''. - /// Length of '' must be same as length of ''. 
- /// Destination is too short. - /// This method effectively does [i] = ([i] * [i]) + [i]. - public static void MultiplyAdd(ReadOnlySpan x, ReadOnlySpan y, ReadOnlySpan addend, Span destination) => - InvokeSpanSpanSpanIntoSpan(x, y, addend, destination); - - /// Computes the element-wise result of: ( * ) + . - /// The first tensor, represented as a span. - /// The second tensor, represented as a span. - /// The third tensor, represented as a span. + /// Computes the element-wise hyperbolic cosine of each single-precision floating-point radian angle in the specified tensor. + /// The tensor, represented as a span. /// The destination tensor, represented as a span. - /// Length of '' must be same as length of ''. /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. /// - /// This method effectively does [i] = ([i] * [i]) + . - /// This method corresponds to the axpy method defined by BLAS1. + /// + /// This method effectively computes [i] = .Cosh([i]). + /// + /// + /// If a value is equal to or , the result stored into the corresponding destination location is set to . + /// If a value is equal to , the result stored into the corresponding destination location is also NaN. + /// + /// + /// The angles in x must be in radians. Use or multiply by /180 to convert degrees to radians. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// /// - public static void MultiplyAdd(ReadOnlySpan x, ReadOnlySpan y, float addend, Span destination) => - InvokeSpanSpanScalarIntoSpan(x, y, addend, destination); - - /// Computes the element-wise result of: ( * ) + . - /// The first tensor, represented as a span. - /// The second tensor, represented as a span. - /// The third tensor, represented as a span. - /// The destination tensor, represented as a span. 
- /// Length of '' must be same as length of ''. - /// Destination is too short. - /// This method effectively does [i] = ([i] * ) + [i]. - public static void MultiplyAdd(ReadOnlySpan x, float y, ReadOnlySpan addend, Span destination) => - InvokeSpanScalarSpanIntoSpan(x, y, addend, destination); - - /// Computes the element-wise result of: pow(e, ). - /// The tensor, represented as a span. - /// The destination tensor, represented as a span. - /// Destination is too short. - /// This method effectively does [i] = .Exp([i]). - public static void Exp(ReadOnlySpan x, Span destination) - { - if (x.Length > destination.Length) - { - ThrowHelper.ThrowArgument_DestinationTooShort(); - } - - for (int i = 0; i < x.Length; i++) - { - destination[i] = MathF.Exp(x[i]); - } - } - - /// Computes the element-wise result of: ln(). - /// The tensor, represented as a span. - /// The destination tensor, represented as a span. - /// Destination is too short. - /// This method effectively does [i] = .Log([i]). - public static void Log(ReadOnlySpan x, Span destination) - { - if (x.Length > destination.Length) - { - ThrowHelper.ThrowArgument_DestinationTooShort(); - } - - for (int i = 0; i < x.Length; i++) - { - destination[i] = MathF.Log(x[i]); - } - } - - /// Computes the element-wise result of: log2(). - /// The tensor, represented as a span. - /// The destination tensor, represented as a span. - /// Destination is too short. - /// This method effectively does [i] = .Log2([i]). - public static void Log2(ReadOnlySpan x, Span destination) - { - if (x.Length > destination.Length) - { - ThrowHelper.ThrowArgument_DestinationTooShort(); - } - - for (int i = 0; i < x.Length; i++) - { - destination[i] = Log2(x[i]); - } - } - - /// Computes the element-wise result of: cosh(). - /// The tensor, represented as a span. - /// The destination tensor, represented as a span. - /// Destination is too short. - /// This method effectively does [i] = .Cosh([i]). 
public static void Cosh(ReadOnlySpan x, Span destination) { if (x.Length > destination.Length) @@ -238,60 +154,40 @@ public static void Cosh(ReadOnlySpan x, Span destination) ThrowHelper.ThrowArgument_DestinationTooShort(); } - for (int i = 0; i < x.Length; i++) - { - destination[i] = MathF.Cosh(x[i]); - } - } - - /// Computes the element-wise result of: sinh(). - /// The tensor, represented as a span. - /// The destination tensor, represented as a span. - /// Destination is too short. - /// This method effectively does [i] = .Sinh([i]). - public static void Sinh(ReadOnlySpan x, Span destination) - { - if (x.Length > destination.Length) - { - ThrowHelper.ThrowArgument_DestinationTooShort(); - } - - for (int i = 0; i < x.Length; i++) - { - destination[i] = MathF.Sinh(x[i]); - } - } - - /// Computes the element-wise result of: tanh(). - /// The tensor, represented as a span. - /// The destination tensor, represented as a span. - /// Destination is too short. - /// This method effectively does [i] = .Tanh([i]). - public static void Tanh(ReadOnlySpan x, Span destination) - { - if (x.Length > destination.Length) - { - ThrowHelper.ThrowArgument_DestinationTooShort(); - } + ValidateInputOutputSpanNonOverlapping(x, destination); for (int i = 0; i < x.Length; i++) { - destination[i] = MathF.Tanh(x[i]); + destination[i] = MathF.Cosh(x[i]); } } - /// Computes the cosine similarity between two non-zero vectors. + /// Computes the cosine similarity between the two specified non-empty, equal-length tensors of single-precision floating-point numbers. /// The first tensor, represented as a span. /// The second tensor, represented as a span. - /// The cosine similarity between the two vectors. - /// Length of '' must be same as length of ''. - /// '' and '' must not be empty. + /// The cosine similarity of the two tensors. + /// Length of must be same as length of . + /// and must not be empty. 
+ /// + /// + /// This method effectively computes TensorPrimitives.Dot(x, y) / (MathF.Sqrt(TensorPrimitives.SumOfSquares(x)) * MathF.Sqrt(TensorPrimitives.SumOfSquares(y)). + /// + /// + /// If any element in either input tensor is equal to , , or , + /// NaN is returned. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// public static float CosineSimilarity(ReadOnlySpan x, ReadOnlySpan y) { if (x.IsEmpty) { ThrowHelper.ThrowArgument_SpansMustBeNonEmpty(); } + if (x.Length != y.Length) { ThrowHelper.ThrowArgument_SpansMustHaveSameLength(); @@ -300,408 +196,170 @@ public static float CosineSimilarity(ReadOnlySpan x, ReadOnlySpan return CosineSimilarityCore(x, y); } - /// - /// Compute the distance between two points in Euclidean space. - /// + /// Computes the distance between two points, specified as non-empty, equal-length tensors of single-precision floating-point numbers, in Euclidean space. /// The first tensor, represented as a span. /// The second tensor, represented as a span. /// The Euclidean distance. - /// Length of '' must be same as length of ''. - /// '' and '' must not be empty. + /// Length of must be same as length of . + /// and must not be empty. + /// + /// + /// This method effectively computes the equivalent of: + /// + /// Span<float> difference = ...; + /// TensorPrimitives.Subtract(x, y, difference); + /// float result = MathF.Sqrt(TensorPrimitives.SumOfSquares(difference)); + /// + /// but without requiring additional temporary storage for the intermediate differences. + /// + /// + /// If any element in either input tensor is equal to , NaN is returned. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. 
+ /// + /// public static float Distance(ReadOnlySpan x, ReadOnlySpan y) { if (x.IsEmpty) { ThrowHelper.ThrowArgument_SpansMustBeNonEmpty(); } + if (x.Length != y.Length) { ThrowHelper.ThrowArgument_SpansMustHaveSameLength(); } - return MathF.Sqrt(Aggregate(0f, x, y)); + return MathF.Sqrt(Aggregate(x, y)); } - /// - /// A mathematical operation that takes two vectors and returns a scalar. - /// + /// Computes the element-wise division of single-precision floating-point numbers in the specified tensors. + /// The first tensor, represented as a span. + /// The second tensor, represented as a span. + /// The destination tensor, represented as a span. + /// Length of must be same as length of . + /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = [i] / [i]. + /// + /// + /// If either of the element-wise input values is equal to , the resulting element-wise value is also NaN. + /// + /// + public static void Divide(ReadOnlySpan x, ReadOnlySpan y, Span destination) => + InvokeSpanSpanIntoSpan(x, y, destination); + + /// Computes the element-wise division of single-precision floating-point numbers in the specified tensors. + /// The first tensor, represented as a span. + /// The second tensor, represented as a scalar. + /// The destination tensor, represented as a span. + /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = [i] / . + /// + /// + /// If either of the element-wise input values is equal to , the resulting element-wise value is also NaN. 
+ /// + /// + public static void Divide(ReadOnlySpan x, float y, Span destination) => + InvokeSpanScalarIntoSpan(x, y, destination); + + /// Computes the dot product of two tensors containing single-precision floating-point numbers. /// The first tensor, represented as a span. /// The second tensor, represented as a span. /// The dot product. - /// Length of '' must be same as length of ''. - public static float Dot(ReadOnlySpan x, ReadOnlySpan y) // BLAS1: dot + /// Length of must be same as length of . + /// + /// + /// This method effectively computes the equivalent of: + /// + /// Span<float> products = ...; + /// TensorPrimitives.Multiply(x, y, products); + /// float result = TensorPrimitives.Sum(products); + /// + /// but without requiring additional temporary storage for the intermediate products. It corresponds to the dot method defined by BLAS1. + /// + /// + /// If any of the input elements is equal to , the resulting value is also NaN. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static float Dot(ReadOnlySpan x, ReadOnlySpan y) { if (x.Length != y.Length) { ThrowHelper.ThrowArgument_SpansMustHaveSameLength(); } - return Aggregate(0f, x, y); - } - - /// - /// A mathematical operation that takes a vector and returns the L2 norm. - /// - /// The first tensor, represented as a span. - /// The L2 norm. - public static float Norm(ReadOnlySpan x) // BLAS1: nrm2 - { - return MathF.Sqrt(Aggregate(0f, x)); + return Aggregate(x, y); } - /// - /// A function that takes a collection of real numbers and returns a probability distribution. - /// - /// The first tensor, represented as a span. - /// The destination tensor. + /// Computes the element-wise result of raising e to the single-precision floating-point number powers in the specified tensor. 
+ /// The tensor, represented as a span. + /// The destination tensor, represented as a span. /// Destination is too short. - /// '' must not be empty. - public static void SoftMax(ReadOnlySpan x, Span destination) + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = .Exp([i]). + /// + /// + /// If a value equals or , the result stored into the corresponding destination location is set to NaN. + /// If a value equals , the result stored into the corresponding destination location is set to 0. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static void Exp(ReadOnlySpan x, Span destination) { - if (x.IsEmpty) - { - ThrowHelper.ThrowArgument_SpansMustBeNonEmpty(); - } if (x.Length > destination.Length) { ThrowHelper.ThrowArgument_DestinationTooShort(); } - float expSum = 0f; - - for (int i = 0; i < x.Length; i++) - { - expSum += MathF.Exp(x[i]); - } + ValidateInputOutputSpanNonOverlapping(x, destination); for (int i = 0; i < x.Length; i++) { - destination[i] = MathF.Exp(x[i]) / expSum; + destination[i] = MathF.Exp(x[i]); } } - /// - /// A function that takes a real number and returns a value between 0 and 1. - /// - /// The first tensor, represented as a span. - /// The destination tensor. - /// Destination is too short. - /// '' must not be empty. - public static void Sigmoid(ReadOnlySpan x, Span destination) + /// Searches for the index of the largest single-precision floating-point number in the specified tensor. + /// The tensor, represented as a span. + /// The index of the maximum element in , or -1 if is empty. + /// + /// + /// The determination of the maximum element matches the IEEE 754:2019 `maximum` function. 
If any value equal to + /// is present, the index of the first is returned. Positive 0 is considered greater than negative 0. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static unsafe int IndexOfMax(ReadOnlySpan x) { - if (x.IsEmpty) - { - ThrowHelper.ThrowArgument_SpansMustBeNonEmpty(); - } - if (x.Length > destination.Length) - { - ThrowHelper.ThrowArgument_DestinationTooShort(); - } + int result = -1; - for (int i = 0; i < x.Length; i++) + if (!x.IsEmpty) { - destination[i] = 1f / (1 + MathF.Exp(-x[i])); - } - } - - /// Computes the maximum element in . - /// The tensor, represented as a span. - /// The maximum element in . - /// Length of '' must be greater than zero. - public static float Max(ReadOnlySpan x) - { - if (x.IsEmpty) - { - ThrowHelper.ThrowArgument_SpansMustBeNonEmpty(); - } - - float result = float.NegativeInfinity; - - for (int i = 0; i < x.Length; i++) - { - // This matches the IEEE 754:2019 `maximum` function. - // It propagates NaN inputs back to the caller and - // otherwise returns the greater of the inputs. - // It treats +0 as greater than -0 as per the specification. - - float current = x[i]; - - if (current != result) - { - if (float.IsNaN(current)) - { - return current; - } - - if (result < current) - { - result = current; - } - } - else if (IsNegative(result)) - { - result = current; - } - } - - return result; - } - - /// Computes the element-wise result of: MathF.Max(, ). - /// The first tensor, represented as a span. - /// The second tensor, represented as a span. - /// The destination tensor, represented as a span. - /// Length of '' must be same as length of ''. - /// Destination is too short. - /// This method effectively does [i] = MathF.Max([i], [i]). 
- public static void Max(ReadOnlySpan x, ReadOnlySpan y, Span destination) - { - if (x.Length != y.Length) - { - ThrowHelper.ThrowArgument_SpansMustHaveSameLength(); - } - - if (x.Length > destination.Length) - { - ThrowHelper.ThrowArgument_DestinationTooShort(); - } - - for (int i = 0; i < x.Length; i++) - { - destination[i] = MathF.Max(x[i], y[i]); - } - } - - /// Computes the minimum element in . - /// The tensor, represented as a span. - /// The minimum element in . - /// Length of '' must be greater than zero. - public static float Min(ReadOnlySpan x) - { - if (x.IsEmpty) - { - ThrowHelper.ThrowArgument_SpansMustBeNonEmpty(); - } - - float result = float.PositiveInfinity; - - for (int i = 0; i < x.Length; i++) - { - // This matches the IEEE 754:2019 `minimum` function - // It propagates NaN inputs back to the caller and - // otherwise returns the lesser of the inputs. - // It treats +0 as greater than -0 as per the specification. - - float current = x[i]; - - if (current != result) - { - if (float.IsNaN(current)) - { - return current; - } - - if (current < result) - { - result = current; - } - } - else if (IsNegative(current)) - { - result = current; - } - } - - return result; - } - - /// Computes the element-wise result of: MathF.Min(, ). - /// The first tensor, represented as a span. - /// The second tensor, represented as a span. - /// The destination tensor, represented as a span. - /// Length of '' must be same as length of ''. - /// Destination is too short. - /// This method effectively does [i] = MathF.Min([i], [i]). - public static void Min(ReadOnlySpan x, ReadOnlySpan y, Span destination) - { - if (x.Length != y.Length) - { - ThrowHelper.ThrowArgument_SpansMustHaveSameLength(); - } - - if (x.Length > destination.Length) - { - ThrowHelper.ThrowArgument_DestinationTooShort(); - } - - for (int i = 0; i < x.Length; i++) - { - destination[i] = MathF.Min(x[i], y[i]); - } - } - - /// Computes the maximum magnitude of any element in . 
- /// The tensor, represented as a span. - /// The maximum magnitude of any element in . - /// Length of '' must be greater than zero. - public static float MaxMagnitude(ReadOnlySpan x) - { - if (x.IsEmpty) - { - ThrowHelper.ThrowArgument_SpansMustBeNonEmpty(); - } - - float result = float.NegativeInfinity; - float resultMag = float.NegativeInfinity; - - for (int i = 0; i < x.Length; i++) - { - // This matches the IEEE 754:2019 `maximumMagnitude` function. - // It propagates NaN inputs back to the caller and - // otherwise returns the input with a greater magnitude. - // It treats +0 as greater than -0 as per the specification. - - float current = x[i]; - float currentMag = Math.Abs(current); - - if (currentMag != resultMag) - { - if (float.IsNaN(currentMag)) - { - return currentMag; - } - - if (resultMag < currentMag) - { - result = current; - resultMag = currentMag; - } - } - else if (IsNegative(result)) - { - result = current; - resultMag = currentMag; - } - } - - return result; - } - - /// Computes the element-wise result of: MathF.MaxMagnitude(, ). - /// The first tensor, represented as a span. - /// The second tensor, represented as a span. - /// The destination tensor, represented as a span. - /// Length of '' must be same as length of ''. - /// Destination is too short. - /// This method effectively does [i] = MathF.MaxMagnitude([i], [i]). - public static void MaxMagnitude(ReadOnlySpan x, ReadOnlySpan y, Span destination) - { - if (x.Length != y.Length) - { - ThrowHelper.ThrowArgument_SpansMustHaveSameLength(); - } - - if (x.Length > destination.Length) - { - ThrowHelper.ThrowArgument_DestinationTooShort(); - } - - for (int i = 0; i < x.Length; i++) - { - destination[i] = MaxMagnitude(x[i], y[i]); - } - } - - /// Computes the minimum magnitude of any element in . - /// The tensor, represented as a span. - /// The minimum magnitude of any element in . - /// Length of '' must be greater than zero. 
- public static float MinMagnitude(ReadOnlySpan x) - { - if (x.IsEmpty) - { - ThrowHelper.ThrowArgument_SpansMustBeNonEmpty(); - } - - float result = float.PositiveInfinity; - float resultMag = float.PositiveInfinity; - - for (int i = 0; i < x.Length; i++) - { - // This matches the IEEE 754:2019 `minimumMagnitude` function. - // It propagates NaN inputs back to the caller and - // otherwise returns the input with a lesser magnitude. - // It treats +0 as greater than -0 as per the specification. - - float current = x[i]; - float currentMag = Math.Abs(current); - - if (currentMag != resultMag) - { - if (float.IsNaN(currentMag)) - { - return currentMag; - } - - if (currentMag < resultMag) - { - result = current; - resultMag = currentMag; - } - } - else if (IsNegative(current)) - { - result = current; - resultMag = currentMag; - } - } - - return result; - } - - /// Computes the element-wise result of: MathF.MinMagnitude(, ). - /// The first tensor, represented as a span. - /// The second tensor, represented as a span. - /// The destination tensor, represented as a span. - /// Length of '' must be same as length of ''. - /// Destination is too short. - /// This method effectively does [i] = MathF.MinMagnitude([i], [i]). - public static void MinMagnitude(ReadOnlySpan x, ReadOnlySpan y, Span destination) - { - if (x.Length != y.Length) - { - ThrowHelper.ThrowArgument_SpansMustHaveSameLength(); - } - - if (x.Length > destination.Length) - { - ThrowHelper.ThrowArgument_DestinationTooShort(); - } - - for (int i = 0; i < x.Length; i++) - { - destination[i] = MinMagnitude(x[i], y[i]); - } - } - - /// Computes the index of the maximum element in . - /// The tensor, represented as a span. - /// The index of the maximum element in , or -1 if is empty. 
- public static unsafe int IndexOfMax(ReadOnlySpan x) - { - int result = -1; - - if (!x.IsEmpty) - { - float max = float.NegativeInfinity; + float max = float.NegativeInfinity; for (int i = 0; i < x.Length; i++) { - // This matches the IEEE 754:2019 `maximum` function. - // It propagates NaN inputs back to the caller and - // otherwise returns the greater of the inputs. - // It treats +0 as greater than -0 as per the specification. - float current = x[i]; if (current != max) @@ -728,43 +386,53 @@ public static unsafe int IndexOfMax(ReadOnlySpan x) return result; } - /// Computes the index of the minimum element in . + /// Searches for the index of the single-precision floating-point number with the largest magnitude in the specified tensor. /// The tensor, represented as a span. - /// The index of the minimum element in , or -1 if is empty. - public static unsafe int IndexOfMin(ReadOnlySpan x) + /// The index of the element in with the largest magnitude (absolute value), or -1 if is empty. + /// + /// + /// The determination of the maximum magnitude matches the IEEE 754:2019 `maximumMagnitude` function. If any value equal to + /// is present, the index of the first is returned. If two values have the same magnitude and one is positive and the other is negative, + /// the positive value is considered to have the larger magnitude. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static unsafe int IndexOfMaxMagnitude(ReadOnlySpan x) { int result = -1; if (!x.IsEmpty) { - float min = float.PositiveInfinity; + float max = float.NegativeInfinity; + float maxMag = float.NegativeInfinity; for (int i = 0; i < x.Length; i++) { - // This matches the IEEE 754:2019 `minimum` function. - // It propagates NaN inputs back to the caller and - // otherwise returns the lesser of the inputs. 
- // It treats +0 as greater than -0 as per the specification. - float current = x[i]; + float currentMag = Math.Abs(current); - if (current != min) + if (currentMag != maxMag) { - if (float.IsNaN(current)) + if (float.IsNaN(currentMag)) { return i; } - if (current < min) + if (maxMag < currentMag) { result = i; - min = current; + max = current; + maxMag = currentMag; } } - else if (IsNegative(current) && !IsNegative(min)) + else if (IsNegative(max) && !IsNegative(current)) { result = i; - min = current; + max = current; + maxMag = currentMag; } } } @@ -772,48 +440,48 @@ public static unsafe int IndexOfMin(ReadOnlySpan x) return result; } - /// Computes the index of the element in with the maximum magnitude. + /// Searches for the index of the smallest single-precision floating-point number in the specified tensor. /// The tensor, represented as a span. - /// The index of the element with the maximum magnitude, or -1 if is empty. - /// This method corresponds to the iamax method defined by BLAS1. - public static unsafe int IndexOfMaxMagnitude(ReadOnlySpan x) + /// The index of the minimum element in , or -1 if is empty. + /// + /// + /// The determination of the minimum element matches the IEEE 754:2019 `minimum` function. If any value equal to + /// is present, the index of the first is returned. Negative 0 is considered smaller than positive 0. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static unsafe int IndexOfMin(ReadOnlySpan x) { int result = -1; if (!x.IsEmpty) { - float max = float.NegativeInfinity; - float maxMag = float.NegativeInfinity; + float min = float.PositiveInfinity; for (int i = 0; i < x.Length; i++) { - // This matches the IEEE 754:2019 `maximumMagnitude` function. 
- // It propagates NaN inputs back to the caller and - // otherwise returns the input with a greater magnitude. - // It treats +0 as greater than -0 as per the specification. - float current = x[i]; - float currentMag = Math.Abs(current); - if (currentMag != maxMag) + if (current != min) { - if (float.IsNaN(currentMag)) + if (float.IsNaN(current)) { return i; } - if (maxMag < currentMag) + if (current < min) { result = i; - max = current; - maxMag = currentMag; + min = current; } } - else if (IsNegative(max) && !IsNegative(current)) + else if (IsNegative(current) && !IsNegative(min)) { result = i; - max = current; - maxMag = currentMag; + min = current; } } } @@ -821,9 +489,20 @@ public static unsafe int IndexOfMaxMagnitude(ReadOnlySpan x) return result; } - /// Computes the index of the element in with the minimum magnitude. + /// Searches for the index of the single-precision floating-point number with the smallest magnitude in the specified tensor. /// The tensor, represented as a span. - /// The index of the element with the minimum magnitude, or -1 if is empty. + /// The index of the element in with the smallest magnitude (absolute value), or -1 if is empty. + /// + /// + /// The determination of the minimum magnitude matches the IEEE 754:2019 `minimumMagnitude` function. If any value equal to + /// is present, the index of the first is returned. If two values have the same magnitude and one is positive and the other is negative, + /// the negative value is considered to have the smaller magnitude. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. 
+ /// + /// public static unsafe int IndexOfMinMagnitude(ReadOnlySpan x) { int result = -1; @@ -835,11 +514,6 @@ public static unsafe int IndexOfMinMagnitude(ReadOnlySpan x) for (int i = 0; i < x.Length; i++) { - // This matches the IEEE 754:2019 `minimumMagnitude` function - // It propagates NaN inputs back to the caller and - // otherwise returns the input with a lesser magnitude. - // It treats +0 as greater than -0 as per the specification. - float current = x[i]; float currentMag = Math.Abs(current); @@ -869,33 +543,359 @@ public static unsafe int IndexOfMinMagnitude(ReadOnlySpan x) return result; } - /// Computes the sum of all elements in . + /// Computes the element-wise natural (base e) logarithm of single-precision floating-point numbers in the specified tensor. /// The tensor, represented as a span. - /// The result of adding all elements in , or zero if is empty. - public static float Sum(ReadOnlySpan x) => - Aggregate(0f, x); + /// The destination tensor, represented as a span. + /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = .Log([i]). + /// + /// + /// If a value equals 0, the result stored into the corresponding destination location is set to . + /// If a value is negative or equal to , the result stored into the corresponding destination location is set to NaN. + /// If a value is positive infinity, the result stored into the corresponding destination location is set to . + /// Otherwise, if a value is positive, its natural logarithm is stored into the corresponding destination location. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. 
+ /// + /// + public static void Log(ReadOnlySpan x, Span destination) => + InvokeSpanIntoSpan(x, destination); - /// Computes the sum of the squares of every element in . + /// Computes the element-wise base 2 logarithm of single-precision floating-point numbers in the specified tensor. /// The tensor, represented as a span. - /// The result of adding every element in multiplied by itself, or zero if is empty. - /// This method effectively does .Sum(.Multiply(, )). - public static float SumOfSquares(ReadOnlySpan x) => - Aggregate(0f, x); + /// The destination tensor, represented as a span. + /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = .Log2([i]). + /// + /// + /// If a value equals 0, the result stored into the corresponding destination location is set to . + /// If a value is negative or equal to , the result stored into the corresponding destination location is set to NaN. + /// If a value is positive infinity, the result stored into the corresponding destination location is set to . + /// Otherwise, if a value is positive, its natural logarithm is stored into the corresponding destination location. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static void Log2(ReadOnlySpan x, Span destination) => + InvokeSpanIntoSpan(x, destination); - /// Computes the sum of the absolute values of every element in . + /// Searches for the largest single-precision floating-point number in the specified tensor. /// The tensor, represented as a span. - /// The result of adding the absolute value of every element in , or zero if is empty. + /// The maximum element in . + /// Length of must be greater than zero. /// - /// This method effectively does .Sum(.Abs()). 
- /// This method corresponds to the asum method defined by BLAS1. + /// + /// The determination of the maximum element matches the IEEE 754:2019 `maximum` function. If any value equal to + /// is present, the first is returned. Positive 0 is considered greater than negative 0. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// /// - public static float SumOfMagnitudes(ReadOnlySpan x) => - Aggregate(0f, x); + public static float Max(ReadOnlySpan x) => + MinMaxCore(x); + + /// Computes the element-wise maximum of the single-precision floating-point numbers in the specified tensors. + /// The first tensor, represented as a span. + /// The second tensor, represented as a span. + /// The destination tensor, represented as a span. + /// Length of must be same as length of . + /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = MathF.Max([i], [i]). + /// + /// + /// The determination of the maximum element matches the IEEE 754:2019 `maximum` function. If either value is equal to , + /// that value is stored as the result. Positive 0 is considered greater than negative 0. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static void Max(ReadOnlySpan x, ReadOnlySpan y, Span destination) => + InvokeSpanSpanIntoSpan(x, y, destination); + + /// Searches for the single-precision floating-point number with the largest magnitude in the specified tensor. + /// The tensor, represented as a span. 
+ /// The element in with the largest magnitude (absolute value). + /// Length of must be greater than zero. + /// + /// + /// The determination of the maximum magnitude matches the IEEE 754:2019 `maximumMagnitude` function. If any value equal to + /// is present, the first is returned. If two values have the same magnitude and one is positive and the other is negative, + /// the positive value is considered to have the larger magnitude. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static float MaxMagnitude(ReadOnlySpan x) => + MinMaxCore(x); + + /// Computes the element-wise single-precision floating-point number with the largest magnitude in the specified tensors. + /// The first tensor, represented as a span. + /// The second tensor, represented as a span. + /// The destination tensor, represented as a span. + /// Length of must be same as length of . + /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. + /// and reference overlapping memory locations and do not begin at the same location. + /// This method effectively computes [i] = MathF.MaxMagnitude([i], [i]). + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static void MaxMagnitude(ReadOnlySpan x, ReadOnlySpan y, Span destination) => + InvokeSpanSpanIntoSpan(x, y, destination); + + /// Searches for the smallest single-precision floating-point number in the specified tensor. + /// The tensor, represented as a span. + /// The minimum element in . + /// Length of must be greater than zero. 
+ /// + /// + /// The determination of the minimum element matches the IEEE 754:2019 `minimum` function. If any value is equal to + /// is present, the first is returned. Negative 0 is considered smaller than positive 0. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static float Min(ReadOnlySpan x) => + MinMaxCore(x); + + /// Computes the element-wise minimum of the single-precision floating-point numbers in the specified tensors. + /// The first tensor, represented as a span. + /// The second tensor, represented as a span. + /// The destination tensor, represented as a span. + /// Length of must be same as length of . + /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = MathF.Max([i], [i]). + /// + /// + /// The determination of the maximum element matches the IEEE 754:2019 `maximum` function. If either value is equal to , + /// that value is stored as the result. Positive 0 is considered greater than negative 0. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static void Min(ReadOnlySpan x, ReadOnlySpan y, Span destination) => + InvokeSpanSpanIntoSpan(x, y, destination); + + /// Searches for the single-precision floating-point number with the smallest magnitude in the specified tensor. + /// The tensor, represented as a span. + /// The element in with the smallest magnitude (absolute value). + /// Length of must be greater than zero. 
+ /// + /// + /// The determination of the minimum magnitude matches the IEEE 754:2019 `minimumMagnitude` function. If any value equal to + /// is present, the first is returned. If two values have the same magnitude and one is positive and the other is negative, + /// the negative value is considered to have the smaller magnitude. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static float MinMagnitude(ReadOnlySpan x) => + MinMaxCore(x); + + /// Computes the element-wise single-precision floating-point number with the smallest magnitude in the specified tensors. + /// The first tensor, represented as a span. + /// The second tensor, represented as a span. + /// The destination tensor, represented as a span. + /// Length of must be same as length of . + /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. + /// and reference overlapping memory locations and do not begin at the same location. + /// This method effectively computes [i] = MathF.MinMagnitude([i], [i]). + /// + /// + /// The determination of the maximum magnitude matches the IEEE 754:2019 `minimumMagnitude` function. If either value is equal to , + /// that value is stored as the result. If the two values have the same magnitude and one is positive and the other is negative, + /// the negative value is considered to have the smaller magnitude. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. 
+ /// + /// + public static void MinMagnitude(ReadOnlySpan x, ReadOnlySpan y, Span destination) => + InvokeSpanSpanIntoSpan(x, y, destination); + + /// Computes the element-wise product of single-precision floating-point numbers in the specified tensors. + /// The first tensor, represented as a span. + /// The second tensor, represented as a span. + /// The destination tensor, represented as a span. + /// Length of must be same as length of . + /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = [i] * [i]. + /// + /// + /// If either of the element-wise input values is equal to , the resulting element-wise value is also NaN. + /// + /// + public static void Multiply(ReadOnlySpan x, ReadOnlySpan y, Span destination) => + InvokeSpanSpanIntoSpan(x, y, destination); + + /// Computes the element-wise product of single-precision floating-point numbers in the specified tensors. + /// The first tensor, represented as a span. + /// The second tensor, represented as a scalar. + /// The destination tensor, represented as a span. + /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = [i] * . + /// It corresponds to the scal method defined by BLAS1. + /// + /// + /// If either of the element-wise input values is equal to , the resulting element-wise value is also NaN. + /// + /// + public static void Multiply(ReadOnlySpan x, float y, Span destination) => + InvokeSpanScalarIntoSpan(x, y, destination); + + /// Computes the element-wise result of ( * ) * for the specified tensors of single-precision floating-point numbers. + /// The first tensor, represented as a span. + /// The second tensor, represented as a span. 
+ /// The third tensor, represented as a span. + /// The destination tensor, represented as a span. + /// Length of must be same as length of and length of . + /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. + /// and reference overlapping memory locations and do not begin at the same location. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = ([i] * [i]) + [i]. + /// + /// + /// If either of the element-wise input values is equal to , the resulting element-wise value is also NaN. + /// + /// + public static void MultiplyAdd(ReadOnlySpan x, ReadOnlySpan y, ReadOnlySpan addend, Span destination) => + InvokeSpanSpanSpanIntoSpan(x, y, addend, destination); + + /// Computes the element-wise result of ( * ) * for the specified tensors of single-precision floating-point numbers. + /// The first tensor, represented as a span. + /// The second tensor, represented as a span. + /// The third tensor, represented as a scalar. + /// The destination tensor, represented as a span. + /// Length of must be same as length of . + /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = ([i] * [i]) + . + /// It corresponds to the axpy method defined by BLAS1. + /// + /// + /// If either of the element-wise input values is equal to , the resulting element-wise value is also NaN. + /// + /// + public static void MultiplyAdd(ReadOnlySpan x, ReadOnlySpan y, float addend, Span destination) => + InvokeSpanSpanScalarIntoSpan(x, y, addend, destination); + + /// Computes the element-wise result of ( * ) * for the specified tensors of single-precision floating-point numbers. + /// The first tensor, represented as a span. 
+ /// The second tensor, represented as a scalar. + /// The third tensor, represented as a span. + /// The destination tensor, represented as a span. + /// Length of must be same as length of . + /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = ([i] * ) + [i]. + /// + /// + /// If either of the element-wise input values is equal to , the resulting element-wise value is also NaN. + /// + /// + public static void MultiplyAdd(ReadOnlySpan x, float y, ReadOnlySpan addend, Span destination) => + InvokeSpanScalarSpanIntoSpan(x, y, addend, destination); - /// Computes the product of all elements in . + /// Computes the element-wise negation of each single-precision floating-point number in the specified tensor. + /// The tensor, represented as a span. + /// The destination tensor, represented as a span. + /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = -[i]. + /// + /// + /// If any of the element-wise input values is equal to , the resulting element-wise value is also NaN. + /// + /// + public static void Negate(ReadOnlySpan x, Span destination) => + InvokeSpanIntoSpan(x, destination); + + /// Computes the Euclidean norm of the specified tensor of single-precision floating-point numbers. + /// The first tensor, represented as a span. + /// The norm. + /// + /// + /// This method effectively computes MathF.Sqrt(TensorPrimitives.SumOfSquares(x)). + /// This is often referred to as the Euclidean norm or L2 norm. + /// It corresponds to the nrm2 method defined by BLAS1. + /// + /// + /// If any of the input values is equal to , the result value is also NaN. 
+ /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static float Norm(ReadOnlySpan x) => + MathF.Sqrt(SumOfSquares(x)); + + /// Computes the product of all elements in the specified non-empty tensor of single-precision floating-point numbers. /// The tensor, represented as a span. /// The result of multiplying all elements in . - /// Length of '' must be greater than zero. + /// Length of must be greater than zero. + /// + /// + /// If any of the input values is equal to , the result value is also NaN. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// public static float Product(ReadOnlySpan x) { if (x.IsEmpty) @@ -903,17 +903,31 @@ public static float Product(ReadOnlySpan x) ThrowHelper.ThrowArgument_SpansMustBeNonEmpty(); } - return Aggregate(1.0f, x); + return Aggregate(x); } - /// Computes the product of the element-wise result of: + . + /// Computes the product of the element-wise differences of the single-precision floating-point numbers in the specified non-empty tensors. /// The first tensor, represented as a span. /// The second tensor, represented as a span. - /// The result of multiplying the element-wise additions of the elements in each tensor. + /// The result of multiplying the element-wise subtraction of the elements in the second tensor from the first tensor. /// Length of both input spans must be greater than zero. /// and must have the same length. - /// This method effectively does .Product(.Add(, )). 
- public static float ProductOfSums(ReadOnlySpan x, ReadOnlySpan y) + /// + /// + /// This method effectively computes: + /// + /// Span<float> differences = ...; + /// TensorPrimitives.Subtract(x, y, differences); + /// float result = TensorPrimitives.Product(differences); + /// + /// but without requiring additional temporary storage for the intermediate differences. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static float ProductOfDifferences(ReadOnlySpan x, ReadOnlySpan y) { if (x.IsEmpty) { @@ -925,17 +939,31 @@ public static float ProductOfSums(ReadOnlySpan x, ReadOnlySpan y) ThrowHelper.ThrowArgument_SpansMustHaveSameLength(); } - return Aggregate(1.0f, x, y); + return Aggregate(x, y); } - /// Computes the product of the element-wise result of: - . + /// Computes the product of the element-wise sums of the single-precision floating-point numbers in the specified non-empty tensors. /// The first tensor, represented as a span. /// The second tensor, represented as a span. - /// The result of multiplying the element-wise subtraction of the elements in the second tensor from the first tensor. + /// The result of multiplying the element-wise additions of the elements in each tensor. /// Length of both input spans must be greater than zero. /// and must have the same length. - /// This method effectively does .Product(.Subtract(, )). - public static float ProductOfDifferences(ReadOnlySpan x, ReadOnlySpan y) + /// + /// + /// This method effectively computes: + /// + /// Span<float> sums = ...; + /// TensorPrimitives.Add(x, y, sums); + /// float result = TensorPrimitives.Product(sums); + /// + /// but without requiring additional temporary storage for the intermediate sums. 
+ /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static float ProductOfSums(ReadOnlySpan x, ReadOnlySpan y) { if (x.IsEmpty) { @@ -947,7 +975,288 @@ public static float ProductOfDifferences(ReadOnlySpan x, ReadOnlySpan(1.0f, x, y); + return Aggregate(x, y); + } + + /// Computes the element-wise sigmoid function on the specified non-empty tensor of single-precision floating-point numbers. + /// The tensor, represented as a span. + /// The destination tensor. + /// Destination is too short. + /// must not be empty. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = 1f / (1f + .Exp(-[i])). + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static void Sigmoid(ReadOnlySpan x, Span destination) + { + if (x.IsEmpty) + { + ThrowHelper.ThrowArgument_SpansMustBeNonEmpty(); + } + + if (x.Length > destination.Length) + { + ThrowHelper.ThrowArgument_DestinationTooShort(); + } + + ValidateInputOutputSpanNonOverlapping(x, destination); + + for (int i = 0; i < x.Length; i++) + { + destination[i] = 1f / (1f + MathF.Exp(-x[i])); + } + } + + /// Computes the element-wise hyperbolic sine of each single-precision floating-point radian angle in the specified tensor. + /// The tensor, represented as a span. + /// The destination tensor, represented as a span. + /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = .Sinh([i]). 
+ /// + /// + /// If a value is equal to , , or , + /// the corresponding destination location is set to that value. + /// + /// + /// The angles in x must be in radians. Use or multiply by /180 to convert degrees to radians. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static void Sinh(ReadOnlySpan x, Span destination) + { + if (x.Length > destination.Length) + { + ThrowHelper.ThrowArgument_DestinationTooShort(); + } + + ValidateInputOutputSpanNonOverlapping(x, destination); + + for (int i = 0; i < x.Length; i++) + { + destination[i] = MathF.Sinh(x[i]); + } + } + + /// Computes the softmax function over the specified non-empty tensor of single-precision floating-point numbers. + /// The tensor, represented as a span. + /// The destination tensor. + /// Destination is too short. + /// must not be empty. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes a sum of MathF.Exp(x[i]) for all elements in . + /// It then effectively computes [i] = MathF.Exp([i]) / sum. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. 
+ /// + /// + public static void SoftMax(ReadOnlySpan x, Span destination) + { + if (x.IsEmpty) + { + ThrowHelper.ThrowArgument_SpansMustBeNonEmpty(); + } + + if (x.Length > destination.Length) + { + ThrowHelper.ThrowArgument_DestinationTooShort(); + } + + ValidateInputOutputSpanNonOverlapping(x, destination); + + float expSum = 0f; + + for (int i = 0; i < x.Length; i++) + { + expSum += MathF.Exp(x[i]); + } + + for (int i = 0; i < x.Length; i++) + { + destination[i] = MathF.Exp(x[i]) / expSum; + } + } + + /// Computes the element-wise difference between single-precision floating-point numbers in the specified tensors. + /// The first tensor, represented as a span. + /// The second tensor, represented as a scalar. + /// The destination tensor, represented as a span. + /// Length of must be same as length of . + /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = [i] - [i]. + /// + /// + /// If either of the element-wise input values is equal to , the resulting element-wise value is also NaN. + /// + /// + public static void Subtract(ReadOnlySpan x, ReadOnlySpan y, Span destination) => + InvokeSpanSpanIntoSpan(x, y, destination); + + /// Computes the element-wise difference between single-precision floating-point numbers in the specified tensors. + /// The first tensor, represented as a span. + /// The second tensor, represented as a scalar. + /// The destination tensor, represented as a span. + /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = [i] - . + /// + /// + /// If either of the element-wise input values is equal to , the resulting element-wise value is also NaN. 
+ /// + /// + public static void Subtract(ReadOnlySpan x, float y, Span destination) => + InvokeSpanScalarIntoSpan(x, y, destination); + + /// Computes the sum of all elements in the specified tensor of single-precision floating-point numbers. + /// The tensor, represented as a span. + /// The result of adding all elements in , or zero if is empty. + /// + /// + /// If any of the values in the input is equal to , the result is also NaN. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static float Sum(ReadOnlySpan x) => + Aggregate(x); + + /// Computes the sum of the absolute values of every element in the specified tensor of single-precision floating-point numbers. + /// The tensor, represented as a span. + /// The result of adding the absolute value of every element in , or zero if is empty. + /// + /// + /// This method effectively computes: + /// + /// Span<float> absoluteValues = ...; + /// TensorPrimitives.Abs(x, absoluteValues); + /// float result = TensorPrimitives.Sum(absoluteValues); + /// + /// but without requiring intermediate storage for the absolute values. It corresponds to the asum method defined by BLAS1. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static float SumOfMagnitudes(ReadOnlySpan x) => + Aggregate(x); + + /// Computes the sum of the square of every element in the specified tensor of single-precision floating-point numbers. + /// The tensor, represented as a span. + /// The result of adding the square of every element in , or zero if is empty. 
+ /// + /// + /// This method effectively computes: + /// + /// Span<float> squaredValues = ...; + /// TensorPrimitives.Multiply(x, x, squaredValues); + /// float result = TensorPrimitives.Sum(squaredValues); + /// + /// but without requiring intermediate storage for the squared values. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. + /// + /// + public static float SumOfSquares(ReadOnlySpan x) => + Aggregate(x); + + /// Computes the element-wise hyperbolic tangent of each single-precision floating-point radian angle in the specified tensor. + /// The tensor, represented as a span. + /// The destination tensor, represented as a span. + /// Destination is too short. + /// and reference overlapping memory locations and do not begin at the same location. + /// + /// + /// This method effectively computes [i] = .Tanh([i]). + /// + /// + /// If a value is equal to , the corresponding destination location is set to -1. + /// If a value is equal to , the corresponding destination location is set to 1. + /// If a value is equal to , the corresponding destination location is set to NaN. + /// + /// + /// The angles in x must be in radians. Use or multiply by /180 to convert degrees to radians. + /// + /// + /// This method may call into the underlying C runtime or employ instructions specific to the current architecture. Exact results may differ between different + /// operating systems or architectures. 
+ /// + /// + public static void Tanh(ReadOnlySpan x, Span destination) + { + if (x.Length > destination.Length) + { + ThrowHelper.ThrowArgument_DestinationTooShort(); + } + + ValidateInputOutputSpanNonOverlapping(x, destination); + + for (int i = 0; i < x.Length; i++) + { + destination[i] = MathF.Tanh(x[i]); + } } + + /// Throws an exception if the and spans overlap and don't begin at the same memory location. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void ValidateInputOutputSpanNonOverlapping(ReadOnlySpan input, Span output) + { + if (!Unsafe.AreSame(ref MemoryMarshal.GetReference(input), ref MemoryMarshal.GetReference(output)) && + input.Overlaps(output)) + { + ThrowHelper.ThrowArgument_InputAndDestinationSpanMustNotOverlap(); + } + } + + /// Mask used to handle remaining elements after vectorized handling of the input. + /// + /// Logically 16 rows of 16 uints. The Nth row should be used to handle N remaining elements at the + /// end of the input, where elements in the vector prior to that will be zero'd. 
+ /// + private static ReadOnlySpan RemainderUInt32Mask_16x16 => new uint[] + { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 
0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + }; } } diff --git a/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/TensorPrimitives.netcore.cs b/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/TensorPrimitives.netcore.cs index 6323660cb75fd..7e13b54148e4e 100644 --- a/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/TensorPrimitives.netcore.cs +++ b/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/TensorPrimitives.netcore.cs @@ -19,6 +19,14 @@ public static partial class TensorPrimitives /// The source span from which to copy values. 
/// The destination span into which the converted values should be written. /// Destination is too short. + /// + /// + /// This method effectively computes [i] = (Half)[i]. + /// + /// + /// and must not overlap. If they do, behavior is undefined. + /// + /// public static void ConvertToHalf(ReadOnlySpan source, Span destination) { if (source.Length > destination.Length) @@ -26,10 +34,301 @@ public static void ConvertToHalf(ReadOnlySpan source, Span destinat ThrowHelper.ThrowArgument_DestinationTooShort(); } - for (int i = 0; i < source.Length; i++) + ref float sourceRef = ref MemoryMarshal.GetReference(source); + ref ushort destinationRef = ref Unsafe.As(ref MemoryMarshal.GetReference(destination)); + int i = 0, twoVectorsFromEnd; + +#if NET8_0_OR_GREATER + if (Vector512.IsHardwareAccelerated) + { + twoVectorsFromEnd = source.Length - (Vector512.Count * 2); + if (i <= twoVectorsFromEnd) + { + // Loop handling two input vectors / one output vector at a time. + do + { + Vector512 lower = SingleToHalfAsWidenedUInt32_Vector512(Vector512.LoadUnsafe(ref sourceRef, (uint)i)); + Vector512 upper = SingleToHalfAsWidenedUInt32_Vector512(Vector512.LoadUnsafe(ref sourceRef, (uint)(i + Vector512.Count))); + Vector512.Narrow(lower, upper).StoreUnsafe(ref destinationRef, (uint)i); + + i += Vector512.Count * 2; + } + while (i <= twoVectorsFromEnd); + + // Handle any remaining elements with final vectors. 
+ if (i != source.Length) + { + i = source.Length - (Vector512.Count * 2); + + Vector512 lower = SingleToHalfAsWidenedUInt32_Vector512(Vector512.LoadUnsafe(ref sourceRef, (uint)i)); + Vector512 upper = SingleToHalfAsWidenedUInt32_Vector512(Vector512.LoadUnsafe(ref sourceRef, (uint)(i + Vector512.Count))); + Vector512.Narrow(lower, upper).StoreUnsafe(ref destinationRef, (uint)i); + } + + return; + } + } +#endif + + if (Vector256.IsHardwareAccelerated) + { + twoVectorsFromEnd = source.Length - (Vector256.Count * 2); + if (i <= twoVectorsFromEnd) + { + // Loop handling two input vectors / one output vector at a time. + do + { + Vector256 lower = SingleToHalfAsWidenedUInt32_Vector256(Vector256.LoadUnsafe(ref sourceRef, (uint)i)); + Vector256 upper = SingleToHalfAsWidenedUInt32_Vector256(Vector256.LoadUnsafe(ref sourceRef, (uint)(i + Vector256.Count))); + Vector256 halfs = Vector256.Narrow(lower, upper); + halfs.StoreUnsafe(ref destinationRef, (uint)i); + + i += Vector256.Count * 2; + } + while (i <= twoVectorsFromEnd); + + // Handle any remaining elements with final vectors. + if (i != source.Length) + { + i = source.Length - (Vector256.Count * 2); + + Vector256 lower = SingleToHalfAsWidenedUInt32_Vector256(Vector256.LoadUnsafe(ref sourceRef, (uint)i)); + Vector256 upper = SingleToHalfAsWidenedUInt32_Vector256(Vector256.LoadUnsafe(ref sourceRef, (uint)(i + Vector256.Count))); + Vector256.Narrow(lower, upper).StoreUnsafe(ref destinationRef, (uint)i); + } + + return; + } + } + + if (Vector128.IsHardwareAccelerated) + { + twoVectorsFromEnd = source.Length - (Vector128.Count * 2); + if (i <= twoVectorsFromEnd) + { + // Loop handling two input vectors / one output vector at a time. 
+ do + { + Vector128 lower = SingleToHalfAsWidenedUInt32_Vector128(Vector128.LoadUnsafe(ref sourceRef, (uint)i)); + Vector128 upper = SingleToHalfAsWidenedUInt32_Vector128(Vector128.LoadUnsafe(ref sourceRef, (uint)(i + Vector128.Count))); + Vector128.Narrow(lower, upper).StoreUnsafe(ref destinationRef, (uint)i); + + i += Vector128.Count * 2; + } + while (i <= twoVectorsFromEnd); + + // Handle any remaining elements with final vectors. + if (i != source.Length) + { + i = source.Length - (Vector128.Count * 2); + + Vector128 lower = SingleToHalfAsWidenedUInt32_Vector128(Vector128.LoadUnsafe(ref sourceRef, (uint)i)); + Vector128 upper = SingleToHalfAsWidenedUInt32_Vector128(Vector128.LoadUnsafe(ref sourceRef, (uint)(i + Vector128.Count))); + Vector128.Narrow(lower, upper).StoreUnsafe(ref destinationRef, (uint)i); + } + + return; + } + } + + while (i < source.Length) + { + Unsafe.Add(ref destinationRef, i) = BitConverter.HalfToUInt16Bits((Half)Unsafe.Add(ref sourceRef, i)); + i++; + } + + // This implements a vectorized version of the `explicit operator Half(float value) operator`. + // See detailed description of the algorithm used here: + // https://github.com/dotnet/runtime/blob/ca8d6f0420096831766ec11c7d400e4f7ccc7a34/src/libraries/System.Private.CoreLib/src/System/Half.cs#L606-L714 + // The cast operator converts a float to a Half represented as a UInt32, then narrows to a UInt16, and reinterpret casts to Half. + // This does the same, with an input VectorXx and an output VectorXx. + // Loop handling two input vectors at a time; each input float is double the size of each output Half, + // so we need two vectors of floats to produce one vector of Halfs. Half isn't supported in VectorXx, + // so we convert the VectorXx to a VectorXx, and the caller then uses this twice, narrows the combination + // into a VectorXx, and then saves that out to the destination `ref Half` reinterpreted as `ref ushort`. 
+ + #pragma warning disable IDE0059 // https://github.com/dotnet/roslyn/issues/44948 + const uint MinExp = 0x3880_0000u; // Minimum exponent for rounding + const uint Exponent126 = 0x3f00_0000u; // Exponent displacement #1 + const uint SingleBiasedExponentMask = 0x7F80_0000; // float.BiasedExponentMask; // Exponent mask + const uint Exponent13 = 0x0680_0000u; // Exponent displacement #2 + const float MaxHalfValueBelowInfinity = 65520.0f; // Maximum value that is not Infinity in Half + const uint ExponentMask = 0x7C00; // Mask for exponent bits in Half + const uint SingleSignMask = 0x8000_0000u; // float.SignMask; // Mask for sign bit in float + #pragma warning restore IDE0059 + + static Vector128 SingleToHalfAsWidenedUInt32_Vector128(Vector128 value) + { + Vector128 bitValue = value.AsUInt32(); + + // Extract sign bit + Vector128 sign = Vector128.ShiftRightLogical(bitValue & Vector128.Create(SingleSignMask), 16); + + // Detecting NaN (0u if value is NaN; otherwise, ~0u) + Vector128 realMask = Vector128.Equals(value, value).AsUInt32(); + + // Clear sign bit + value = Vector128.Abs(value); + + // Rectify values that are Infinity in Half. 
+ value = Vector128.Min(Vector128.Create(MaxHalfValueBelowInfinity), value); + + // Rectify lower exponent + Vector128 exponentOffset0 = Vector128.Max(value, Vector128.Create(MinExp).AsSingle()).AsUInt32(); + + // Extract exponent + exponentOffset0 &= Vector128.Create(SingleBiasedExponentMask); + + // Add exponent by 13 + exponentOffset0 += Vector128.Create(Exponent13); + + // Round Single into Half's precision (NaN also gets modified here, just setting the MSB of fraction) + value += exponentOffset0.AsSingle(); + bitValue = value.AsUInt32(); + + // Only exponent bits will be modified if NaN + Vector128 maskedHalfExponentForNaN = ~realMask & Vector128.Create(ExponentMask); + + // Subtract exponent by 126 + bitValue -= Vector128.Create(Exponent126); + + // Shift bitValue right by 13 bits to match the boundary of exponent part and fraction part. + Vector128 newExponent = Vector128.ShiftRightLogical(bitValue, 13); + + // Clear the fraction parts if the value was NaN. + bitValue &= realMask; + + // Merge the exponent part with fraction part, and add the exponent part and fraction part's overflow. + bitValue += newExponent; + + // Clear exponents if value is NaN + bitValue &= ~maskedHalfExponentForNaN; + + // Merge sign bit with possible NaN exponent + Vector128 signAndMaskedExponent = maskedHalfExponentForNaN | sign; + + // Merge sign bit and possible NaN exponent + bitValue |= signAndMaskedExponent; + + // The final result + return bitValue; + } + + static Vector256 SingleToHalfAsWidenedUInt32_Vector256(Vector256 value) + { + Vector256 bitValue = value.AsUInt32(); + + // Extract sign bit + Vector256 sign = Vector256.ShiftRightLogical(bitValue & Vector256.Create(SingleSignMask), 16); + + // Detecting NaN (0u if value is NaN; otherwise, ~0u) + Vector256 realMask = Vector256.Equals(value, value).AsUInt32(); + + // Clear sign bit + value = Vector256.Abs(value); + + // Rectify values that are Infinity in Half. 
+ value = Vector256.Min(Vector256.Create(MaxHalfValueBelowInfinity), value); + + // Rectify lower exponent + Vector256 exponentOffset0 = Vector256.Max(value, Vector256.Create(MinExp).AsSingle()).AsUInt32(); + + // Extract exponent + exponentOffset0 &= Vector256.Create(SingleBiasedExponentMask); + + // Add exponent by 13 + exponentOffset0 += Vector256.Create(Exponent13); + + // Round Single into Half's precision (NaN also gets modified here, just setting the MSB of fraction) + value += exponentOffset0.AsSingle(); + bitValue = value.AsUInt32(); + + // Only exponent bits will be modified if NaN + Vector256 maskedHalfExponentForNaN = ~realMask & Vector256.Create(ExponentMask); + + // Subtract exponent by 126 + bitValue -= Vector256.Create(Exponent126); + + // Shift bitValue right by 13 bits to match the boundary of exponent part and fraction part. + Vector256 newExponent = Vector256.ShiftRightLogical(bitValue, 13); + + // Clear the fraction parts if the value was NaN. + bitValue &= realMask; + + // Merge the exponent part with fraction part, and add the exponent part and fraction part's overflow. + bitValue += newExponent; + + // Clear exponents if value is NaN + bitValue &= ~maskedHalfExponentForNaN; + + // Merge sign bit with possible NaN exponent + Vector256 signAndMaskedExponent = maskedHalfExponentForNaN | sign; + + // Merge sign bit and possible NaN exponent + bitValue |= signAndMaskedExponent; + + // The final result + return bitValue; + } + +#if NET8_0_OR_GREATER + static Vector512 SingleToHalfAsWidenedUInt32_Vector512(Vector512 value) { - destination[i] = (Half)source[i]; + Vector512 bitValue = value.AsUInt32(); + + // Extract sign bit + Vector512 sign = Vector512.ShiftRightLogical(bitValue & Vector512.Create(SingleSignMask), 16); + + // Detecting NaN (0u if value is NaN; otherwise, ~0u) + Vector512 realMask = Vector512.Equals(value, value).AsUInt32(); + + // Clear sign bit + value = Vector512.Abs(value); + + // Rectify values that are Infinity in Half. 
+ value = Vector512.Min(Vector512.Create(MaxHalfValueBelowInfinity), value); + + // Rectify lower exponent + Vector512 exponentOffset0 = Vector512.Max(value, Vector512.Create(MinExp).AsSingle()).AsUInt32(); + + // Extract exponent + exponentOffset0 &= Vector512.Create(SingleBiasedExponentMask); + + // Add exponent by 13 + exponentOffset0 += Vector512.Create(Exponent13); + + // Round Single into Half's precision (NaN also gets modified here, just setting the MSB of fraction) + value += exponentOffset0.AsSingle(); + bitValue = value.AsUInt32(); + + // Only exponent bits will be modified if NaN + Vector512 maskedHalfExponentForNaN = ~realMask & Vector512.Create(ExponentMask); + + // Subtract exponent by 126 + bitValue -= Vector512.Create(Exponent126); + + // Shift bitValue right by 13 bits to match the boundary of exponent part and fraction part. + Vector512 newExponent = Vector512.ShiftRightLogical(bitValue, 13); + + // Clear the fraction parts if the value was NaN. + bitValue &= realMask; + + // Merge the exponent part with fraction part, and add the exponent part and fraction part's overflow. + bitValue += newExponent; + + // Clear exponents if value is NaN + bitValue &= ~maskedHalfExponentForNaN; + + // Merge sign bit with possible NaN exponent + Vector512 signAndMaskedExponent = maskedHalfExponentForNaN | sign; + + // Merge sign bit and possible NaN exponent + bitValue |= signAndMaskedExponent; + + // The final result + return bitValue; } +#endif } /// @@ -39,6 +338,14 @@ public static void ConvertToHalf(ReadOnlySpan source, Span destinat /// The source span from which to copy values. /// The destination span into which the converted values should be written. /// Destination is too short. + /// + /// + /// This method effectively computes [i] = (float)[i]. + /// + /// + /// and must not overlap. If they do, behavior is undefined. 
+ /// + /// public static void ConvertToSingle(ReadOnlySpan source, Span destination) { if (source.Length > destination.Length) @@ -46,19 +353,257 @@ public static void ConvertToSingle(ReadOnlySpan source, Span destin ThrowHelper.ThrowArgument_DestinationTooShort(); } - for (int i = 0; i < source.Length; i++) + ref short sourceRef = ref Unsafe.As(ref MemoryMarshal.GetReference(source)); + ref float destinationRef = ref MemoryMarshal.GetReference(destination); + int i = 0, oneVectorFromEnd; + +#if NET8_0_OR_GREATER + if (Vector512.IsHardwareAccelerated) { - destination[i] = (float)source[i]; + oneVectorFromEnd = source.Length - Vector512.Count; + if (i <= oneVectorFromEnd) + { + // Loop handling one input vector / two output vectors at a time. + do + { + (Vector512 lower, Vector512 upper) = Vector512.Widen(Vector512.LoadUnsafe(ref sourceRef, (uint)i)); + HalfAsWidenedUInt32ToSingle_Vector512(lower.AsUInt32()).StoreUnsafe(ref destinationRef, (uint)i); + HalfAsWidenedUInt32ToSingle_Vector512(upper.AsUInt32()).StoreUnsafe(ref destinationRef, (uint)(i + Vector512.Count)); + + i += Vector512.Count; + } + while (i <= oneVectorFromEnd); + + // Handle any remaining elements with a final input vector. + if (i != source.Length) + { + i = source.Length - Vector512.Count; + + (Vector512 lower, Vector512 upper) = Vector512.Widen(Vector512.LoadUnsafe(ref sourceRef, (uint)i)); + HalfAsWidenedUInt32ToSingle_Vector512(lower.AsUInt32()).StoreUnsafe(ref destinationRef, (uint)i); + HalfAsWidenedUInt32ToSingle_Vector512(upper.AsUInt32()).StoreUnsafe(ref destinationRef, (uint)(i + Vector512.Count)); + } + + return; + } } - } +#endif - private static bool IsNegative(float f) => float.IsNegative(f); + if (Vector256.IsHardwareAccelerated) + { + oneVectorFromEnd = source.Length - Vector256.Count; + if (i <= oneVectorFromEnd) + { + // Loop handling one input vector / two output vectors at a time. 
+ do + { + (Vector256 lower, Vector256 upper) = Vector256.Widen(Vector256.LoadUnsafe(ref sourceRef, (uint)i)); + HalfAsWidenedUInt32ToSingle_Vector256(lower.AsUInt32()).StoreUnsafe(ref destinationRef, (uint)i); + HalfAsWidenedUInt32ToSingle_Vector256(upper.AsUInt32()).StoreUnsafe(ref destinationRef, (uint)(i + Vector256.Count)); - private static float MaxMagnitude(float x, float y) => MathF.MaxMagnitude(x, y); + i += Vector256.Count; + } + while (i <= oneVectorFromEnd); - private static float MinMagnitude(float x, float y) => MathF.MinMagnitude(x, y); + // Handle any remaining elements with a final input vector. + if (i != source.Length) + { + i = source.Length - Vector256.Count; - private static float Log2(float x) => MathF.Log2(x); + (Vector256 lower, Vector256 upper) = Vector256.Widen(Vector256.LoadUnsafe(ref sourceRef, (uint)i)); + HalfAsWidenedUInt32ToSingle_Vector256(lower.AsUInt32()).StoreUnsafe(ref destinationRef, (uint)i); + HalfAsWidenedUInt32ToSingle_Vector256(upper.AsUInt32()).StoreUnsafe(ref destinationRef, (uint)(i + Vector256.Count)); + } + + return; + } + } + + if (Vector128.IsHardwareAccelerated) + { + oneVectorFromEnd = source.Length - Vector128.Count; + if (i <= oneVectorFromEnd) + { + // Loop handling one input vector / two output vectors at a time. + do + { + (Vector128 lower, Vector128 upper) = Vector128.Widen(Vector128.LoadUnsafe(ref sourceRef, (uint)i)); + HalfAsWidenedUInt32ToSingle_Vector128(lower.AsUInt32()).StoreUnsafe(ref destinationRef, (uint)i); + HalfAsWidenedUInt32ToSingle_Vector128(upper.AsUInt32()).StoreUnsafe(ref destinationRef, (uint)(i + Vector128.Count)); + + i += Vector128.Count; + } + while (i <= oneVectorFromEnd); + + // Handle any remaining elements with a final input vector. 
+ if (i != source.Length) + { + i = source.Length - Vector128.Count; + + (Vector128 lower, Vector128 upper) = Vector128.Widen(Vector128.LoadUnsafe(ref sourceRef, (uint)i)); + HalfAsWidenedUInt32ToSingle_Vector128(lower.AsUInt32()).StoreUnsafe(ref destinationRef, (uint)i); + HalfAsWidenedUInt32ToSingle_Vector128(upper.AsUInt32()).StoreUnsafe(ref destinationRef, (uint)(i + Vector128.Count)); + } + + return; + } + } + + while (i < source.Length) + { + Unsafe.Add(ref destinationRef, i) = (float)Unsafe.As(ref Unsafe.Add(ref sourceRef, i)); + i++; + } + + // This implements a vectorized version of the `explicit operator float(Half value) operator`. + // See detailed description of the algorithm used here: + // https://github.com/dotnet/runtime/blob/3bf40a378f00cb5bf18ff62796bc7097719b974c/src/libraries/System.Private.CoreLib/src/System/Half.cs#L1010-L1040 + // The cast operator converts a Half represented as uint to a float. This does the same, with an input VectorXx and an output VectorXx. + // The VectorXx is created by reading a vector of Halfs as a VectorXx then widened to two VectorXxs and cast to VectorXxs. + // We loop handling one input vector at a time, producing two output float vectors. 
+ + #pragma warning disable IDE0059 // https://github.com/dotnet/roslyn/issues/44948 + const uint ExponentLowerBound = 0x3880_0000u; // The smallest positive normal number in Half, converted to Single + const uint ExponentOffset = 0x3800_0000u; // BitConverter.SingleToUInt32Bits(1.0f) - ((uint)BitConverter.HalfToUInt16Bits((Half)1.0f) << 13) + const uint SingleSignMask = 0x8000_0000; // float.SignMask; // Mask for sign bit in Single + const uint HalfExponentMask = 0x7C00; // Mask for exponent bits in Half + const uint HalfToSingleBitsMask = 0x0FFF_E000; // Mask for bits in Single converted from Half + #pragma warning restore IDE0059 + + static Vector128 HalfAsWidenedUInt32ToSingle_Vector128(Vector128 value) + { + // Extract sign bit of value + Vector128 sign = value & Vector128.Create(SingleSignMask); + + // Copy sign bit to upper bits + Vector128 bitValueInProcess = value; + + // Extract exponent bits of value (BiasedExponent is not for here as it performs unnecessary shift) + Vector128 offsetExponent = bitValueInProcess & Vector128.Create(HalfExponentMask); + + // ~0u when value is subnormal, 0 otherwise + Vector128 subnormalMask = Vector128.Equals(offsetExponent, Vector128.Zero); + + // ~0u when value is either Infinity or NaN, 0 otherwise + Vector128 infinityOrNaNMask = Vector128.Equals(offsetExponent, Vector128.Create(HalfExponentMask)); + + // 0x3880_0000u if value is subnormal, 0 otherwise + Vector128 maskedExponentLowerBound = subnormalMask & Vector128.Create(ExponentLowerBound); + + // 0x3880_0000u if value is subnormal, 0x3800_0000u otherwise + Vector128 offsetMaskedExponentLowerBound = Vector128.Create(ExponentOffset) | maskedExponentLowerBound; + + // Match the position of the boundary of exponent bits and fraction bits with IEEE 754 Binary32(Single) + bitValueInProcess = Vector128.ShiftLeft(bitValueInProcess, 13); + + // Double the offsetMaskedExponentLowerBound if value is either Infinity or NaN + offsetMaskedExponentLowerBound = 
Vector128.ConditionalSelect(Vector128.Equals(infinityOrNaNMask, Vector128.Zero), + offsetMaskedExponentLowerBound, + Vector128.ShiftLeft(offsetMaskedExponentLowerBound, 1)); + + // Extract exponent bits and fraction bits of value + bitValueInProcess &= Vector128.Create(HalfToSingleBitsMask); + + // Adjust exponent to match the range of exponent + bitValueInProcess += offsetMaskedExponentLowerBound; + + // If value is subnormal, remove unnecessary 1 on top of fraction bits. + Vector128 absoluteValue = (bitValueInProcess.AsSingle() - maskedExponentLowerBound.AsSingle()).AsUInt32(); + + // Merge sign bit with rest + return (absoluteValue | sign).AsSingle(); + } + + static Vector256 HalfAsWidenedUInt32ToSingle_Vector256(Vector256 value) + { + // Extract sign bit of value + Vector256 sign = value & Vector256.Create(SingleSignMask); + + // Copy sign bit to upper bits + Vector256 bitValueInProcess = value; + + // Extract exponent bits of value (BiasedExponent is not for here as it performs unnecessary shift) + Vector256 offsetExponent = bitValueInProcess & Vector256.Create(HalfExponentMask); + + // ~0u when value is subnormal, 0 otherwise + Vector256 subnormalMask = Vector256.Equals(offsetExponent, Vector256.Zero); + + // ~0u when value is either Infinity or NaN, 0 otherwise + Vector256 infinityOrNaNMask = Vector256.Equals(offsetExponent, Vector256.Create(HalfExponentMask)); + + // 0x3880_0000u if value is subnormal, 0 otherwise + Vector256 maskedExponentLowerBound = subnormalMask & Vector256.Create(ExponentLowerBound); + + // 0x3880_0000u if value is subnormal, 0x3800_0000u otherwise + Vector256 offsetMaskedExponentLowerBound = Vector256.Create(ExponentOffset) | maskedExponentLowerBound; + + // Match the position of the boundary of exponent bits and fraction bits with IEEE 754 Binary32(Single) + bitValueInProcess = Vector256.ShiftLeft(bitValueInProcess, 13); + + // Double the offsetMaskedExponentLowerBound if value is either Infinity or NaN + 
offsetMaskedExponentLowerBound = Vector256.ConditionalSelect(Vector256.Equals(infinityOrNaNMask, Vector256.Zero), + offsetMaskedExponentLowerBound, + Vector256.ShiftLeft(offsetMaskedExponentLowerBound, 1)); + + // Extract exponent bits and fraction bits of value + bitValueInProcess &= Vector256.Create(HalfToSingleBitsMask); + + // Adjust exponent to match the range of exponent + bitValueInProcess += offsetMaskedExponentLowerBound; + + // If value is subnormal, remove unnecessary 1 on top of fraction bits. + Vector256 absoluteValue = (bitValueInProcess.AsSingle() - maskedExponentLowerBound.AsSingle()).AsUInt32(); + + // Merge sign bit with rest + return (absoluteValue | sign).AsSingle(); + } + +#if NET8_0_OR_GREATER + static Vector512 HalfAsWidenedUInt32ToSingle_Vector512(Vector512 value) + { + // Extract sign bit of value + Vector512 sign = value & Vector512.Create(SingleSignMask); + + // Copy sign bit to upper bits + Vector512 bitValueInProcess = value; + + // Extract exponent bits of value (BiasedExponent is not for here as it performs unnecessary shift) + Vector512 offsetExponent = bitValueInProcess & Vector512.Create(HalfExponentMask); + + // ~0u when value is subnormal, 0 otherwise + Vector512 subnormalMask = Vector512.Equals(offsetExponent, Vector512.Zero); + + // ~0u when value is either Infinity or NaN, 0 otherwise + Vector512 infinityOrNaNMask = Vector512.Equals(offsetExponent, Vector512.Create(HalfExponentMask)); + + // 0x3880_0000u if value is subnormal, 0 otherwise + Vector512 maskedExponentLowerBound = subnormalMask & Vector512.Create(ExponentLowerBound); + + // 0x3880_0000u if value is subnormal, 0x3800_0000u otherwise + Vector512 offsetMaskedExponentLowerBound = Vector512.Create(ExponentOffset) | maskedExponentLowerBound; + + // Match the position of the boundary of exponent bits and fraction bits with IEEE 754 Binary32(Single) + bitValueInProcess = Vector512.ShiftLeft(bitValueInProcess, 13); + + // Double the offsetMaskedExponentLowerBound if value 
is either Infinity or NaN + offsetMaskedExponentLowerBound = Vector512.ConditionalSelect(Vector512.Equals(infinityOrNaNMask, Vector512.Zero), + offsetMaskedExponentLowerBound, + Vector512.ShiftLeft(offsetMaskedExponentLowerBound, 1)); + + // Extract exponent bits and fraction bits of value + bitValueInProcess &= Vector512.Create(HalfToSingleBitsMask); + + // Adjust exponent to match the range of exponent + bitValueInProcess += offsetMaskedExponentLowerBound; + + // If value is subnormal, remove unnecessary 1 on top of fraction bits. + Vector512 absoluteValue = (bitValueInProcess.AsSingle() - maskedExponentLowerBound.AsSingle()).AsUInt32(); + + // Merge sign bit with rest + return (absoluteValue | sign).AsSingle(); + } +#endif + } private static float CosineSimilarityCore(ReadOnlySpan x, ReadOnlySpan y) { @@ -66,12 +611,6 @@ private static float CosineSimilarityCore(ReadOnlySpan x, ReadOnlySpan= Vector512.Count) { @@ -84,6 +623,7 @@ private static float CosineSimilarityCore(ReadOnlySpan x, ReadOnlySpan.Count; + int i = 0; do { Vector512 xVec = Vector512.LoadUnsafe(ref xRef, (uint)i); @@ -97,13 +637,28 @@ private static float CosineSimilarityCore(ReadOnlySpan x, ReadOnlySpan xVec = Vector512.LoadUnsafe(ref xRef, (uint)(x.Length - Vector512.Count)); + Vector512 yVec = Vector512.LoadUnsafe(ref yRef, (uint)(x.Length - Vector512.Count)); + + Vector512 remainderMask = LoadRemainderMaskSingleVector512(x.Length - i); + xVec &= remainderMask; + yVec &= remainderMask; + + dotProductVector = FusedMultiplyAdd(xVec, yVec, dotProductVector); + xSumOfSquaresVector = FusedMultiplyAdd(xVec, xVec, xSumOfSquaresVector); + ySumOfSquaresVector = FusedMultiplyAdd(yVec, yVec, ySumOfSquaresVector); + } + + // Sum(X * Y) / (|X| * |Y|) + return + Vector512.Sum(dotProductVector) / + (MathF.Sqrt(Vector512.Sum(xSumOfSquaresVector)) * MathF.Sqrt(Vector512.Sum(ySumOfSquaresVector))); } - else #endif + if (Vector256.IsHardwareAccelerated && x.Length >= Vector256.Count) { ref float xRef = ref 
MemoryMarshal.GetReference(x); @@ -115,6 +670,7 @@ private static float CosineSimilarityCore(ReadOnlySpan x, ReadOnlySpan.Count; + int i = 0; do { Vector256 xVec = Vector256.LoadUnsafe(ref xRef, (uint)i); @@ -128,12 +684,28 @@ private static float CosineSimilarityCore(ReadOnlySpan x, ReadOnlySpan xVec = Vector256.LoadUnsafe(ref xRef, (uint)(x.Length - Vector256.Count)); + Vector256 yVec = Vector256.LoadUnsafe(ref yRef, (uint)(x.Length - Vector256.Count)); + + Vector256 remainderMask = LoadRemainderMaskSingleVector256(x.Length - i); + xVec &= remainderMask; + yVec &= remainderMask; + + dotProductVector = FusedMultiplyAdd(xVec, yVec, dotProductVector); + xSumOfSquaresVector = FusedMultiplyAdd(xVec, xVec, xSumOfSquaresVector); + ySumOfSquaresVector = FusedMultiplyAdd(yVec, yVec, ySumOfSquaresVector); + } + + // Sum(X * Y) / (|X| * |Y|) + return + Vector256.Sum(dotProductVector) / + (MathF.Sqrt(Vector256.Sum(xSumOfSquaresVector)) * MathF.Sqrt(Vector256.Sum(ySumOfSquaresVector))); } - else if (Vector128.IsHardwareAccelerated && x.Length >= Vector128.Count) + + if (Vector128.IsHardwareAccelerated && x.Length >= Vector128.Count) { ref float xRef = ref MemoryMarshal.GetReference(x); ref float yRef = ref MemoryMarshal.GetReference(y); @@ -144,6 +716,7 @@ private static float CosineSimilarityCore(ReadOnlySpan x, ReadOnlySpan.Count; + int i = 0; do { Vector128 xVec = Vector128.LoadUnsafe(ref xRef, (uint)i); @@ -157,14 +730,31 @@ private static float CosineSimilarityCore(ReadOnlySpan x, ReadOnlySpan xVec = Vector128.LoadUnsafe(ref xRef, (uint)(x.Length - Vector128.Count)); + Vector128 yVec = Vector128.LoadUnsafe(ref yRef, (uint)(x.Length - Vector128.Count)); + + Vector128 remainderMask = LoadRemainderMaskSingleVector128(x.Length - i); + xVec &= remainderMask; + yVec &= remainderMask; + + dotProductVector = FusedMultiplyAdd(xVec, yVec, dotProductVector); + xSumOfSquaresVector = FusedMultiplyAdd(xVec, xVec, xSumOfSquaresVector); + ySumOfSquaresVector = FusedMultiplyAdd(yVec, 
yVec, ySumOfSquaresVector); + } + + // Sum(X * Y) / (|X| * |Y|) + return + Vector128.Sum(dotProductVector) / + (MathF.Sqrt(Vector128.Sum(xSumOfSquaresVector)) * MathF.Sqrt(Vector128.Sum(ySumOfSquaresVector))); } - // Process any remaining elements past the last vector. - for (; (uint)i < (uint)x.Length; i++) + // Vectorization isn't supported or there are too few elements to vectorize. + // Use a scalar implementation. + float dotProduct = 0f, xSumOfSquares = 0f, ySumOfSquares = 0f; + for (int i = 0; i < x.Length; i++) { dotProduct = MathF.FusedMultiplyAdd(x[i], y[i], dotProduct); xSumOfSquares = MathF.FusedMultiplyAdd(x[i], x[i], xSumOfSquares); @@ -172,191 +762,447 @@ private static float CosineSimilarityCore(ReadOnlySpan x, ReadOnlySpan( - float identityValue, ReadOnlySpan x) - where TLoad : IUnaryOperator - where TAggregate : IBinaryOperator + ReadOnlySpan x) + where TLoad : struct, IUnaryOperator + where TAggregate : struct, IAggregationOperator { - // Initialize the result to the identity value - float result = identityValue; - int i = 0; + if (x.Length == 0) + { + return 0; + } + + ref float xRef = ref MemoryMarshal.GetReference(x); #if NET8_0_OR_GREATER - if (Vector512.IsHardwareAccelerated && x.Length >= Vector512.Count * 2) + if (Vector512.IsHardwareAccelerated && x.Length >= Vector512.Count) { - ref float xRef = ref MemoryMarshal.GetReference(x); - // Load the first vector as the initial set of results - Vector512 resultVector = TLoad.Invoke(Vector512.LoadUnsafe(ref xRef, 0)); + Vector512 result = TLoad.Invoke(Vector512.LoadUnsafe(ref xRef, 0)); int oneVectorFromEnd = x.Length - Vector512.Count; + int i = Vector512.Count; // Aggregate additional vectors into the result as long as there's at // least one full vector left to process. 
- i = Vector512.Count; - do + while (i <= oneVectorFromEnd) { - resultVector = TAggregate.Invoke(resultVector, TLoad.Invoke(Vector512.LoadUnsafe(ref xRef, (uint)i))); + result = TAggregate.Invoke(result, TLoad.Invoke(Vector512.LoadUnsafe(ref xRef, (uint)i))); i += Vector512.Count; } - while (i <= oneVectorFromEnd); + + // Process the last vector in the span, masking off elements already processed. + if (i != x.Length) + { + result = TAggregate.Invoke(result, + Vector512.ConditionalSelect( + Vector512.Equals(LoadRemainderMaskSingleVector512(x.Length - i), Vector512.Zero), + Vector512.Create(TAggregate.IdentityValue), + TLoad.Invoke(Vector512.LoadUnsafe(ref xRef, (uint)(x.Length - Vector512.Count))))); + } // Aggregate the lanes in the vector back into the scalar result - result = TAggregate.Invoke(result, TAggregate.Invoke(resultVector)); + return TAggregate.Invoke(result); } - else #endif - if (Vector256.IsHardwareAccelerated && x.Length >= Vector256.Count * 2) - { - ref float xRef = ref MemoryMarshal.GetReference(x); + if (Vector256.IsHardwareAccelerated && x.Length >= Vector256.Count) + { // Load the first vector as the initial set of results - Vector256 resultVector = TLoad.Invoke(Vector256.LoadUnsafe(ref xRef, 0)); + Vector256 result = TLoad.Invoke(Vector256.LoadUnsafe(ref xRef, 0)); int oneVectorFromEnd = x.Length - Vector256.Count; + int i = Vector256.Count; // Aggregate additional vectors into the result as long as there's at // least one full vector left to process. - i = Vector256.Count; - do + while (i <= oneVectorFromEnd) { - resultVector = TAggregate.Invoke(resultVector, TLoad.Invoke(Vector256.LoadUnsafe(ref xRef, (uint)i))); + result = TAggregate.Invoke(result, TLoad.Invoke(Vector256.LoadUnsafe(ref xRef, (uint)i))); i += Vector256.Count; } - while (i <= oneVectorFromEnd); + + // Process the last vector in the span, masking off elements already processed. 
+ if (i != x.Length) + { + result = TAggregate.Invoke(result, + Vector256.ConditionalSelect( + Vector256.Equals(LoadRemainderMaskSingleVector256(x.Length - i), Vector256.Zero), + Vector256.Create(TAggregate.IdentityValue), + TLoad.Invoke(Vector256.LoadUnsafe(ref xRef, (uint)(x.Length - Vector256.Count))))); + } // Aggregate the lanes in the vector back into the scalar result - result = TAggregate.Invoke(result, TAggregate.Invoke(resultVector)); + return TAggregate.Invoke(result); } - else if (Vector128.IsHardwareAccelerated && x.Length >= Vector128.Count * 2) - { - ref float xRef = ref MemoryMarshal.GetReference(x); + if (Vector128.IsHardwareAccelerated && x.Length >= Vector128.Count) + { // Load the first vector as the initial set of results - Vector128 resultVector = TLoad.Invoke(Vector128.LoadUnsafe(ref xRef, 0)); + Vector128 result = TLoad.Invoke(Vector128.LoadUnsafe(ref xRef, 0)); int oneVectorFromEnd = x.Length - Vector128.Count; + int i = Vector128.Count; // Aggregate additional vectors into the result as long as there's at // least one full vector left to process. - i = Vector128.Count; - do + while (i <= oneVectorFromEnd) { - resultVector = TAggregate.Invoke(resultVector, TLoad.Invoke(Vector128.LoadUnsafe(ref xRef, (uint)i))); + result = TAggregate.Invoke(result, TLoad.Invoke(Vector128.LoadUnsafe(ref xRef, (uint)i))); i += Vector128.Count; } - while (i <= oneVectorFromEnd); + + // Process the last vector in the span, masking off elements already processed. 
+ if (i != x.Length) + { + result = TAggregate.Invoke(result, + Vector128.ConditionalSelect( + Vector128.Equals(LoadRemainderMaskSingleVector128(x.Length - i), Vector128.Zero), + Vector128.Create(TAggregate.IdentityValue), + TLoad.Invoke(Vector128.LoadUnsafe(ref xRef, (uint)(x.Length - Vector128.Count))))); + } // Aggregate the lanes in the vector back into the scalar result - result = TAggregate.Invoke(result, TAggregate.Invoke(resultVector)); + return TAggregate.Invoke(result); } - // Aggregate the remaining items in the input span. - for (; (uint)i < (uint)x.Length; i++) + // Vectorization isn't supported or there are too few elements to vectorize. + // Use a scalar implementation. { - result = TAggregate.Invoke(result, TLoad.Invoke(x[i])); - } + float result = TLoad.Invoke(x[0]); + for (int i = 1; i < x.Length; i++) + { + result = TAggregate.Invoke(result, TLoad.Invoke(x[i])); + } - return result; + return result; + } } private static float Aggregate( - float identityValue, ReadOnlySpan x, ReadOnlySpan y) - where TBinary : IBinaryOperator - where TAggregate : IBinaryOperator + ReadOnlySpan x, ReadOnlySpan y) + where TBinary : struct, IBinaryOperator + where TAggregate : struct, IAggregationOperator { - // Initialize the result to the identity value - float result = identityValue; - int i = 0; + Debug.Assert(x.Length == y.Length); -#if NET8_0_OR_GREATER - if (Vector512.IsHardwareAccelerated && x.Length >= Vector512.Count * 2) + if (x.IsEmpty) { - ref float xRef = ref MemoryMarshal.GetReference(x); - ref float yRef = ref MemoryMarshal.GetReference(y); + return 0; + } + ref float xRef = ref MemoryMarshal.GetReference(x); + ref float yRef = ref MemoryMarshal.GetReference(y); + +#if NET8_0_OR_GREATER + if (Vector512.IsHardwareAccelerated && x.Length >= Vector512.Count) + { // Load the first vector as the initial set of results - Vector512 resultVector = TBinary.Invoke(Vector512.LoadUnsafe(ref xRef, 0), Vector512.LoadUnsafe(ref yRef, 0)); + Vector512 result = 
TBinary.Invoke(Vector512.LoadUnsafe(ref xRef, 0), Vector512.LoadUnsafe(ref yRef, 0)); int oneVectorFromEnd = x.Length - Vector512.Count; + int i = Vector512.Count; // Aggregate additional vectors into the result as long as there's at // least one full vector left to process. - i = Vector512.Count; - do + while (i <= oneVectorFromEnd) { - resultVector = TAggregate.Invoke(resultVector, TBinary.Invoke(Vector512.LoadUnsafe(ref xRef, (uint)i), Vector512.LoadUnsafe(ref yRef, (uint)i))); + result = TAggregate.Invoke(result, TBinary.Invoke(Vector512.LoadUnsafe(ref xRef, (uint)i), Vector512.LoadUnsafe(ref yRef, (uint)i))); i += Vector512.Count; } - while (i <= oneVectorFromEnd); + + // Process the last vector in the spans, masking off elements already processed. + if (i != x.Length) + { + result = TAggregate.Invoke(result, + Vector512.ConditionalSelect( + Vector512.Equals(LoadRemainderMaskSingleVector512(x.Length - i), Vector512.Zero), + Vector512.Create(TAggregate.IdentityValue), + TBinary.Invoke( + Vector512.LoadUnsafe(ref xRef, (uint)(x.Length - Vector512.Count)), + Vector512.LoadUnsafe(ref yRef, (uint)(x.Length - Vector512.Count))))); + } // Aggregate the lanes in the vector back into the scalar result - result = TAggregate.Invoke(result, TAggregate.Invoke(resultVector)); + return TAggregate.Invoke(result); } - else #endif - if (Vector256.IsHardwareAccelerated && x.Length >= Vector256.Count * 2) + + if (Vector256.IsHardwareAccelerated && x.Length >= Vector256.Count) { - ref float xRef = ref MemoryMarshal.GetReference(x); - ref float yRef = ref MemoryMarshal.GetReference(y); + // Load the first vector as the initial set of results + Vector256 result = TBinary.Invoke(Vector256.LoadUnsafe(ref xRef, 0), Vector256.LoadUnsafe(ref yRef, 0)); + int oneVectorFromEnd = x.Length - Vector256.Count; + int i = Vector256.Count; + + // Aggregate additional vectors into the result as long as there's at + // least one full vector left to process. 
+ while (i <= oneVectorFromEnd) + { + result = TAggregate.Invoke(result, TBinary.Invoke(Vector256.LoadUnsafe(ref xRef, (uint)i), Vector256.LoadUnsafe(ref yRef, (uint)i))); + i += Vector256.Count; + } + + // Process the last vector in the spans, masking off elements already processed. + if (i != x.Length) + { + result = TAggregate.Invoke(result, + Vector256.ConditionalSelect( + Vector256.Equals(LoadRemainderMaskSingleVector256(x.Length - i), Vector256.Zero), + Vector256.Create(TAggregate.IdentityValue), + TBinary.Invoke( + Vector256.LoadUnsafe(ref xRef, (uint)(x.Length - Vector256.Count)), + Vector256.LoadUnsafe(ref yRef, (uint)(x.Length - Vector256.Count))))); + } + + // Aggregate the lanes in the vector back into the scalar result + return TAggregate.Invoke(result); + } + if (Vector128.IsHardwareAccelerated && x.Length >= Vector128.Count) + { // Load the first vector as the initial set of results - Vector256 resultVector = TBinary.Invoke(Vector256.LoadUnsafe(ref xRef, 0), Vector256.LoadUnsafe(ref yRef, 0)); + Vector128 result = TBinary.Invoke(Vector128.LoadUnsafe(ref xRef, 0), Vector128.LoadUnsafe(ref yRef, 0)); + int oneVectorFromEnd = x.Length - Vector128.Count; + int i = Vector128.Count; + + // Aggregate additional vectors into the result as long as there's at + // least one full vector left to process. + while (i <= oneVectorFromEnd) + { + result = TAggregate.Invoke(result, TBinary.Invoke(Vector128.LoadUnsafe(ref xRef, (uint)i), Vector128.LoadUnsafe(ref yRef, (uint)i))); + i += Vector128.Count; + } + + // Process the last vector in the spans, masking off elements already processed. 
+ if (i != x.Length) + { + result = TAggregate.Invoke(result, + Vector128.ConditionalSelect( + Vector128.Equals(LoadRemainderMaskSingleVector128(x.Length - i), Vector128.Zero), + Vector128.Create(TAggregate.IdentityValue), + TBinary.Invoke( + Vector128.LoadUnsafe(ref xRef, (uint)(x.Length - Vector128.Count)), + Vector128.LoadUnsafe(ref yRef, (uint)(x.Length - Vector128.Count))))); + } + + // Aggregate the lanes in the vector back into the scalar result + return TAggregate.Invoke(result); + } + + // Vectorization isn't supported or there are too few elements to vectorize. + // Use a scalar implementation. + { + float result = TBinary.Invoke(xRef, yRef); + for (int i = 1; i < x.Length; i++) + { + result = TAggregate.Invoke(result, + TBinary.Invoke( + Unsafe.Add(ref xRef, i), + Unsafe.Add(ref yRef, i))); + } + + return result; + } + } + + /// + /// This is the same as , + /// except it early exits on NaN. + /// + private static float MinMaxCore(ReadOnlySpan x) where TMinMax : struct, IAggregationOperator + { + if (x.IsEmpty) + { + ThrowHelper.ThrowArgument_SpansMustBeNonEmpty(); + } + + // This matches the IEEE 754:2019 `maximum`/`minimum` functions. + // It propagates NaN inputs back to the caller and + // otherwise returns the greater of the inputs. + // It treats +0 as greater than -0 as per the specification. + +#if NET8_0_OR_GREATER + if (Vector512.IsHardwareAccelerated && x.Length >= Vector512.Count) + { + ref float xRef = ref MemoryMarshal.GetReference(x); + + // Load the first vector as the initial set of results, and bail immediately + // to scalar handling if it contains any NaNs (which don't compare equally to themselves). 
+ Vector512 result = Vector512.LoadUnsafe(ref xRef, 0), current; + if (!Vector512.EqualsAll(result, result)) + { + return GetFirstNaN(result); + } + + int oneVectorFromEnd = x.Length - Vector512.Count; + int i = Vector512.Count; + + // Aggregate additional vectors into the result as long as there's at least one full vector left to process. + while (i <= oneVectorFromEnd) + { + // Load the next vector, and early exit on NaN. + current = Vector512.LoadUnsafe(ref xRef, (uint)i); + if (!Vector512.EqualsAll(current, current)) + { + return GetFirstNaN(current); + } + + result = TMinMax.Invoke(result, current); + i += Vector512.Count; + } + + // If any elements remain, handle them in one final vector. + if (i != x.Length) + { + current = Vector512.LoadUnsafe(ref xRef, (uint)(x.Length - Vector512.Count)); + if (!Vector512.EqualsAll(current, current)) + { + return GetFirstNaN(current); + } + + result = Vector512.ConditionalSelect( + Vector512.Equals(LoadRemainderMaskSingleVector512(x.Length - i), Vector512.Zero), + result, + TMinMax.Invoke(result, current)); + } + + // Aggregate the lanes in the vector to create the final scalar result. + return TMinMax.Invoke(result); + } +#endif + + if (Vector256.IsHardwareAccelerated && x.Length >= Vector256.Count) + { + ref float xRef = ref MemoryMarshal.GetReference(x); + + // Load the first vector as the initial set of results, and bail immediately + // to scalar handling if it contains any NaNs (which don't compare equally to themselves). + Vector256 result = Vector256.LoadUnsafe(ref xRef, 0), current; + if (!Vector256.EqualsAll(result, result)) + { + return GetFirstNaN(result); + } + int oneVectorFromEnd = x.Length - Vector256.Count; + int i = Vector256.Count; + + // Aggregate additional vectors into the result as long as there's at least one full vector left to process. + while (i <= oneVectorFromEnd) + { + // Load the next vector, and early exit on NaN. 
+ current = Vector256.LoadUnsafe(ref xRef, (uint)i); + if (!Vector256.EqualsAll(current, current)) + { + return GetFirstNaN(current); + } + + result = TMinMax.Invoke(result, current); + i += Vector256.Count; + } - // Aggregate additional vectors into the result as long as there's at - // least one full vector left to process. - i = Vector256.Count; - do + // If any elements remain, handle them in one final vector. + if (i != x.Length) { - resultVector = TAggregate.Invoke(resultVector, TBinary.Invoke(Vector256.LoadUnsafe(ref xRef, (uint)i), Vector256.LoadUnsafe(ref yRef, (uint)i))); - i += Vector256.Count; + current = Vector256.LoadUnsafe(ref xRef, (uint)(x.Length - Vector256.Count)); + if (!Vector256.EqualsAll(current, current)) + { + return GetFirstNaN(current); + } + + result = Vector256.ConditionalSelect( + Vector256.Equals(LoadRemainderMaskSingleVector256(x.Length - i), Vector256.Zero), + result, + TMinMax.Invoke(result, current)); } - while (i <= oneVectorFromEnd); - // Aggregate the lanes in the vector back into the scalar result - result = TAggregate.Invoke(result, TAggregate.Invoke(resultVector)); + // Aggregate the lanes in the vector to create the final scalar result. + return TMinMax.Invoke(result); } - else if (Vector128.IsHardwareAccelerated && x.Length >= Vector128.Count * 2) + + if (Vector128.IsHardwareAccelerated && x.Length >= Vector128.Count) { ref float xRef = ref MemoryMarshal.GetReference(x); - ref float yRef = ref MemoryMarshal.GetReference(y); - // Load the first vector as the initial set of results - Vector128 resultVector = TBinary.Invoke(Vector128.LoadUnsafe(ref xRef, 0), Vector128.LoadUnsafe(ref yRef, 0)); + // Load the first vector as the initial set of results, and bail immediately + // to scalar handling if it contains any NaNs (which don't compare equally to themselves). 
+ Vector128 result = Vector128.LoadUnsafe(ref xRef, 0), current; + if (!Vector128.EqualsAll(result, result)) + { + return GetFirstNaN(result); + } + int oneVectorFromEnd = x.Length - Vector128.Count; + int i = Vector128.Count; - // Aggregate additional vectors into the result as long as there's at - // least one full vector left to process. - i = Vector128.Count; - do + // Aggregate additional vectors into the result as long as there's at least one full vector left to process. + while (i <= oneVectorFromEnd) { - resultVector = TAggregate.Invoke(resultVector, TBinary.Invoke(Vector128.LoadUnsafe(ref xRef, (uint)i), Vector128.LoadUnsafe(ref yRef, (uint)i))); + // Load the next vector, and early exit on NaN. + current = Vector128.LoadUnsafe(ref xRef, (uint)i); + if (!Vector128.EqualsAll(current, current)) + { + return GetFirstNaN(current); + } + + result = TMinMax.Invoke(result, current); i += Vector128.Count; } - while (i <= oneVectorFromEnd); - // Aggregate the lanes in the vector back into the scalar result - result = TAggregate.Invoke(result, TAggregate.Invoke(resultVector)); + // If any elements remain, handle them in one final vector. + if (i != x.Length) + { + current = Vector128.LoadUnsafe(ref xRef, (uint)(x.Length - Vector128.Count)); + if (!Vector128.EqualsAll(current, current)) + { + return GetFirstNaN(current); + } + + result = Vector128.ConditionalSelect( + Vector128.Equals(LoadRemainderMaskSingleVector128(x.Length - i), Vector128.Zero), + result, + TMinMax.Invoke(result, current)); + } + + // Aggregate the lanes in the vector to create the final scalar result. + return TMinMax.Invoke(result); } - // Aggregate the remaining items in the input span. - for (; (uint)i < (uint)x.Length; i++) + // Scalar path used when either vectorization is not supported or the input is too small to vectorize. 
{ - result = TAggregate.Invoke(result, TBinary.Invoke(x[i], y[i])); - } + float result = x[0]; + if (float.IsNaN(result)) + { + return result; + } + + for (int i = 1; i < x.Length; i++) + { + float current = x[i]; + if (float.IsNaN(current)) + { + return current; + } + + result = TMinMax.Invoke(result, current); + } - return result; + return result; + } } private static unsafe void InvokeSpanIntoSpan( ReadOnlySpan x, Span destination) - where TUnaryOperator : IUnaryOperator + where TUnaryOperator : struct, IUnaryOperator { if (x.Length > destination.Length) { ThrowHelper.ThrowArgument_DestinationTooShort(); } + ValidateInputOutputSpanNonOverlapping(x, destination); + ref float xRef = ref MemoryMarshal.GetReference(x); ref float dRef = ref MemoryMarshal.GetReference(destination); int i = 0, oneVectorFromEnd; @@ -380,7 +1226,10 @@ private static unsafe void InvokeSpanIntoSpan( if (i != x.Length) { uint lastVectorIndex = (uint)(x.Length - Vector512.Count); - TUnaryOperator.Invoke(Vector512.LoadUnsafe(ref xRef, lastVectorIndex)).StoreUnsafe(ref dRef, lastVectorIndex); + Vector512.ConditionalSelect( + Vector512.Equals(LoadRemainderMaskSingleVector512(x.Length - i), Vector512.Zero), + Vector512.LoadUnsafe(ref dRef, lastVectorIndex), + TUnaryOperator.Invoke(Vector512.LoadUnsafe(ref xRef, lastVectorIndex))).StoreUnsafe(ref dRef, lastVectorIndex); } return; @@ -406,7 +1255,10 @@ private static unsafe void InvokeSpanIntoSpan( if (i != x.Length) { uint lastVectorIndex = (uint)(x.Length - Vector256.Count); - TUnaryOperator.Invoke(Vector256.LoadUnsafe(ref xRef, lastVectorIndex)).StoreUnsafe(ref dRef, lastVectorIndex); + Vector256.ConditionalSelect( + Vector256.Equals(LoadRemainderMaskSingleVector256(x.Length - i), Vector256.Zero), + Vector256.LoadUnsafe(ref dRef, lastVectorIndex), + TUnaryOperator.Invoke(Vector256.LoadUnsafe(ref xRef, lastVectorIndex))).StoreUnsafe(ref dRef, lastVectorIndex); } return; @@ -431,7 +1283,10 @@ private static unsafe void InvokeSpanIntoSpan( if (i 
!= x.Length) { uint lastVectorIndex = (uint)(x.Length - Vector128.Count); - TUnaryOperator.Invoke(Vector128.LoadUnsafe(ref xRef, lastVectorIndex)).StoreUnsafe(ref dRef, lastVectorIndex); + Vector128.ConditionalSelect( + Vector128.Equals(LoadRemainderMaskSingleVector128(x.Length - i), Vector128.Zero), + Vector128.LoadUnsafe(ref dRef, lastVectorIndex), + TUnaryOperator.Invoke(Vector128.LoadUnsafe(ref xRef, lastVectorIndex))).StoreUnsafe(ref dRef, lastVectorIndex); } return; @@ -448,7 +1303,7 @@ private static unsafe void InvokeSpanIntoSpan( private static unsafe void InvokeSpanSpanIntoSpan( ReadOnlySpan x, ReadOnlySpan y, Span destination) - where TBinaryOperator : IBinaryOperator + where TBinaryOperator : struct, IBinaryOperator { if (x.Length != y.Length) { @@ -460,6 +1315,9 @@ private static unsafe void InvokeSpanSpanIntoSpan( ThrowHelper.ThrowArgument_DestinationTooShort(); } + ValidateInputOutputSpanNonOverlapping(x, destination); + ValidateInputOutputSpanNonOverlapping(y, destination); + ref float xRef = ref MemoryMarshal.GetReference(x); ref float yRef = ref MemoryMarshal.GetReference(y); ref float dRef = ref MemoryMarshal.GetReference(destination); @@ -485,8 +1343,11 @@ private static unsafe void InvokeSpanSpanIntoSpan( if (i != x.Length) { uint lastVectorIndex = (uint)(x.Length - Vector512.Count); - TBinaryOperator.Invoke(Vector512.LoadUnsafe(ref xRef, lastVectorIndex), - Vector512.LoadUnsafe(ref yRef, lastVectorIndex)).StoreUnsafe(ref dRef, lastVectorIndex); + Vector512.ConditionalSelect( + Vector512.Equals(LoadRemainderMaskSingleVector512(x.Length - i), Vector512.Zero), + Vector512.LoadUnsafe(ref dRef, lastVectorIndex), + TBinaryOperator.Invoke(Vector512.LoadUnsafe(ref xRef, lastVectorIndex), + Vector512.LoadUnsafe(ref yRef, lastVectorIndex))).StoreUnsafe(ref dRef, lastVectorIndex); } return; @@ -513,8 +1374,11 @@ private static unsafe void InvokeSpanSpanIntoSpan( if (i != x.Length) { uint lastVectorIndex = (uint)(x.Length - Vector256.Count); - 
TBinaryOperator.Invoke(Vector256.LoadUnsafe(ref xRef, lastVectorIndex), - Vector256.LoadUnsafe(ref yRef, lastVectorIndex)).StoreUnsafe(ref dRef, lastVectorIndex); + Vector256.ConditionalSelect( + Vector256.Equals(LoadRemainderMaskSingleVector256(x.Length - i), Vector256.Zero), + Vector256.LoadUnsafe(ref dRef, lastVectorIndex), + TBinaryOperator.Invoke(Vector256.LoadUnsafe(ref xRef, lastVectorIndex), + Vector256.LoadUnsafe(ref yRef, lastVectorIndex))).StoreUnsafe(ref dRef, lastVectorIndex); } return; @@ -540,8 +1404,11 @@ private static unsafe void InvokeSpanSpanIntoSpan( if (i != x.Length) { uint lastVectorIndex = (uint)(x.Length - Vector128.Count); - TBinaryOperator.Invoke(Vector128.LoadUnsafe(ref xRef, lastVectorIndex), - Vector128.LoadUnsafe(ref yRef, lastVectorIndex)).StoreUnsafe(ref dRef, lastVectorIndex); + Vector128.ConditionalSelect( + Vector128.Equals(LoadRemainderMaskSingleVector128(x.Length - i), Vector128.Zero), + Vector128.LoadUnsafe(ref dRef, lastVectorIndex), + TBinaryOperator.Invoke(Vector128.LoadUnsafe(ref xRef, lastVectorIndex), + Vector128.LoadUnsafe(ref yRef, lastVectorIndex))).StoreUnsafe(ref dRef, lastVectorIndex); } return; @@ -559,13 +1426,15 @@ private static unsafe void InvokeSpanSpanIntoSpan( private static unsafe void InvokeSpanScalarIntoSpan( ReadOnlySpan x, float y, Span destination) - where TBinaryOperator : IBinaryOperator + where TBinaryOperator : struct, IBinaryOperator { if (x.Length > destination.Length) { ThrowHelper.ThrowArgument_DestinationTooShort(); } + ValidateInputOutputSpanNonOverlapping(x, destination); + ref float xRef = ref MemoryMarshal.GetReference(x); ref float dRef = ref MemoryMarshal.GetReference(destination); int i = 0, oneVectorFromEnd; @@ -592,8 +1461,11 @@ private static unsafe void InvokeSpanScalarIntoSpan( if (i != x.Length) { uint lastVectorIndex = (uint)(x.Length - Vector512.Count); - TBinaryOperator.Invoke(Vector512.LoadUnsafe(ref xRef, lastVectorIndex), - yVec).StoreUnsafe(ref dRef, lastVectorIndex); + 
Vector512.ConditionalSelect( + Vector512.Equals(LoadRemainderMaskSingleVector512(x.Length - i), Vector512.Zero), + Vector512.LoadUnsafe(ref dRef, lastVectorIndex), + TBinaryOperator.Invoke(Vector512.LoadUnsafe(ref xRef, lastVectorIndex), + yVec)).StoreUnsafe(ref dRef, lastVectorIndex); } return; @@ -622,8 +1494,11 @@ private static unsafe void InvokeSpanScalarIntoSpan( if (i != x.Length) { uint lastVectorIndex = (uint)(x.Length - Vector256.Count); - TBinaryOperator.Invoke(Vector256.LoadUnsafe(ref xRef, lastVectorIndex), - yVec).StoreUnsafe(ref dRef, lastVectorIndex); + Vector256.ConditionalSelect( + Vector256.Equals(LoadRemainderMaskSingleVector256(x.Length - i), Vector256.Zero), + Vector256.LoadUnsafe(ref dRef, lastVectorIndex), + TBinaryOperator.Invoke(Vector256.LoadUnsafe(ref xRef, lastVectorIndex), + yVec)).StoreUnsafe(ref dRef, lastVectorIndex); } return; @@ -651,8 +1526,11 @@ private static unsafe void InvokeSpanScalarIntoSpan( if (i != x.Length) { uint lastVectorIndex = (uint)(x.Length - Vector128.Count); - TBinaryOperator.Invoke(Vector128.LoadUnsafe(ref xRef, lastVectorIndex), - yVec).StoreUnsafe(ref dRef, lastVectorIndex); + Vector128.ConditionalSelect( + Vector128.Equals(LoadRemainderMaskSingleVector128(x.Length - i), Vector128.Zero), + Vector128.LoadUnsafe(ref dRef, lastVectorIndex), + TBinaryOperator.Invoke(Vector128.LoadUnsafe(ref xRef, lastVectorIndex), + yVec)).StoreUnsafe(ref dRef, lastVectorIndex); } return; @@ -670,7 +1548,7 @@ private static unsafe void InvokeSpanScalarIntoSpan( private static unsafe void InvokeSpanSpanSpanIntoSpan( ReadOnlySpan x, ReadOnlySpan y, ReadOnlySpan z, Span destination) - where TTernaryOperator : ITernaryOperator + where TTernaryOperator : struct, ITernaryOperator { if (x.Length != y.Length || x.Length != z.Length) { @@ -682,6 +1560,10 @@ private static unsafe void InvokeSpanSpanSpanIntoSpan( ThrowHelper.ThrowArgument_DestinationTooShort(); } + ValidateInputOutputSpanNonOverlapping(x, destination); + 
ValidateInputOutputSpanNonOverlapping(y, destination); + ValidateInputOutputSpanNonOverlapping(z, destination); + ref float xRef = ref MemoryMarshal.GetReference(x); ref float yRef = ref MemoryMarshal.GetReference(y); ref float zRef = ref MemoryMarshal.GetReference(z); @@ -709,9 +1591,12 @@ private static unsafe void InvokeSpanSpanSpanIntoSpan( if (i != x.Length) { uint lastVectorIndex = (uint)(x.Length - Vector512.Count); - TTernaryOperator.Invoke(Vector512.LoadUnsafe(ref xRef, lastVectorIndex), - Vector512.LoadUnsafe(ref yRef, lastVectorIndex), - Vector512.LoadUnsafe(ref zRef, lastVectorIndex)).StoreUnsafe(ref dRef, lastVectorIndex); + Vector512.ConditionalSelect( + Vector512.Equals(LoadRemainderMaskSingleVector512(x.Length - i), Vector512.Zero), + Vector512.LoadUnsafe(ref dRef, lastVectorIndex), + TTernaryOperator.Invoke(Vector512.LoadUnsafe(ref xRef, lastVectorIndex), + Vector512.LoadUnsafe(ref yRef, lastVectorIndex), + Vector512.LoadUnsafe(ref zRef, lastVectorIndex))).StoreUnsafe(ref dRef, lastVectorIndex); } return; @@ -739,9 +1624,12 @@ private static unsafe void InvokeSpanSpanSpanIntoSpan( if (i != x.Length) { uint lastVectorIndex = (uint)(x.Length - Vector256.Count); - TTernaryOperator.Invoke(Vector256.LoadUnsafe(ref xRef, lastVectorIndex), - Vector256.LoadUnsafe(ref yRef, lastVectorIndex), - Vector256.LoadUnsafe(ref zRef, lastVectorIndex)).StoreUnsafe(ref dRef, lastVectorIndex); + Vector256.ConditionalSelect( + Vector256.Equals(LoadRemainderMaskSingleVector256(x.Length - i), Vector256.Zero), + Vector256.LoadUnsafe(ref dRef, lastVectorIndex), + TTernaryOperator.Invoke(Vector256.LoadUnsafe(ref xRef, lastVectorIndex), + Vector256.LoadUnsafe(ref yRef, lastVectorIndex), + Vector256.LoadUnsafe(ref zRef, lastVectorIndex))).StoreUnsafe(ref dRef, lastVectorIndex); } return; @@ -768,9 +1656,12 @@ private static unsafe void InvokeSpanSpanSpanIntoSpan( if (i != x.Length) { uint lastVectorIndex = (uint)(x.Length - Vector128.Count); - 
TTernaryOperator.Invoke(Vector128.LoadUnsafe(ref xRef, lastVectorIndex), - Vector128.LoadUnsafe(ref yRef, lastVectorIndex), - Vector128.LoadUnsafe(ref zRef, lastVectorIndex)).StoreUnsafe(ref dRef, lastVectorIndex); + Vector128.ConditionalSelect( + Vector128.Equals(LoadRemainderMaskSingleVector128(x.Length - i), Vector128.Zero), + Vector128.LoadUnsafe(ref dRef, lastVectorIndex), + TTernaryOperator.Invoke(Vector128.LoadUnsafe(ref xRef, lastVectorIndex), + Vector128.LoadUnsafe(ref yRef, lastVectorIndex), + Vector128.LoadUnsafe(ref zRef, lastVectorIndex))).StoreUnsafe(ref dRef, lastVectorIndex); } return; @@ -789,7 +1680,7 @@ private static unsafe void InvokeSpanSpanSpanIntoSpan( private static unsafe void InvokeSpanSpanScalarIntoSpan( ReadOnlySpan x, ReadOnlySpan y, float z, Span destination) - where TTernaryOperator : ITernaryOperator + where TTernaryOperator : struct, ITernaryOperator { if (x.Length != y.Length) { @@ -801,6 +1692,9 @@ private static unsafe void InvokeSpanSpanScalarIntoSpan( ThrowHelper.ThrowArgument_DestinationTooShort(); } + ValidateInputOutputSpanNonOverlapping(x, destination); + ValidateInputOutputSpanNonOverlapping(y, destination); + ref float xRef = ref MemoryMarshal.GetReference(x); ref float yRef = ref MemoryMarshal.GetReference(y); ref float dRef = ref MemoryMarshal.GetReference(destination); @@ -829,9 +1723,12 @@ private static unsafe void InvokeSpanSpanScalarIntoSpan( if (i != x.Length) { uint lastVectorIndex = (uint)(x.Length - Vector512.Count); - TTernaryOperator.Invoke(Vector512.LoadUnsafe(ref xRef, lastVectorIndex), - Vector512.LoadUnsafe(ref yRef, lastVectorIndex), - zVec).StoreUnsafe(ref dRef, lastVectorIndex); + Vector512.ConditionalSelect( + Vector512.Equals(LoadRemainderMaskSingleVector512(x.Length - i), Vector512.Zero), + Vector512.LoadUnsafe(ref dRef, lastVectorIndex), + TTernaryOperator.Invoke(Vector512.LoadUnsafe(ref xRef, lastVectorIndex), + Vector512.LoadUnsafe(ref yRef, lastVectorIndex), + zVec)).StoreUnsafe(ref dRef, 
lastVectorIndex); } return; @@ -861,9 +1758,12 @@ private static unsafe void InvokeSpanSpanScalarIntoSpan( if (i != x.Length) { uint lastVectorIndex = (uint)(x.Length - Vector256.Count); - TTernaryOperator.Invoke(Vector256.LoadUnsafe(ref xRef, lastVectorIndex), - Vector256.LoadUnsafe(ref yRef, lastVectorIndex), - zVec).StoreUnsafe(ref dRef, lastVectorIndex); + Vector256.ConditionalSelect( + Vector256.Equals(LoadRemainderMaskSingleVector256(x.Length - i), Vector256.Zero), + Vector256.LoadUnsafe(ref dRef, lastVectorIndex), + TTernaryOperator.Invoke(Vector256.LoadUnsafe(ref xRef, lastVectorIndex), + Vector256.LoadUnsafe(ref yRef, lastVectorIndex), + zVec)).StoreUnsafe(ref dRef, lastVectorIndex); } return; @@ -892,9 +1792,12 @@ private static unsafe void InvokeSpanSpanScalarIntoSpan( if (i != x.Length) { uint lastVectorIndex = (uint)(x.Length - Vector128.Count); - TTernaryOperator.Invoke(Vector128.LoadUnsafe(ref xRef, lastVectorIndex), - Vector128.LoadUnsafe(ref yRef, lastVectorIndex), - zVec).StoreUnsafe(ref dRef, lastVectorIndex); + Vector128.ConditionalSelect( + Vector128.Equals(LoadRemainderMaskSingleVector128(x.Length - i), Vector128.Zero), + Vector128.LoadUnsafe(ref dRef, lastVectorIndex), + TTernaryOperator.Invoke(Vector128.LoadUnsafe(ref xRef, lastVectorIndex), + Vector128.LoadUnsafe(ref yRef, lastVectorIndex), + zVec)).StoreUnsafe(ref dRef, lastVectorIndex); } return; @@ -913,7 +1816,7 @@ private static unsafe void InvokeSpanSpanScalarIntoSpan( private static unsafe void InvokeSpanScalarSpanIntoSpan( ReadOnlySpan x, float y, ReadOnlySpan z, Span destination) - where TTernaryOperator : ITernaryOperator + where TTernaryOperator : struct, ITernaryOperator { if (x.Length != z.Length) { @@ -925,6 +1828,9 @@ private static unsafe void InvokeSpanScalarSpanIntoSpan( ThrowHelper.ThrowArgument_DestinationTooShort(); } + ValidateInputOutputSpanNonOverlapping(x, destination); + ValidateInputOutputSpanNonOverlapping(z, destination); + ref float xRef = ref 
MemoryMarshal.GetReference(x); ref float zRef = ref MemoryMarshal.GetReference(z); ref float dRef = ref MemoryMarshal.GetReference(destination); @@ -953,9 +1859,12 @@ private static unsafe void InvokeSpanScalarSpanIntoSpan( if (i != x.Length) { uint lastVectorIndex = (uint)(x.Length - Vector512.Count); - TTernaryOperator.Invoke(Vector512.LoadUnsafe(ref xRef, lastVectorIndex), - yVec, - Vector512.LoadUnsafe(ref zRef, lastVectorIndex)).StoreUnsafe(ref dRef, lastVectorIndex); + Vector512.ConditionalSelect( + Vector512.Equals(LoadRemainderMaskSingleVector512(x.Length - i), Vector512.Zero), + Vector512.LoadUnsafe(ref dRef, lastVectorIndex), + TTernaryOperator.Invoke(Vector512.LoadUnsafe(ref xRef, lastVectorIndex), + yVec, + Vector512.LoadUnsafe(ref zRef, lastVectorIndex))).StoreUnsafe(ref dRef, lastVectorIndex); } return; @@ -985,206 +1894,631 @@ private static unsafe void InvokeSpanScalarSpanIntoSpan( if (i != x.Length) { uint lastVectorIndex = (uint)(x.Length - Vector256.Count); - TTernaryOperator.Invoke(Vector256.LoadUnsafe(ref xRef, lastVectorIndex), + Vector256.ConditionalSelect( + Vector256.Equals(LoadRemainderMaskSingleVector256(x.Length - i), Vector256.Zero), + Vector256.LoadUnsafe(ref dRef, lastVectorIndex), + TTernaryOperator.Invoke(Vector256.LoadUnsafe(ref xRef, lastVectorIndex), + yVec, + Vector256.LoadUnsafe(ref zRef, lastVectorIndex))).StoreUnsafe(ref dRef, lastVectorIndex); + } + + return; + } + } + + if (Vector128.IsHardwareAccelerated) + { + oneVectorFromEnd = x.Length - Vector128.Count; + if (i <= oneVectorFromEnd) + { + Vector128 yVec = Vector128.Create(y); + + // Loop handling one vector at a time. 
+ do + { + TTernaryOperator.Invoke(Vector128.LoadUnsafe(ref xRef, (uint)i), yVec, - Vector256.LoadUnsafe(ref zRef, lastVectorIndex)).StoreUnsafe(ref dRef, lastVectorIndex); + Vector128.LoadUnsafe(ref zRef, (uint)i)).StoreUnsafe(ref dRef, (uint)i); + + i += Vector128.Count; + } + while (i <= oneVectorFromEnd); + + // Handle any remaining elements with a final vector. + if (i != x.Length) + { + uint lastVectorIndex = (uint)(x.Length - Vector128.Count); + Vector128.ConditionalSelect( + Vector128.Equals(LoadRemainderMaskSingleVector128(x.Length - i), Vector128.Zero), + Vector128.LoadUnsafe(ref dRef, lastVectorIndex), + TTernaryOperator.Invoke(Vector128.LoadUnsafe(ref xRef, lastVectorIndex), + yVec, + Vector128.LoadUnsafe(ref zRef, lastVectorIndex))).StoreUnsafe(ref dRef, lastVectorIndex); } return; } } - if (Vector128.IsHardwareAccelerated) + while (i < x.Length) + { + Unsafe.Add(ref dRef, i) = TTernaryOperator.Invoke(Unsafe.Add(ref xRef, i), + y, + Unsafe.Add(ref zRef, i)); + + i++; + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static Vector128 FusedMultiplyAdd(Vector128 x, Vector128 y, Vector128 addend) + { + if (Fma.IsSupported) + { + return Fma.MultiplyAdd(x, y, addend); + } + + if (AdvSimd.IsSupported) + { + return AdvSimd.FusedMultiplyAdd(addend, x, y); + } + + return (x * y) + addend; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static Vector256 FusedMultiplyAdd(Vector256 x, Vector256 y, Vector256 addend) + { + if (Fma.IsSupported) + { + return Fma.MultiplyAdd(x, y, addend); + } + + return (x * y) + addend; + } + +#if NET8_0_OR_GREATER + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static Vector512 FusedMultiplyAdd(Vector512 x, Vector512 y, Vector512 addend) + { + if (Avx512F.IsSupported) + { + return Avx512F.FusedMultiplyAdd(x, y, addend); + } + + return (x * y) + addend; + } +#endif + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static float HorizontalAggregate(Vector128 x) 
where TAggregate : struct, IBinaryOperator => + TAggregate.Invoke( + TAggregate.Invoke(x[0], x[1]), + TAggregate.Invoke(x[2], x[3])); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static float HorizontalAggregate(Vector256 x) where TAggregate : struct, IBinaryOperator => + HorizontalAggregate(TAggregate.Invoke(x.GetLower(), x.GetUpper())); + +#if NET8_0_OR_GREATER + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static float HorizontalAggregate(Vector512 x) where TAggregate : struct, IBinaryOperator => + HorizontalAggregate(TAggregate.Invoke(x.GetLower(), x.GetUpper())); +#endif + + private static bool IsNegative(float f) => float.IsNegative(f); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static Vector128 IsNegative(Vector128 vector) => + Vector128.LessThan(vector.AsInt32(), Vector128.Zero).AsSingle(); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static Vector256 IsNegative(Vector256 vector) => + Vector256.LessThan(vector.AsInt32(), Vector256.Zero).AsSingle(); + +#if NET8_0_OR_GREATER + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static Vector512 IsNegative(Vector512 vector) => + Vector512.LessThan(vector.AsInt32(), Vector512.Zero).AsSingle(); +#endif + + private static float GetFirstNaN(Vector128 vector) => + vector[BitOperations.TrailingZeroCount((~Vector128.Equals(vector, vector)).ExtractMostSignificantBits())]; + + private static float GetFirstNaN(Vector256 vector) => + vector[BitOperations.TrailingZeroCount((~Vector256.Equals(vector, vector)).ExtractMostSignificantBits())]; + +#if NET8_0_OR_GREATER + private static float GetFirstNaN(Vector512 vector) => + vector[BitOperations.TrailingZeroCount((~Vector512.Equals(vector, vector)).ExtractMostSignificantBits())]; +#endif + + private static float Log2(float x) => MathF.Log2(x); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static unsafe Vector128 LoadRemainderMaskSingleVector128(int validItems) => + 
Vector128.LoadUnsafe( + ref Unsafe.As(ref MemoryMarshal.GetReference(RemainderUInt32Mask_16x16)), + (uint)((validItems * 16) + 12)); // last four floats in the row + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static unsafe Vector256 LoadRemainderMaskSingleVector256(int validItems) => + Vector256.LoadUnsafe( + ref Unsafe.As(ref MemoryMarshal.GetReference(RemainderUInt32Mask_16x16)), + (uint)((validItems * 16) + 8)); // last eight floats in the row + +#if NET8_0_OR_GREATER + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static unsafe Vector512 LoadRemainderMaskSingleVector512(int validItems) => + Vector512.LoadUnsafe( + ref Unsafe.As(ref MemoryMarshal.GetReference(RemainderUInt32Mask_16x16)), + (uint)(validItems * 16)); // all sixteen floats in the row +#endif + + private readonly struct AddOperator : IAggregationOperator + { + public static float Invoke(float x, float y) => x + y; + public static Vector128 Invoke(Vector128 x, Vector128 y) => x + y; + public static Vector256 Invoke(Vector256 x, Vector256 y) => x + y; +#if NET8_0_OR_GREATER + public static Vector512 Invoke(Vector512 x, Vector512 y) => x + y; +#endif + + public static float Invoke(Vector128 x) => Vector128.Sum(x); + public static float Invoke(Vector256 x) => Vector256.Sum(x); +#if NET8_0_OR_GREATER + public static float Invoke(Vector512 x) => Vector512.Sum(x); +#endif + + public static float IdentityValue => 0; + } + + private readonly struct SubtractOperator : IBinaryOperator + { + public static float Invoke(float x, float y) => x - y; + public static Vector128 Invoke(Vector128 x, Vector128 y) => x - y; + public static Vector256 Invoke(Vector256 x, Vector256 y) => x - y; +#if NET8_0_OR_GREATER + public static Vector512 Invoke(Vector512 x, Vector512 y) => x - y; +#endif + } + + private readonly struct SubtractSquaredOperator : IBinaryOperator + { + public static float Invoke(float x, float y) + { + float tmp = x - y; + return tmp * tmp; + } + + public static Vector128 
Invoke(Vector128 x, Vector128 y) + { + Vector128 tmp = x - y; + return tmp * tmp; + } + + public static Vector256 Invoke(Vector256 x, Vector256 y) + { + Vector256 tmp = x - y; + return tmp * tmp; + } + +#if NET8_0_OR_GREATER + public static Vector512 Invoke(Vector512 x, Vector512 y) + { + Vector512 tmp = x - y; + return tmp * tmp; + } +#endif + } + + private readonly struct MultiplyOperator : IAggregationOperator + { + public static float Invoke(float x, float y) => x * y; + public static Vector128 Invoke(Vector128 x, Vector128 y) => x * y; + public static Vector256 Invoke(Vector256 x, Vector256 y) => x * y; +#if NET8_0_OR_GREATER + public static Vector512 Invoke(Vector512 x, Vector512 y) => x * y; +#endif + + public static float Invoke(Vector128 x) => HorizontalAggregate(x); + public static float Invoke(Vector256 x) => HorizontalAggregate(x); +#if NET8_0_OR_GREATER + public static float Invoke(Vector512 x) => HorizontalAggregate(x); +#endif + + public static float IdentityValue => 1; + } + + private readonly struct DivideOperator : IBinaryOperator + { + public static float Invoke(float x, float y) => x / y; + public static Vector128 Invoke(Vector128 x, Vector128 y) => x / y; + public static Vector256 Invoke(Vector256 x, Vector256 y) => x / y; +#if NET8_0_OR_GREATER + public static Vector512 Invoke(Vector512 x, Vector512 y) => x / y; +#endif + } + + private readonly struct MaxOperator : IAggregationOperator + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static float Invoke(float x, float y) => + x == y ? + (IsNegative(x) ? y : x) : + (y > x ? 
y : x); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector128 Invoke(Vector128 x, Vector128 y) + { + if (AdvSimd.IsSupported) + { + return AdvSimd.Max(x, y); + } + + return + Vector128.ConditionalSelect(Vector128.Equals(x, y), + Vector128.ConditionalSelect(IsNegative(x), y, x), + Vector128.Max(x, y)); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector256 Invoke(Vector256 x, Vector256 y) => + Vector256.ConditionalSelect(Vector256.Equals(x, y), + Vector256.ConditionalSelect(IsNegative(x), y, x), + Vector256.Max(x, y)); + +#if NET8_0_OR_GREATER + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector512 Invoke(Vector512 x, Vector512 y) => + Vector512.ConditionalSelect(Vector512.Equals(x, y), + Vector512.ConditionalSelect(IsNegative(x), y, x), + Vector512.Max(x, y)); +#endif + + public static float Invoke(Vector128 x) => HorizontalAggregate(x); + public static float Invoke(Vector256 x) => HorizontalAggregate(x); +#if NET8_0_OR_GREATER + public static float Invoke(Vector512 x) => HorizontalAggregate(x); +#endif + } + + private readonly struct MaxPropagateNaNOperator : IBinaryOperator + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static float Invoke(float x, float y) => MathF.Max(x, y); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector128 Invoke(Vector128 x, Vector128 y) { - oneVectorFromEnd = x.Length - Vector128.Count; - if (i <= oneVectorFromEnd) + if (AdvSimd.IsSupported) { - Vector128 yVec = Vector128.Create(y); + return AdvSimd.Max(x, y); + } - // Loop handling one vector at a time. 
- do - { - TTernaryOperator.Invoke(Vector128.LoadUnsafe(ref xRef, (uint)i), - yVec, - Vector128.LoadUnsafe(ref zRef, (uint)i)).StoreUnsafe(ref dRef, (uint)i); + return + Vector128.ConditionalSelect(Vector128.Equals(x, x), + Vector128.ConditionalSelect(Vector128.Equals(y, y), + Vector128.ConditionalSelect(Vector128.Equals(x, y), + Vector128.ConditionalSelect(IsNegative(x), y, x), + Vector128.Max(x, y)), + y), + x); + } - i += Vector128.Count; - } - while (i <= oneVectorFromEnd); + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector256 Invoke(Vector256 x, Vector256 y) => + Vector256.ConditionalSelect(Vector256.Equals(x, x), + Vector256.ConditionalSelect(Vector256.Equals(y, y), + Vector256.ConditionalSelect(Vector256.Equals(x, y), + Vector256.ConditionalSelect(IsNegative(x), y, x), + Vector256.Max(x, y)), + y), + x); - // Handle any remaining elements with a final vector. - if (i != x.Length) - { - uint lastVectorIndex = (uint)(x.Length - Vector128.Count); - TTernaryOperator.Invoke(Vector128.LoadUnsafe(ref xRef, lastVectorIndex), - yVec, - Vector128.LoadUnsafe(ref zRef, lastVectorIndex)).StoreUnsafe(ref dRef, lastVectorIndex); - } +#if NET8_0_OR_GREATER + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector512 Invoke(Vector512 x, Vector512 y) => + Vector512.ConditionalSelect(Vector512.Equals(x, x), + Vector512.ConditionalSelect(Vector512.Equals(y, y), + Vector512.ConditionalSelect(Vector512.Equals(x, y), + Vector512.ConditionalSelect(IsNegative(x), y, x), + Vector512.Max(x, y)), + y), + x); +#endif + } - return; - } + private readonly struct MaxMagnitudeOperator : IAggregationOperator + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static float Invoke(float x, float y) + { + float xMag = MathF.Abs(x), yMag = MathF.Abs(y); + return + xMag == yMag ? + (IsNegative(x) ? y : x) : + (xMag > yMag ? 
x : y); } - while (i < x.Length) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector128 Invoke(Vector128 x, Vector128 y) { - Unsafe.Add(ref dRef, i) = TTernaryOperator.Invoke(Unsafe.Add(ref xRef, i), - y, - Unsafe.Add(ref zRef, i)); - - i++; + Vector128 xMag = Vector128.Abs(x), yMag = Vector128.Abs(y); + return + Vector128.ConditionalSelect(Vector128.Equals(xMag, yMag), + Vector128.ConditionalSelect(IsNegative(x), y, x), + Vector128.ConditionalSelect(Vector128.GreaterThan(xMag, yMag), x, y)); } - } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static Vector128 FusedMultiplyAdd(Vector128 x, Vector128 y, Vector128 addend) - { - if (Fma.IsSupported) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector256 Invoke(Vector256 x, Vector256 y) { - return Fma.MultiplyAdd(x, y, addend); + Vector256 xMag = Vector256.Abs(x), yMag = Vector256.Abs(y); + return + Vector256.ConditionalSelect(Vector256.Equals(xMag, yMag), + Vector256.ConditionalSelect(IsNegative(x), y, x), + Vector256.ConditionalSelect(Vector256.GreaterThan(xMag, yMag), x, y)); } - if (AdvSimd.IsSupported) +#if NET8_0_OR_GREATER + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector512 Invoke(Vector512 x, Vector512 y) { - return AdvSimd.FusedMultiplyAdd(addend, x, y); + Vector512 xMag = Vector512.Abs(x), yMag = Vector512.Abs(y); + return + Vector512.ConditionalSelect(Vector512.Equals(xMag, yMag), + Vector512.ConditionalSelect(IsNegative(x), y, x), + Vector512.ConditionalSelect(Vector512.GreaterThan(xMag, yMag), x, y)); } +#endif - return (x * y) + addend; + public static float Invoke(Vector128 x) => HorizontalAggregate(x); + public static float Invoke(Vector256 x) => HorizontalAggregate(x); +#if NET8_0_OR_GREATER + public static float Invoke(Vector512 x) => HorizontalAggregate(x); +#endif } - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static Vector256 FusedMultiplyAdd(Vector256 x, Vector256 y, Vector256 addend) + 
private readonly struct MaxMagnitudePropagateNaNOperator : IBinaryOperator { - if (Fma.IsSupported) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static float Invoke(float x, float y) => MathF.MaxMagnitude(x, y); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector128 Invoke(Vector128 x, Vector128 y) { - return Fma.MultiplyAdd(x, y, addend); + Vector128 xMag = Vector128.Abs(x), yMag = Vector128.Abs(y); + return + Vector128.ConditionalSelect(Vector128.Equals(x, x), + Vector128.ConditionalSelect(Vector128.Equals(y, y), + Vector128.ConditionalSelect(Vector128.Equals(yMag, xMag), + Vector128.ConditionalSelect(IsNegative(x), y, x), + Vector128.ConditionalSelect(Vector128.GreaterThan(yMag, xMag), y, x)), + y), + x); } - return (x * y) + addend; - } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector256 Invoke(Vector256 x, Vector256 y) + { + Vector256 xMag = Vector256.Abs(x), yMag = Vector256.Abs(y); + return + Vector256.ConditionalSelect(Vector256.Equals(x, x), + Vector256.ConditionalSelect(Vector256.Equals(y, y), + Vector256.ConditionalSelect(Vector256.Equals(xMag, yMag), + Vector256.ConditionalSelect(IsNegative(x), y, x), + Vector256.ConditionalSelect(Vector256.GreaterThan(xMag, yMag), x, y)), + y), + x); + } #if NET8_0_OR_GREATER - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private static Vector512 FusedMultiplyAdd(Vector512 x, Vector512 y, Vector512 addend) - { - if (Avx512F.IsSupported) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector512 Invoke(Vector512 x, Vector512 y) { - return Avx512F.FusedMultiplyAdd(x, y, addend); + Vector512 xMag = Vector512.Abs(x), yMag = Vector512.Abs(y); + return + Vector512.ConditionalSelect(Vector512.Equals(x, x), + Vector512.ConditionalSelect(Vector512.Equals(y, y), + Vector512.ConditionalSelect(Vector512.Equals(xMag, yMag), + Vector512.ConditionalSelect(IsNegative(x), y, x), + Vector512.ConditionalSelect(Vector512.GreaterThan(xMag, 
yMag), x, y)), + y), + x); } - - return (x * y) + addend; - } #endif + } - private readonly struct AddOperator : IBinaryOperator + private readonly struct MinOperator : IAggregationOperator { - public static float Invoke(float x, float y) => x + y; - public static Vector128 Invoke(Vector128 x, Vector128 y) => x + y; - public static Vector256 Invoke(Vector256 x, Vector256 y) => x + y; + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static float Invoke(float x, float y) => + x == y ? + (IsNegative(y) ? y : x) : + (y < x ? y : x); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector128 Invoke(Vector128 x, Vector128 y) + { + if (AdvSimd.IsSupported) + { + return AdvSimd.Min(x, y); + } + + return + Vector128.ConditionalSelect(Vector128.Equals(x, y), + Vector128.ConditionalSelect(IsNegative(y), y, x), + Vector128.Min(x, y)); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector256 Invoke(Vector256 x, Vector256 y) => + Vector256.ConditionalSelect(Vector256.Equals(x, y), + Vector256.ConditionalSelect(IsNegative(y), y, x), + Vector256.Min(x, y)); + #if NET8_0_OR_GREATER - public static Vector512 Invoke(Vector512 x, Vector512 y) => x + y; + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector512 Invoke(Vector512 x, Vector512 y) => + Vector512.ConditionalSelect(Vector512.Equals(x, y), + Vector512.ConditionalSelect(IsNegative(y), y, x), + Vector512.Min(x, y)); #endif - public static float Invoke(Vector128 x) => Vector128.Sum(x); - public static float Invoke(Vector256 x) => Vector256.Sum(x); + public static float Invoke(Vector128 x) => HorizontalAggregate(x); + public static float Invoke(Vector256 x) => HorizontalAggregate(x); #if NET8_0_OR_GREATER - public static float Invoke(Vector512 x) => Vector512.Sum(x); + public static float Invoke(Vector512 x) => HorizontalAggregate(x); #endif } - private readonly struct SubtractOperator : IBinaryOperator + private readonly struct 
MinPropagateNaNOperator : IBinaryOperator { - public static float Invoke(float x, float y) => x - y; - public static Vector128 Invoke(Vector128 x, Vector128 y) => x - y; - public static Vector256 Invoke(Vector256 x, Vector256 y) => x - y; + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static float Invoke(float x, float y) => MathF.Min(x, y); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector128 Invoke(Vector128 x, Vector128 y) + { + if (AdvSimd.IsSupported) + { + return AdvSimd.Min(x, y); + } + + return + Vector128.ConditionalSelect(Vector128.Equals(x, x), + Vector128.ConditionalSelect(Vector128.Equals(y, y), + Vector128.ConditionalSelect(Vector128.Equals(x, y), + Vector128.ConditionalSelect(IsNegative(x), x, y), + Vector128.Min(x, y)), + y), + x); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector256 Invoke(Vector256 x, Vector256 y) => + Vector256.ConditionalSelect(Vector256.Equals(x, x), + Vector256.ConditionalSelect(Vector256.Equals(y, y), + Vector256.ConditionalSelect(Vector256.Equals(x, y), + Vector256.ConditionalSelect(IsNegative(x), x, y), + Vector256.Min(x, y)), + y), + x); + #if NET8_0_OR_GREATER - public static Vector512 Invoke(Vector512 x, Vector512 y) => x - y; + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector512 Invoke(Vector512 x, Vector512 y) => + Vector512.ConditionalSelect(Vector512.Equals(x, x), + Vector512.ConditionalSelect(Vector512.Equals(y, y), + Vector512.ConditionalSelect(Vector512.Equals(x, y), + Vector512.ConditionalSelect(IsNegative(x), x, y), + Vector512.Min(x, y)), + y), + x); #endif } - private readonly struct SubtractSquaredOperator : IBinaryOperator + private readonly struct MinMagnitudeOperator : IAggregationOperator { + [MethodImpl(MethodImplOptions.AggressiveInlining)] public static float Invoke(float x, float y) { - float tmp = x - y; - return tmp * tmp; + float xMag = MathF.Abs(x), yMag = MathF.Abs(y); + return xMag == yMag ? 
+ (IsNegative(y) ? y : x) : + (yMag < xMag ? y : x); } + [MethodImpl(MethodImplOptions.AggressiveInlining)] public static Vector128 Invoke(Vector128 x, Vector128 y) { - Vector128 tmp = x - y; - return tmp * tmp; + Vector128 xMag = Vector128.Abs(x), yMag = Vector128.Abs(y); + return + Vector128.ConditionalSelect(Vector128.Equals(yMag, xMag), + Vector128.ConditionalSelect(IsNegative(y), y, x), + Vector128.ConditionalSelect(Vector128.LessThan(yMag, xMag), y, x)); } + [MethodImpl(MethodImplOptions.AggressiveInlining)] public static Vector256 Invoke(Vector256 x, Vector256 y) { - Vector256 tmp = x - y; - return tmp * tmp; + Vector256 xMag = Vector256.Abs(x), yMag = Vector256.Abs(y); + return + Vector256.ConditionalSelect(Vector256.Equals(yMag, xMag), + Vector256.ConditionalSelect(IsNegative(y), y, x), + Vector256.ConditionalSelect(Vector256.LessThan(yMag, xMag), y, x)); } #if NET8_0_OR_GREATER + [MethodImpl(MethodImplOptions.AggressiveInlining)] public static Vector512 Invoke(Vector512 x, Vector512 y) { - Vector512 tmp = x - y; - return tmp * tmp; + Vector512 xMag = Vector512.Abs(x), yMag = Vector512.Abs(y); + return + Vector512.ConditionalSelect(Vector512.Equals(yMag, xMag), + Vector512.ConditionalSelect(IsNegative(y), y, x), + Vector512.ConditionalSelect(Vector512.LessThan(yMag, xMag), y, x)); } #endif - } - private readonly struct MultiplyOperator : IBinaryOperator - { - public static float Invoke(float x, float y) => x * y; - public static Vector128 Invoke(Vector128 x, Vector128 y) => x * y; - public static Vector256 Invoke(Vector256 x, Vector256 y) => x * y; + public static float Invoke(Vector128 x) => HorizontalAggregate(x); + public static float Invoke(Vector256 x) => HorizontalAggregate(x); #if NET8_0_OR_GREATER - public static Vector512 Invoke(Vector512 x, Vector512 y) => x * y; + public static float Invoke(Vector512 x) => HorizontalAggregate(x); #endif + } + + private readonly struct MinMagnitudePropagateNaNOperator : IBinaryOperator + { + 
[MethodImpl(MethodImplOptions.AggressiveInlining)] + public static float Invoke(float x, float y) => MathF.MinMagnitude(x, y); [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static float Invoke(Vector128 x) + public static Vector128 Invoke(Vector128 x, Vector128 y) { - float f = x[0]; - for (int i = 1; i < Vector128.Count; i++) - { - f *= x[i]; - } - return f; + Vector128 xMag = Vector128.Abs(x), yMag = Vector128.Abs(y); + return + Vector128.ConditionalSelect(Vector128.Equals(x, x), + Vector128.ConditionalSelect(Vector128.Equals(y, y), + Vector128.ConditionalSelect(Vector128.Equals(yMag, xMag), + Vector128.ConditionalSelect(IsNegative(x), x, y), + Vector128.ConditionalSelect(Vector128.LessThan(xMag, yMag), x, y)), + y), + x); } [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static float Invoke(Vector256 x) + public static Vector256 Invoke(Vector256 x, Vector256 y) { - float f = x[0]; - for (int i = 1; i < Vector256.Count; i++) - { - f *= x[i]; - } - return f; + Vector256 xMag = Vector256.Abs(x), yMag = Vector256.Abs(y); + return + Vector256.ConditionalSelect(Vector256.Equals(x, x), + Vector256.ConditionalSelect(Vector256.Equals(y, y), + Vector256.ConditionalSelect(Vector256.Equals(yMag, xMag), + Vector256.ConditionalSelect(IsNegative(x), x, y), + Vector256.ConditionalSelect(Vector256.LessThan(xMag, yMag), x, y)), + y), + x); } #if NET8_0_OR_GREATER [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static float Invoke(Vector512 x) + public static Vector512 Invoke(Vector512 x, Vector512 y) { - float f = x[0]; - for (int i = 1; i < Vector512.Count; i++) - { - f *= x[i]; - } - return f; + Vector512 xMag = Vector512.Abs(x), yMag = Vector512.Abs(y); + return + Vector512.ConditionalSelect(Vector512.Equals(x, x), + Vector512.ConditionalSelect(Vector512.Equals(y, y), + Vector512.ConditionalSelect(Vector512.Equals(yMag, xMag), + Vector512.ConditionalSelect(IsNegative(x), x, y), + Vector512.ConditionalSelect(Vector512.LessThan(xMag, yMag), 
x, y)), + y), + x); } #endif } - private readonly struct DivideOperator : IBinaryOperator - { - public static float Invoke(float x, float y) => x / y; - public static Vector128 Invoke(Vector128 x, Vector128 y) => x / y; - public static Vector256 Invoke(Vector256 x, Vector256 y) => x / y; -#if NET8_0_OR_GREATER - public static Vector512 Invoke(Vector512 x, Vector512 y) => x / y; -#endif - } - private readonly struct NegateOperator : IUnaryOperator { public static float Invoke(float x) => -x; @@ -1207,11 +2541,11 @@ public static float Invoke(Vector512 x) private readonly struct MultiplyAddOperator : ITernaryOperator { - public static float Invoke(float x, float y, float z) => MathF.FusedMultiplyAdd(x, y, z); - public static Vector128 Invoke(Vector128 x, Vector128 y, Vector128 z) => FusedMultiplyAdd(x, y, z); - public static Vector256 Invoke(Vector256 x, Vector256 y, Vector256 z) => FusedMultiplyAdd(x, y, z); + public static float Invoke(float x, float y, float z) => (x * y) + z; + public static Vector128 Invoke(Vector128 x, Vector128 y, Vector128 z) => (x * y) + z; + public static Vector256 Invoke(Vector256 x, Vector256 y, Vector256 z) => (x * y) + z; #if NET8_0_OR_GREATER - public static Vector512 Invoke(Vector512 x, Vector512 y, Vector512 z) => FusedMultiplyAdd(x, y, z); + public static Vector512 Invoke(Vector512 x, Vector512 y, Vector512 z) => (x * y) + z; #endif } @@ -1245,6 +2579,571 @@ public static float Invoke(Vector512 x) #endif } + private readonly struct LogOperator : IUnaryOperator + { + // This code is based on `vrs4_logf` from amd/aocl-libm-ose + // Copyright (C) 2018-2019 Advanced Micro Devices, Inc. All rights reserved. 
+ // + // Licensed under the BSD 3-Clause "New" or "Revised" License + // See THIRD-PARTY-NOTICES.TXT for the full license text + + // Spec: + // logf(x) + // = logf(x) if x ∈ F and x > 0 + // = x if x = qNaN + // = 0 if x = 1 + // = -inf if x = (-0, 0} + // = NaN otherwise + // + // Assumptions/Expectations + // - ULP is derived to be << 4 (always) + // - Some FPU Exceptions may not be available + // - Performance is at least 3x + // + // Implementation Notes: + // 1. Range Reduction: + // x = 2^n*(1+f) .... (1) + // where n is exponent and is an integer + // (1+f) is mantissa ∈ [1,2). i.e., 1 ≤ 1+f < 2 .... (2) + // + // From (1), taking log on both sides + // log(x) = log(2^n * (1+f)) + // = log(2^n) + log(1+f) + // = n*log(2) + log(1+f) .... (3) + // + // let z = 1 + f + // log(z) = log(k) + log(z) - log(k) + // log(z) = log(kz) - log(k) + // + // From (2), range of z is [1, 2) + // by simply dividing range by 'k', z is in [1/k, 2/k) .... (4) + // Best choice of k is the one which gives equal and opposite values + // at extrema +- -+ + // 1 | 2 | + // --- - 1 = - |--- - 1 | + // k | k | .... (5) + // +- -+ + // + // Solving for k, k = 3/2, + // From (4), using 'k' value, range is therefore [-0.3333, 0.3333] + // + // 2. 
Polynomial Approximation: + // More information refer to tools/sollya/vrs4_logf.sollya + // + // 7th Deg - Error abs: 0x1.04c4ac98p-22 rel: 0x1.2216e6f8p-19 + // 6th Deg - Error abs: 0x1.179e97d8p-19 rel: 0x1.db676c1p-17 + + private const uint V_MIN = 0x00800000; + private const uint V_MAX = 0x7F800000; + private const uint V_MASK = 0x007FFFFF; + private const uint V_OFF = 0x3F2AAAAB; + + private const float V_LN2 = 0.6931472f; + + private const float C0 = 0.0f; + private const float C1 = 1.0f; + private const float C2 = -0.5000001f; + private const float C3 = 0.33332965f; + private const float C4 = -0.24999046f; + private const float C5 = 0.20018855f; + private const float C6 = -0.16700386f; + private const float C7 = 0.13902695f; + private const float C8 = -0.1197452f; + private const float C9 = 0.14401625f; + private const float C10 = -0.13657966f; + + public static float Invoke(float x) => MathF.Log(x); + + public static Vector128 Invoke(Vector128 x) + { + Vector128 specialResult = x; + + // x is subnormal or infinity or NaN + Vector128 specialMask = Vector128.GreaterThanOrEqual(x.AsUInt32() - Vector128.Create(V_MIN), Vector128.Create(V_MAX - V_MIN)); + + if (specialMask != Vector128.Zero) + { + // float.IsZero(x) ? float.NegativeInfinity : x + Vector128 zeroMask = Vector128.Equals(x, Vector128.Zero); + + specialResult = Vector128.ConditionalSelect( + zeroMask, + Vector128.Create(float.NegativeInfinity), + specialResult + ); + + // (x < 0) ? 
float.NaN : x + Vector128 lessThanZeroMask = Vector128.LessThan(x, Vector128.Zero); + + specialResult = Vector128.ConditionalSelect( + lessThanZeroMask, + Vector128.Create(float.NaN), + specialResult + ); + + // float.IsZero(x) | (x < 0) | float.IsNaN(x) | float.IsPositiveInfinity(x) + Vector128 temp = zeroMask + | lessThanZeroMask + | ~Vector128.Equals(x, x) + | Vector128.Equals(x, Vector128.Create(float.PositiveInfinity)); + + // subnormal + Vector128 subnormalMask = Vector128.AndNot(specialMask.AsSingle(), temp); + + x = Vector128.ConditionalSelect( + subnormalMask, + ((x * 8388608.0f).AsUInt32() - Vector128.Create(23u << 23)).AsSingle(), + x + ); + + specialMask = temp.AsUInt32(); + } + + Vector128 vx = x.AsUInt32() - Vector128.Create(V_OFF); + Vector128 n = Vector128.ConvertToSingle(Vector128.ShiftRightArithmetic(vx.AsInt32(), 23)); + + vx = (vx & Vector128.Create(V_MASK)) + Vector128.Create(V_OFF); + + Vector128 r = vx.AsSingle() - Vector128.Create(1.0f); + + Vector128 r2 = r * r; + Vector128 r4 = r2 * r2; + Vector128 r8 = r4 * r4; + + Vector128 q = (Vector128.Create(C10) * r2 + (Vector128.Create(C9) * r + Vector128.Create(C8))) + * r8 + (((Vector128.Create(C7) * r + Vector128.Create(C6)) + * r2 + (Vector128.Create(C5) * r + Vector128.Create(C4))) + * r4 + ((Vector128.Create(C3) * r + Vector128.Create(C2)) + * r2 + (Vector128.Create(C1) * r + Vector128.Create(C0)))); + + return Vector128.ConditionalSelect( + specialMask.AsSingle(), + specialResult, + n * Vector128.Create(V_LN2) + q + ); + } + + public static Vector256 Invoke(Vector256 x) + { + Vector256 specialResult = x; + + // x is subnormal or infinity or NaN + Vector256 specialMask = Vector256.GreaterThanOrEqual(x.AsUInt32() - Vector256.Create(V_MIN), Vector256.Create(V_MAX - V_MIN)); + + if (specialMask != Vector256.Zero) + { + // float.IsZero(x) ? 
float.NegativeInfinity : x + Vector256 zeroMask = Vector256.Equals(x, Vector256.Zero); + + specialResult = Vector256.ConditionalSelect( + zeroMask, + Vector256.Create(float.NegativeInfinity), + specialResult + ); + + // (x < 0) ? float.NaN : x + Vector256 lessThanZeroMask = Vector256.LessThan(x, Vector256.Zero); + + specialResult = Vector256.ConditionalSelect( + lessThanZeroMask, + Vector256.Create(float.NaN), + specialResult + ); + + // float.IsZero(x) | (x < 0) | float.IsNaN(x) | float.IsPositiveInfinity(x) + Vector256 temp = zeroMask + | lessThanZeroMask + | ~Vector256.Equals(x, x) + | Vector256.Equals(x, Vector256.Create(float.PositiveInfinity)); + + // subnormal + Vector256 subnormalMask = Vector256.AndNot(specialMask.AsSingle(), temp); + + x = Vector256.ConditionalSelect( + subnormalMask, + ((x * 8388608.0f).AsUInt32() - Vector256.Create(23u << 23)).AsSingle(), + x + ); + + specialMask = temp.AsUInt32(); + } + + Vector256 vx = x.AsUInt32() - Vector256.Create(V_OFF); + Vector256 n = Vector256.ConvertToSingle(Vector256.ShiftRightArithmetic(vx.AsInt32(), 23)); + + vx = (vx & Vector256.Create(V_MASK)) + Vector256.Create(V_OFF); + + Vector256 r = vx.AsSingle() - Vector256.Create(1.0f); + + Vector256 r2 = r * r; + Vector256 r4 = r2 * r2; + Vector256 r8 = r4 * r4; + + Vector256 q = (Vector256.Create(C10) * r2 + (Vector256.Create(C9) * r + Vector256.Create(C8))) + * r8 + (((Vector256.Create(C7) * r + Vector256.Create(C6)) + * r2 + (Vector256.Create(C5) * r + Vector256.Create(C4))) + * r4 + ((Vector256.Create(C3) * r + Vector256.Create(C2)) + * r2 + (Vector256.Create(C1) * r + Vector256.Create(C0)))); + + return Vector256.ConditionalSelect( + specialMask.AsSingle(), + specialResult, + n * Vector256.Create(V_LN2) + q + ); + } + +#if NET8_0_OR_GREATER + public static Vector512 Invoke(Vector512 x) + { + Vector512 specialResult = x; + + // x is subnormal or infinity or NaN + Vector512 specialMask = Vector512.GreaterThanOrEqual(x.AsUInt32() - Vector512.Create(V_MIN), 
Vector512.Create(V_MAX - V_MIN)); + + if (specialMask != Vector512.Zero) + { + // float.IsZero(x) ? float.NegativeInfinity : x + Vector512 zeroMask = Vector512.Equals(x, Vector512.Zero); + + specialResult = Vector512.ConditionalSelect( + zeroMask, + Vector512.Create(float.NegativeInfinity), + specialResult + ); + + // (x < 0) ? float.NaN : x + Vector512 lessThanZeroMask = Vector512.LessThan(x, Vector512.Zero); + + specialResult = Vector512.ConditionalSelect( + lessThanZeroMask, + Vector512.Create(float.NaN), + specialResult + ); + + // float.IsZero(x) | (x < 0) | float.IsNaN(x) | float.IsPositiveInfinity(x) + Vector512 temp = zeroMask + | lessThanZeroMask + | ~Vector512.Equals(x, x) + | Vector512.Equals(x, Vector512.Create(float.PositiveInfinity)); + + // subnormal + Vector512 subnormalMask = Vector512.AndNot(specialMask.AsSingle(), temp); + + x = Vector512.ConditionalSelect( + subnormalMask, + ((x * 8388608.0f).AsUInt32() - Vector512.Create(23u << 23)).AsSingle(), + x + ); + + specialMask = temp.AsUInt32(); + } + + Vector512 vx = x.AsUInt32() - Vector512.Create(V_OFF); + Vector512 n = Vector512.ConvertToSingle(Vector512.ShiftRightArithmetic(vx.AsInt32(), 23)); + + vx = (vx & Vector512.Create(V_MASK)) + Vector512.Create(V_OFF); + + Vector512 r = vx.AsSingle() - Vector512.Create(1.0f); + + Vector512 r2 = r * r; + Vector512 r4 = r2 * r2; + Vector512 r8 = r4 * r4; + + Vector512 q = (Vector512.Create(C10) * r2 + (Vector512.Create(C9) * r + Vector512.Create(C8))) + * r8 + (((Vector512.Create(C7) * r + Vector512.Create(C6)) + * r2 + (Vector512.Create(C5) * r + Vector512.Create(C4))) + * r4 + ((Vector512.Create(C3) * r + Vector512.Create(C2)) + * r2 + (Vector512.Create(C1) * r + Vector512.Create(C0)))); + + return Vector512.ConditionalSelect( + specialMask.AsSingle(), + specialResult, + n * Vector512.Create(V_LN2) + q + ); + } +#endif + } + + private readonly struct Log2Operator : IUnaryOperator + { + // This code is based on `vrs4_log2f` from amd/aocl-libm-ose + // 
Copyright (C) 2021-2022 Advanced Micro Devices, Inc. All rights reserved. + // + // Licensed under the BSD 3-Clause "New" or "Revised" License + // See THIRD-PARTY-NOTICES.TXT for the full license text + + // Spec: + // log2f(x) + // = log2f(x) if x ∈ F and x > 0 + // = x if x = qNaN + // = 0 if x = 1 + // = -inf if x = (-0, 0} + // = NaN otherwise + // + // Assumptions/Expectations + // - Maximum ULP is observed to be at 4 + // - Some FPU Exceptions may not be available + // - Performance is at least 3x + // + // Implementation Notes: + // 1. Range Reduction: + // x = 2^n*(1+f) .... (1) + // where n is exponent and is an integer + // (1+f) is mantissa ∈ [1,2). i.e., 1 ≤ 1+f < 2 .... (2) + // + // From (1), taking log on both sides + // log2(x) = log2(2^n * (1+f)) + // = n + log2(1+f) .... (3) + // + // let z = 1 + f + // log2(z) = log2(k) + log2(z) - log2(k) + // log2(z) = log2(kz) - log2(k) + // + // From (2), range of z is [1, 2) + // by simply dividing range by 'k', z is in [1/k, 2/k) .... (4) + // Best choice of k is the one which gives equal and opposite values + // at extrema +- -+ + // 1 | 2 | + // --- - 1 = - |--- - 1 | + // k | k | .... (5) + // +- -+ + // + // Solving for k, k = 3/2, + // From (4), using 'k' value, range is therefore [-0.3333, 0.3333] + // + // 2. 
Polynomial Approximation: + // More information refer to tools/sollya/vrs4_logf.sollya + // + // 7th Deg - Error abs: 0x1.04c4ac98p-22 rel: 0x1.2216e6f8p-19 + + private const uint V_MIN = 0x00800000; + private const uint V_MAX = 0x7F800000; + private const uint V_MASK = 0x007FFFFF; + private const uint V_OFF = 0x3F2AAAAB; + + private const float C0 = 0.0f; + private const float C1 = 1.4426951f; + private const float C2 = -0.72134554f; + private const float C3 = 0.48089063f; + private const float C4 = -0.36084408f; + private const float C5 = 0.2888971f; + private const float C6 = -0.23594281f; + private const float C7 = 0.19948183f; + private const float C8 = -0.22616665f; + private const float C9 = 0.21228963f; + + public static float Invoke(float x) => MathF.Log2(x); + + public static Vector128 Invoke(Vector128 x) + { + Vector128 specialResult = x; + + // x is subnormal or infinity or NaN + Vector128 specialMask = Vector128.GreaterThanOrEqual(x.AsUInt32() - Vector128.Create(V_MIN), Vector128.Create(V_MAX - V_MIN)); + + if (specialMask != Vector128.Zero) + { + // float.IsZero(x) ? float.NegativeInfinity : x + Vector128 zeroMask = Vector128.Equals(x, Vector128.Zero); + + specialResult = Vector128.ConditionalSelect( + zeroMask, + Vector128.Create(float.NegativeInfinity), + specialResult + ); + + // (x < 0) ? 
float.NaN : x + Vector128 lessThanZeroMask = Vector128.LessThan(x, Vector128.Zero); + + specialResult = Vector128.ConditionalSelect( + lessThanZeroMask, + Vector128.Create(float.NaN), + specialResult + ); + + // float.IsZero(x) | (x < 0) | float.IsNaN(x) | float.IsPositiveInfinity(x) + Vector128 temp = zeroMask + | lessThanZeroMask + | ~Vector128.Equals(x, x) + | Vector128.Equals(x, Vector128.Create(float.PositiveInfinity)); + + // subnormal + Vector128 subnormalMask = Vector128.AndNot(specialMask.AsSingle(), temp); + + x = Vector128.ConditionalSelect( + subnormalMask, + ((x * 8388608.0f).AsUInt32() - Vector128.Create(23u << 23)).AsSingle(), + x + ); + + specialMask = temp.AsUInt32(); + } + + Vector128 vx = x.AsUInt32() - Vector128.Create(V_OFF); + Vector128 n = Vector128.ConvertToSingle(Vector128.ShiftRightArithmetic(vx.AsInt32(), 23)); + + vx = (vx & Vector128.Create(V_MASK)) + Vector128.Create(V_OFF); + + Vector128 r = vx.AsSingle() - Vector128.Create(1.0f); + + Vector128 r2 = r * r; + Vector128 r4 = r2 * r2; + Vector128 r8 = r4 * r4; + + Vector128 poly = (Vector128.Create(C9) * r + Vector128.Create(C8)) * r8 + + (((Vector128.Create(C7) * r + Vector128.Create(C6)) * r2 + + (Vector128.Create(C5) * r + Vector128.Create(C4))) * r4 + + ((Vector128.Create(C3) * r + Vector128.Create(C2)) * r2 + + (Vector128.Create(C1) * r + Vector128.Create(C0)))); + + return Vector128.ConditionalSelect( + specialMask.AsSingle(), + specialResult, + n + poly + ); + } + + public static Vector256 Invoke(Vector256 x) + { + Vector256 specialResult = x; + + // x is subnormal or infinity or NaN + Vector256 specialMask = Vector256.GreaterThanOrEqual(x.AsUInt32() - Vector256.Create(V_MIN), Vector256.Create(V_MAX - V_MIN)); + + if (specialMask != Vector256.Zero) + { + // float.IsZero(x) ? 
float.NegativeInfinity : x + Vector256 zeroMask = Vector256.Equals(x, Vector256.Zero); + + specialResult = Vector256.ConditionalSelect( + zeroMask, + Vector256.Create(float.NegativeInfinity), + specialResult + ); + + // (x < 0) ? float.NaN : x + Vector256 lessThanZeroMask = Vector256.LessThan(x, Vector256.Zero); + + specialResult = Vector256.ConditionalSelect( + lessThanZeroMask, + Vector256.Create(float.NaN), + specialResult + ); + + // float.IsZero(x) | (x < 0) | float.IsNaN(x) | float.IsPositiveInfinity(x) + Vector256 temp = zeroMask + | lessThanZeroMask + | ~Vector256.Equals(x, x) + | Vector256.Equals(x, Vector256.Create(float.PositiveInfinity)); + + // subnormal + Vector256 subnormalMask = Vector256.AndNot(specialMask.AsSingle(), temp); + + x = Vector256.ConditionalSelect( + subnormalMask, + ((x * 8388608.0f).AsUInt32() - Vector256.Create(23u << 23)).AsSingle(), + x + ); + + specialMask = temp.AsUInt32(); + } + + Vector256 vx = x.AsUInt32() - Vector256.Create(V_OFF); + Vector256 n = Vector256.ConvertToSingle(Vector256.ShiftRightArithmetic(vx.AsInt32(), 23)); + + vx = (vx & Vector256.Create(V_MASK)) + Vector256.Create(V_OFF); + + Vector256 r = vx.AsSingle() - Vector256.Create(1.0f); + + Vector256 r2 = r * r; + Vector256 r4 = r2 * r2; + Vector256 r8 = r4 * r4; + + Vector256 poly = (Vector256.Create(C9) * r + Vector256.Create(C8)) * r8 + + (((Vector256.Create(C7) * r + Vector256.Create(C6)) * r2 + + (Vector256.Create(C5) * r + Vector256.Create(C4))) * r4 + + ((Vector256.Create(C3) * r + Vector256.Create(C2)) * r2 + + (Vector256.Create(C1) * r + Vector256.Create(C0)))); + + return Vector256.ConditionalSelect( + specialMask.AsSingle(), + specialResult, + n + poly + ); + } + +#if NET8_0_OR_GREATER + public static Vector512 Invoke(Vector512 x) + { + Vector512 specialResult = x; + + // x is subnormal or infinity or NaN + Vector512 specialMask = Vector512.GreaterThanOrEqual(x.AsUInt32() - Vector512.Create(V_MIN), Vector512.Create(V_MAX - V_MIN)); + + if (specialMask != 
Vector512.Zero) + { + // float.IsZero(x) ? float.NegativeInfinity : x + Vector512 zeroMask = Vector512.Equals(x, Vector512.Zero); + + specialResult = Vector512.ConditionalSelect( + zeroMask, + Vector512.Create(float.NegativeInfinity), + specialResult + ); + + // (x < 0) ? float.NaN : x + Vector512 lessThanZeroMask = Vector512.LessThan(x, Vector512.Zero); + + specialResult = Vector512.ConditionalSelect( + lessThanZeroMask, + Vector512.Create(float.NaN), + specialResult + ); + + // float.IsZero(x) | (x < 0) | float.IsNaN(x) | float.IsPositiveInfinity(x) + Vector512 temp = zeroMask + | lessThanZeroMask + | ~Vector512.Equals(x, x) + | Vector512.Equals(x, Vector512.Create(float.PositiveInfinity)); + + // subnormal + Vector512 subnormalMask = Vector512.AndNot(specialMask.AsSingle(), temp); + + x = Vector512.ConditionalSelect( + subnormalMask, + ((x * 8388608.0f).AsUInt32() - Vector512.Create(23u << 23)).AsSingle(), + x + ); + + specialMask = temp.AsUInt32(); + } + + Vector512 vx = x.AsUInt32() - Vector512.Create(V_OFF); + Vector512 n = Vector512.ConvertToSingle(Vector512.ShiftRightArithmetic(vx.AsInt32(), 23)); + + vx = (vx & Vector512.Create(V_MASK)) + Vector512.Create(V_OFF); + + Vector512 r = vx.AsSingle() - Vector512.Create(1.0f); + + Vector512 r2 = r * r; + Vector512 r4 = r2 * r2; + Vector512 r8 = r4 * r4; + + Vector512 poly = (Vector512.Create(C9) * r + Vector512.Create(C8)) * r8 + + (((Vector512.Create(C7) * r + Vector512.Create(C6)) * r2 + + (Vector512.Create(C5) * r + Vector512.Create(C4))) * r4 + + ((Vector512.Create(C3) * r + Vector512.Create(C2)) * r2 + + (Vector512.Create(C1) * r + Vector512.Create(C0)))); + + return Vector512.ConditionalSelect( + specialMask.AsSingle(), + specialResult, + n + poly + ); + } +#endif + } + private interface IUnaryOperator { static abstract float Invoke(float x); @@ -1263,14 +3162,17 @@ private interface IBinaryOperator #if NET8_0_OR_GREATER static abstract Vector512 Invoke(Vector512 x, Vector512 y); #endif + } - // Operations 
for aggregating all lanes in a vector into a single value. - // These are not supported on most implementations. - static virtual float Invoke(Vector128 x) => throw new NotSupportedException(); - static virtual float Invoke(Vector256 x) => throw new NotSupportedException(); + private interface IAggregationOperator : IBinaryOperator + { + static abstract float Invoke(Vector128 x); + static abstract float Invoke(Vector256 x); #if NET8_0_OR_GREATER - static virtual float Invoke(Vector512 x) => throw new NotSupportedException(); + static abstract float Invoke(Vector512 x); #endif + + static virtual float IdentityValue => throw new NotSupportedException(); } private interface ITernaryOperator diff --git a/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/TensorPrimitives.netstandard.cs b/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/TensorPrimitives.netstandard.cs index 1ffd5d30683e9..b9c1e128aaede 100644 --- a/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/TensorPrimitives.netstandard.cs +++ b/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/TensorPrimitives.netstandard.cs @@ -1,6 +1,7 @@ // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. +using System.Diagnostics; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; @@ -8,14 +9,6 @@ namespace System.Numerics.Tensors { public static partial class TensorPrimitives { - private static unsafe bool IsNegative(float f) => *(int*)&f < 0; - - private static float MaxMagnitude(float x, float y) => MathF.Abs(x) >= MathF.Abs(y) ? x : y; - - private static float MinMagnitude(float x, float y) => MathF.Abs(x) < MathF.Abs(y) ? 
x : y; - - private static float Log2(float x) => MathF.Log(x, 2); - private static float CosineSimilarityCore(ReadOnlySpan x, ReadOnlySpan y) { // Compute the same as: @@ -26,9 +19,9 @@ private static float CosineSimilarityCore(ReadOnlySpan x, ReadOnlySpan= Vector.Count) + if (Vector.IsHardwareAccelerated && + Vector.Count <= 16 && // currently never greater than 8, but 16 would occur if/when AVX512 is supported, and logic in remainder handling assumes that maximum + x.Length >= Vector.Count) { ref float xRef = ref MemoryMarshal.GetReference(x); ref float yRef = ref MemoryMarshal.GetReference(y); @@ -39,6 +32,7 @@ private static float CosineSimilarityCore(ReadOnlySpan x, ReadOnlySpan.Count; + int i = 0; do { Vector xVec = AsVector(ref xRef, i); @@ -52,6 +46,21 @@ private static float CosineSimilarityCore(ReadOnlySpan x, ReadOnlySpan xVec = AsVector(ref xRef, x.Length - Vector.Count); + Vector yVec = AsVector(ref yRef, x.Length - Vector.Count); + + Vector remainderMask = LoadRemainderMaskSingleVector(x.Length - i); + xVec &= remainderMask; + yVec &= remainderMask; + + dotProductVector += xVec * yVec; + xSumOfSquaresVector += xVec * xVec; + ySumOfSquaresVector += yVec * yVec; + } + // Sum the vector lanes into the scalar result. 
for (int e = 0; e < Vector.Count; e++) { @@ -60,13 +69,16 @@ private static float CosineSimilarityCore(ReadOnlySpan x, ReadOnlySpan x, ReadOnlySpan( - float identityValue, ReadOnlySpan x, TLoad load = default, TAggregate aggregate = default) + ReadOnlySpan x, TLoad load = default, TAggregate aggregate = default) where TLoad : struct, IUnaryOperator - where TAggregate : struct, IBinaryOperator + where TAggregate : struct, IAggregationOperator { - // Initialize the result to the identity value - float result = identityValue; - int i = 0; + if (x.Length == 0) + { + return 0; + } + + float result; - if (Vector.IsHardwareAccelerated && x.Length >= Vector.Count * 2) + if (Vector.IsHardwareAccelerated && load.CanVectorize && x.Length >= Vector.Count) { ref float xRef = ref MemoryMarshal.GetReference(x); // Load the first vector as the initial set of results Vector resultVector = load.Invoke(AsVector(ref xRef, 0)); int oneVectorFromEnd = x.Length - Vector.Count; + int i = Vector.Count; // Aggregate additional vectors into the result as long as there's at // least one full vector left to process. - i = Vector.Count; - do + while (i <= oneVectorFromEnd) { resultVector = aggregate.Invoke(resultVector, load.Invoke(AsVector(ref xRef, i))); i += Vector.Count; } - while (i <= oneVectorFromEnd); + + // Process the last vector in the span, masking off elements already processed. + if (i != x.Length) + { + resultVector = aggregate.Invoke(resultVector, + Vector.ConditionalSelect( + Vector.Equals(LoadRemainderMaskSingleVector(x.Length - i), Vector.Zero), + new Vector(aggregate.IdentityValue), + load.Invoke(AsVector(ref xRef, x.Length - Vector.Count)))); + } // Aggregate the lanes in the vector back into the scalar result - for (int f = 0; f < Vector.Count; f++) + result = resultVector[0]; + for (int f = 1; f < Vector.Count; f++) { result = aggregate.Invoke(result, resultVector[f]); } + + return result; } // Aggregate the remaining items in the input span. 
- for (; (uint)i < (uint)x.Length; i++) + result = load.Invoke(x[0]); + for (int i = 1; i < x.Length; i++) { result = aggregate.Invoke(result, load.Invoke(x[i])); } @@ -117,42 +145,62 @@ private static float Aggregate( } private static float Aggregate( - float identityValue, ReadOnlySpan x, ReadOnlySpan y, TBinary binary = default, TAggregate aggregate = default) + ReadOnlySpan x, ReadOnlySpan y, TBinary binary = default, TAggregate aggregate = default) where TBinary : struct, IBinaryOperator - where TAggregate : struct, IBinaryOperator + where TAggregate : struct, IAggregationOperator { - // Initialize the result to the identity value - float result = identityValue; - int i = 0; + Debug.Assert(x.Length == y.Length); - if (Vector.IsHardwareAccelerated && x.Length >= Vector.Count * 2) + if (x.Length == 0) { - ref float xRef = ref MemoryMarshal.GetReference(x); - ref float yRef = ref MemoryMarshal.GetReference(y); + return 0; + } + + ref float xRef = ref MemoryMarshal.GetReference(x); + ref float yRef = ref MemoryMarshal.GetReference(y); + float result; + + if (Vector.IsHardwareAccelerated && x.Length >= Vector.Count) + { // Load the first vector as the initial set of results Vector resultVector = binary.Invoke(AsVector(ref xRef, 0), AsVector(ref yRef, 0)); int oneVectorFromEnd = x.Length - Vector.Count; + int i = Vector.Count; // Aggregate additional vectors into the result as long as there's at // least one full vector left to process. - i = Vector.Count; - do + while (i <= oneVectorFromEnd) { resultVector = aggregate.Invoke(resultVector, binary.Invoke(AsVector(ref xRef, i), AsVector(ref yRef, i))); i += Vector.Count; } - while (i <= oneVectorFromEnd); + + // Process the last vector in the spans, masking off elements already processed. 
+ if (i != x.Length) + { + resultVector = aggregate.Invoke(resultVector, + Vector.ConditionalSelect( + Vector.Equals(LoadRemainderMaskSingleVector(x.Length - i), Vector.Zero), + new Vector(aggregate.IdentityValue), + binary.Invoke( + AsVector(ref xRef, x.Length - Vector.Count), + AsVector(ref yRef, x.Length - Vector.Count)))); + } // Aggregate the lanes in the vector back into the scalar result - for (int f = 0; f < Vector.Count; f++) + result = resultVector[0]; + for (int f = 1; f < Vector.Count; f++) { result = aggregate.Invoke(result, resultVector[f]); } + + return result; } // Aggregate the remaining items in the input span. - for (; (uint)i < (uint)x.Length; i++) + result = binary.Invoke(x[0], y[0]); + for (int i = 1; i < x.Length; i++) { result = aggregate.Invoke(result, binary.Invoke(x[i], y[i])); } @@ -160,6 +208,87 @@ private static float Aggregate( return result; } + private static float MinMaxCore(ReadOnlySpan x, TMinMax minMax = default) where TMinMax : struct, IBinaryOperator + { + if (x.IsEmpty) + { + ThrowHelper.ThrowArgument_SpansMustBeNonEmpty(); + } + + // This matches the IEEE 754:2019 `maximum`/`minimum` functions. + // It propagates NaN inputs back to the caller and + // otherwise returns the greater of the inputs. + // It treats +0 as greater than -0 as per the specification. + + float result = x[0]; + int i = 0; + + if (Vector.IsHardwareAccelerated && x.Length >= Vector.Count) + { + ref float xRef = ref MemoryMarshal.GetReference(x); + + // Load the first vector as the initial set of results, and bail immediately + // to scalar handling if it contains any NaNs (which don't compare equally to themselves). + Vector resultVector = AsVector(ref xRef, 0), current; + if (Vector.EqualsAll(resultVector, resultVector)) + { + int oneVectorFromEnd = x.Length - Vector.Count; + i = Vector.Count; + + // Aggregate additional vectors into the result as long as there's at least one full vector left to process. 
+ while (i <= oneVectorFromEnd) + { + // Load the next vector, and early exit on NaN. + current = AsVector(ref xRef, i); + if (!Vector.EqualsAll(current, current)) + { + goto Scalar; + } + + resultVector = minMax.Invoke(resultVector, current); + i += Vector.Count; + } + + // If any elements remain, handle them in one final vector. + if (i != x.Length) + { + current = AsVector(ref xRef, x.Length - Vector.Count); + if (!Vector.EqualsAll(current, current)) + { + goto Scalar; + } + + resultVector = minMax.Invoke(resultVector, current); + } + + // Aggregate the lanes in the vector to create the final scalar result. + for (int f = 0; f < Vector.Count; f++) + { + result = minMax.Invoke(result, resultVector[f]); + } + + return result; + } + } + + // Scalar path used when either vectorization is not supported, the input is too small to vectorize, + // or a NaN is encountered. + Scalar: + for (; (uint)i < (uint)x.Length; i++) + { + float current = x[i]; + + if (float.IsNaN(current)) + { + return current; + } + + result = minMax.Invoke(result, current); + } + + return result; + } + private static void InvokeSpanIntoSpan( ReadOnlySpan x, Span destination, TUnaryOperator op = default) where TUnaryOperator : struct, IUnaryOperator @@ -169,11 +298,13 @@ private static void InvokeSpanIntoSpan( ThrowHelper.ThrowArgument_DestinationTooShort(); } + ValidateInputOutputSpanNonOverlapping(x, destination); + ref float xRef = ref MemoryMarshal.GetReference(x); ref float dRef = ref MemoryMarshal.GetReference(destination); int i = 0, oneVectorFromEnd; - if (Vector.IsHardwareAccelerated) + if (Vector.IsHardwareAccelerated && op.CanVectorize) { oneVectorFromEnd = x.Length - Vector.Count; if (oneVectorFromEnd >= 0) @@ -191,7 +322,11 @@ private static void InvokeSpanIntoSpan( if (i != x.Length) { int lastVectorIndex = x.Length - Vector.Count; - AsVector(ref dRef, lastVectorIndex) = op.Invoke(AsVector(ref xRef, lastVectorIndex)); + ref Vector dest = ref AsVector(ref dRef, lastVectorIndex); + 
dest = Vector.ConditionalSelect( + Vector.Equals(LoadRemainderMaskSingleVector(x.Length - i), Vector.Zero), + dest, + op.Invoke(AsVector(ref xRef, lastVectorIndex))); } return; @@ -221,6 +356,9 @@ private static void InvokeSpanSpanIntoSpan( ThrowHelper.ThrowArgument_DestinationTooShort(); } + ValidateInputOutputSpanNonOverlapping(x, destination); + ValidateInputOutputSpanNonOverlapping(y, destination); + ref float xRef = ref MemoryMarshal.GetReference(x); ref float yRef = ref MemoryMarshal.GetReference(y); ref float dRef = ref MemoryMarshal.GetReference(destination); @@ -245,8 +383,12 @@ private static void InvokeSpanSpanIntoSpan( if (i != x.Length) { int lastVectorIndex = x.Length - Vector.Count; - AsVector(ref dRef, lastVectorIndex) = op.Invoke(AsVector(ref xRef, lastVectorIndex), - AsVector(ref yRef, lastVectorIndex)); + ref Vector dest = ref AsVector(ref dRef, lastVectorIndex); + dest = Vector.ConditionalSelect( + Vector.Equals(LoadRemainderMaskSingleVector(x.Length - i), Vector.Zero), + dest, + op.Invoke(AsVector(ref xRef, lastVectorIndex), + AsVector(ref yRef, lastVectorIndex))); } return; @@ -271,6 +413,8 @@ private static void InvokeSpanScalarIntoSpan( ThrowHelper.ThrowArgument_DestinationTooShort(); } + ValidateInputOutputSpanNonOverlapping(x, destination); + ref float xRef = ref MemoryMarshal.GetReference(x); ref float dRef = ref MemoryMarshal.GetReference(destination); int i = 0, oneVectorFromEnd; @@ -295,8 +439,11 @@ private static void InvokeSpanScalarIntoSpan( if (i != x.Length) { int lastVectorIndex = x.Length - Vector.Count; - AsVector(ref dRef, lastVectorIndex) = op.Invoke(AsVector(ref xRef, lastVectorIndex), - yVec); + ref Vector dest = ref AsVector(ref dRef, lastVectorIndex); + dest = Vector.ConditionalSelect( + Vector.Equals(LoadRemainderMaskSingleVector(x.Length - i), Vector.Zero), + dest, + op.Invoke(AsVector(ref xRef, lastVectorIndex), yVec)); } return; @@ -327,6 +474,10 @@ private static void InvokeSpanSpanSpanIntoSpan( 
ThrowHelper.ThrowArgument_DestinationTooShort(); } + ValidateInputOutputSpanNonOverlapping(x, destination); + ValidateInputOutputSpanNonOverlapping(y, destination); + ValidateInputOutputSpanNonOverlapping(z, destination); + ref float xRef = ref MemoryMarshal.GetReference(x); ref float yRef = ref MemoryMarshal.GetReference(y); ref float zRef = ref MemoryMarshal.GetReference(z); @@ -353,9 +504,13 @@ private static void InvokeSpanSpanSpanIntoSpan( if (i != x.Length) { int lastVectorIndex = x.Length - Vector.Count; - AsVector(ref dRef, lastVectorIndex) = op.Invoke(AsVector(ref xRef, lastVectorIndex), - AsVector(ref yRef, lastVectorIndex), - AsVector(ref zRef, lastVectorIndex)); + ref Vector dest = ref AsVector(ref dRef, lastVectorIndex); + dest = Vector.ConditionalSelect( + Vector.Equals(LoadRemainderMaskSingleVector(x.Length - i), Vector.Zero), + dest, + op.Invoke(AsVector(ref xRef, lastVectorIndex), + AsVector(ref yRef, lastVectorIndex), + AsVector(ref zRef, lastVectorIndex))); } return; @@ -387,6 +542,9 @@ private static void InvokeSpanSpanScalarIntoSpan( ThrowHelper.ThrowArgument_DestinationTooShort(); } + ValidateInputOutputSpanNonOverlapping(x, destination); + ValidateInputOutputSpanNonOverlapping(y, destination); + ref float xRef = ref MemoryMarshal.GetReference(x); ref float yRef = ref MemoryMarshal.GetReference(y); ref float dRef = ref MemoryMarshal.GetReference(destination); @@ -414,9 +572,13 @@ private static void InvokeSpanSpanScalarIntoSpan( if (i != x.Length) { int lastVectorIndex = x.Length - Vector.Count; - AsVector(ref dRef, lastVectorIndex) = op.Invoke(AsVector(ref xRef, lastVectorIndex), - AsVector(ref yRef, lastVectorIndex), - zVec); + ref Vector dest = ref AsVector(ref dRef, lastVectorIndex); + dest = Vector.ConditionalSelect( + Vector.Equals(LoadRemainderMaskSingleVector(x.Length - i), Vector.Zero), + dest, + op.Invoke(AsVector(ref xRef, lastVectorIndex), + AsVector(ref yRef, lastVectorIndex), + zVec)); } return; @@ -448,6 +610,9 @@ private static 
void InvokeSpanScalarSpanIntoSpan( ThrowHelper.ThrowArgument_DestinationTooShort(); } + ValidateInputOutputSpanNonOverlapping(x, destination); + ValidateInputOutputSpanNonOverlapping(z, destination); + ref float xRef = ref MemoryMarshal.GetReference(x); ref float zRef = ref MemoryMarshal.GetReference(z); ref float dRef = ref MemoryMarshal.GetReference(destination); @@ -475,9 +640,13 @@ private static void InvokeSpanScalarSpanIntoSpan( if (i != x.Length) { int lastVectorIndex = x.Length - Vector.Count; - AsVector(ref dRef, lastVectorIndex) = op.Invoke(AsVector(ref xRef, lastVectorIndex), - yVec, - AsVector(ref zRef, lastVectorIndex)); + ref Vector dest = ref AsVector(ref dRef, lastVectorIndex); + dest = Vector.ConditionalSelect( + Vector.Equals(LoadRemainderMaskSingleVector(x.Length - i), Vector.Zero), + dest, + op.Invoke(AsVector(ref xRef, lastVectorIndex), + yVec, + AsVector(ref zRef, lastVectorIndex))); } return; @@ -500,10 +669,27 @@ private static ref Vector AsVector(ref float start, int offset) => ref Unsafe.As>( ref Unsafe.Add(ref start, offset)); - private readonly struct AddOperator : IBinaryOperator + private static unsafe bool IsNegative(float f) => *(int*)&f < 0; + + private static unsafe Vector IsNegative(Vector f) => + (Vector)Vector.LessThan((Vector)f, Vector.Zero); + + private static float Log2(float x) => MathF.Log(x, 2); + + private static unsafe Vector LoadRemainderMaskSingleVector(int validItems) + { + Debug.Assert(Vector.Count is 4 or 8 or 16); + + return AsVector( + ref Unsafe.As(ref MemoryMarshal.GetReference(RemainderUInt32Mask_16x16)), + (validItems * 16) + (16 - Vector.Count)); + } + + private readonly struct AddOperator : IAggregationOperator { public float Invoke(float x, float y) => x + y; public Vector Invoke(Vector x, Vector y) => x + y; + public float IdentityValue => 0; } private readonly struct SubtractOperator : IBinaryOperator @@ -527,10 +713,11 @@ public Vector Invoke(Vector x, Vector y) } } - private readonly struct 
MultiplyOperator : IBinaryOperator + private readonly struct MultiplyOperator : IAggregationOperator { public float Invoke(float x, float y) => x * y; public Vector Invoke(Vector x, Vector y) => x * y; + public float IdentityValue => 1; } private readonly struct DivideOperator : IBinaryOperator @@ -539,8 +726,166 @@ public Vector Invoke(Vector x, Vector y) public Vector Invoke(Vector x, Vector y) => x / y; } + private readonly struct MaxOperator : IBinaryOperator + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public float Invoke(float x, float y) => + x == y ? + (IsNegative(x) ? y : x) : + (y > x ? y : x); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public Vector Invoke(Vector x, Vector y) => + Vector.ConditionalSelect(Vector.Equals(x, y), + Vector.ConditionalSelect(IsNegative(x), y, x), + Vector.Max(x, y)); + } + + private readonly struct MaxPropagateNaNOperator : IBinaryOperator + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public float Invoke(float x, float y) => MathF.Max(x, y); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public Vector Invoke(Vector x, Vector y) => + Vector.ConditionalSelect(Vector.Equals(x, x), + Vector.ConditionalSelect(Vector.Equals(y, y), + Vector.ConditionalSelect(Vector.Equals(x, y), + Vector.ConditionalSelect(IsNegative(x), y, x), + Vector.Max(x, y)), + y), + x); + } + + private readonly struct MaxMagnitudeOperator : IBinaryOperator + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public float Invoke(float x, float y) + { + float xMag = MathF.Abs(x), yMag = MathF.Abs(y); + return + yMag == xMag ? + (IsNegative(x) ? y : x) : + (xMag > yMag ? 
x : y); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public Vector Invoke(Vector x, Vector y) + { + Vector xMag = Vector.Abs(x), yMag = Vector.Abs(y); + return + Vector.ConditionalSelect(Vector.Equals(xMag, yMag), + Vector.ConditionalSelect(IsNegative(x), y, x), + Vector.ConditionalSelect(Vector.GreaterThan(xMag, yMag), x, y)); + } + } + + private readonly struct MaxMagnitudePropagateNaNOperator : IBinaryOperator + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public float Invoke(float x, float y) + { + float xMag = MathF.Abs(x), yMag = MathF.Abs(y); + return xMag > yMag || float.IsNaN(xMag) || (xMag == yMag && !IsNegative(x)) ? x : y; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public Vector Invoke(Vector x, Vector y) + { + Vector xMag = Vector.Abs(x), yMag = Vector.Abs(y); + return + Vector.ConditionalSelect(Vector.Equals(x, x), + Vector.ConditionalSelect(Vector.Equals(y, y), + Vector.ConditionalSelect(Vector.Equals(xMag, yMag), + Vector.ConditionalSelect(IsNegative(x), y, x), + Vector.ConditionalSelect(Vector.GreaterThan(xMag, yMag), x, y)), + y), + x); + } + } + + private readonly struct MinOperator : IBinaryOperator + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public float Invoke(float x, float y) => + x == y ? + (IsNegative(y) ? y : x) : + (y < x ? 
y : x); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public Vector Invoke(Vector x, Vector y) => + Vector.ConditionalSelect(Vector.Equals(x, y), + Vector.ConditionalSelect(IsNegative(y), y, x), + Vector.Min(x, y)); + } + + private readonly struct MinPropagateNaNOperator : IBinaryOperator + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public float Invoke(float x, float y) => MathF.Min(x, y); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public Vector Invoke(Vector x, Vector y) => + Vector.ConditionalSelect(Vector.Equals(x, x), + Vector.ConditionalSelect(Vector.Equals(y, y), + Vector.ConditionalSelect(Vector.Equals(x, y), + Vector.ConditionalSelect(IsNegative(x), x, y), + Vector.Min(x, y)), + y), + x); + } + + private readonly struct MinMagnitudeOperator : IBinaryOperator + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public float Invoke(float x, float y) + { + float xMag = MathF.Abs(x), yMag = MathF.Abs(y); + return + yMag == xMag ? + (IsNegative(y) ? y : x) : + (yMag < xMag ? y : x); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public Vector Invoke(Vector x, Vector y) + { + Vector xMag = Vector.Abs(x), yMag = Vector.Abs(y); + return + Vector.ConditionalSelect(Vector.Equals(yMag, xMag), + Vector.ConditionalSelect(IsNegative(y), y, x), + Vector.ConditionalSelect(Vector.LessThan(yMag, xMag), y, x)); + } + } + + private readonly struct MinMagnitudePropagateNaNOperator : IBinaryOperator + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public float Invoke(float x, float y) + { + float xMag = MathF.Abs(x), yMag = MathF.Abs(y); + return xMag < yMag || float.IsNaN(xMag) || (xMag == yMag && IsNegative(x)) ? 
x : y; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public Vector Invoke(Vector x, Vector y) + { + Vector xMag = Vector.Abs(x), yMag = Vector.Abs(y); + + return + Vector.ConditionalSelect(Vector.Equals(x, x), + Vector.ConditionalSelect(Vector.Equals(y, y), + Vector.ConditionalSelect(Vector.Equals(yMag, xMag), + Vector.ConditionalSelect(IsNegative(x), x, y), + Vector.ConditionalSelect(Vector.LessThan(xMag, yMag), x, y)), + y), + x); + } + } + private readonly struct NegateOperator : IUnaryOperator { + public bool CanVectorize => true; public float Invoke(float x) => -x; public Vector Invoke(Vector x) => -x; } @@ -559,24 +904,54 @@ public Vector Invoke(Vector x, Vector y) private readonly struct IdentityOperator : IUnaryOperator { + public bool CanVectorize => true; public float Invoke(float x) => x; public Vector Invoke(Vector x) => x; } private readonly struct SquaredOperator : IUnaryOperator { + public bool CanVectorize => true; public float Invoke(float x) => x * x; public Vector Invoke(Vector x) => x * x; } private readonly struct AbsoluteOperator : IUnaryOperator { + public bool CanVectorize => true; public float Invoke(float x) => MathF.Abs(x); public Vector Invoke(Vector x) => Vector.Abs(x); } + private readonly struct LogOperator : IUnaryOperator + { + public bool CanVectorize => false; + + public float Invoke(float x) => MathF.Log(x); + + public Vector Invoke(Vector x) + { + // Vectorizing requires shift right support, which is .NET 7 or later + throw new NotImplementedException(); + } + } + + private readonly struct Log2Operator : IUnaryOperator + { + public bool CanVectorize => false; + + public float Invoke(float x) => Log2(x); + + public Vector Invoke(Vector x) + { + // Vectorizing requires shift right support, which is .NET 7 or later + throw new NotImplementedException(); + } + } + private interface IUnaryOperator { + bool CanVectorize { get; } float Invoke(float x); Vector Invoke(Vector x); } @@ -587,6 +962,11 @@ private interface 
IBinaryOperator Vector Invoke(Vector x, Vector y); } + private interface IAggregationOperator : IBinaryOperator + { + float IdentityValue { get; } + } + private interface ITernaryOperator { float Invoke(float x, float y, float z); diff --git a/src/libraries/System.Numerics.Tensors/src/System/ThrowHelper.cs b/src/libraries/System.Numerics.Tensors/src/System/ThrowHelper.cs index 902b27787e856..272991aed44ab 100644 --- a/src/libraries/System.Numerics.Tensors/src/System/ThrowHelper.cs +++ b/src/libraries/System.Numerics.Tensors/src/System/ThrowHelper.cs @@ -18,5 +18,9 @@ public static void ThrowArgument_SpansMustHaveSameLength() => [DoesNotReturn] public static void ThrowArgument_SpansMustBeNonEmpty() => throw new ArgumentException(SR.Argument_SpansMustBeNonEmpty); + + [DoesNotReturn] + public static void ThrowArgument_InputAndDestinationSpanMustNotOverlap() => + throw new ArgumentException(SR.Argument_InputAndDestinationSpanMustNotOverlap, "destination"); } } diff --git a/src/libraries/System.Numerics.Tensors/tests/TensorPrimitivesTests.cs b/src/libraries/System.Numerics.Tensors/tests/TensorPrimitivesTests.cs index 288cd3edb8d5e..256ece4283b3e 100644 --- a/src/libraries/System.Numerics.Tensors/tests/TensorPrimitivesTests.cs +++ b/src/libraries/System.Numerics.Tensors/tests/TensorPrimitivesTests.cs @@ -13,6 +13,7 @@ namespace System.Numerics.Tensors.Tests { public static partial class TensorPrimitivesTests { + #region Test Utilities private const double Tolerance = 0.0001; public static IEnumerable TensorLengthsIncluding0 => @@ -41,423 +42,386 @@ private static void FillTensor(Span tensor) } } - private static float NextSingle() - { + private static float NextSingle() => // For testing purposes, get a mix of negative and positive values. 
- return (float)((s_random.NextDouble() * 2) - 1); + (float)((s_random.NextDouble() * 2) - 1); + + private static unsafe float MathFMaxMagnitude(float x, float y) + { + float ax = MathF.Abs(x), ay = MathF.Abs(y); + return (ax > ay) || float.IsNaN(ax) || (ax == ay && *(int*)&x >= 0) ? x : y; + } + + private static unsafe float MathFMinMagnitude(float x, float y) + { + float ax = MathF.Abs(x), ay = MathF.Abs(y); + return (ax < ay) || float.IsNaN(ax) || (ax == ay && *(int*)&x < 0) ? x : y; } + #endregion + #region Abs [Theory] [MemberData(nameof(TensorLengthsIncluding0))] - public static void AddTwoTensors(int tensorLength) + public static void Abs(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength); - TensorPrimitives.Add(x, y, destination); + TensorPrimitives.Abs(x, destination); - for (int i = 0; i < tensorLength; i++) + for (int i = 0; i < x.Length; i++) { - Assert.Equal(x[i] + y[i], destination[i], Tolerance); + Assert.Equal(MathF.Abs(x[i]), destination[i], Tolerance); } } [Theory] - [MemberData(nameof(TensorLengths))] - public static void AddTwoTensors_ThrowsForMismatchedLengths(int tensorLength) + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Abs_InPlace(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); - using BoundedMemory destination = CreateTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); - Assert.Throws(() => TensorPrimitives.Add(x, y, destination)); + TensorPrimitives.Abs(x, x); + + for (int i = 0; i < x.Length; i++) + { + Assert.Equal(MathF.Abs(xOrig[i]), x[i], Tolerance); + } } [Theory] [MemberData(nameof(TensorLengths))] - public static void AddTwoTensors_ThrowsForTooShortDestination(int tensorLength) + public static void Abs_ThrowsForTooShortDestination(int tensorLength) { using 
BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength - 1); - AssertExtensions.Throws("destination", () => TensorPrimitives.Add(x, y, destination)); + AssertExtensions.Throws("destination", () => TensorPrimitives.Abs(x, destination)); + } + + [Fact] + public static void Abs_ThrowsForOverlapppingInputsWithOutputs() + { + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Abs(array.AsSpan(1, 5), array.AsSpan(0, 5))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Abs(array.AsSpan(1, 5), array.AsSpan(2, 5))); } + #endregion + #region Add [Theory] [MemberData(nameof(TensorLengthsIncluding0))] - public static void AddTensorAndScalar(int tensorLength) + public static void Add_TwoTensors(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - float y = NextSingle(); + using BoundedMemory y = CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength); TensorPrimitives.Add(x, y, destination); - for (int i = 0; i < tensorLength; i++) { - Assert.Equal(x[i] + y, destination[i], Tolerance); + Assert.Equal(x[i] + y[i], destination[i], Tolerance); } - } - [Theory] - [MemberData(nameof(TensorLengths))] - public static void AddTensorAndScalar_ThrowsForTooShortDestination(int tensorLength) - { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - float y = NextSingle(); - using BoundedMemory destination = CreateTensor(tensorLength - 1); + float[] xOrig = x.Span.ToArray(); - AssertExtensions.Throws("destination", () => TensorPrimitives.Add(x, y, destination)); + // Validate that the destination can be the same as an input. 
+ TensorPrimitives.Add(x, x, x); + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(xOrig[i] + xOrig[i], x[i], Tolerance); + } } [Theory] [MemberData(nameof(TensorLengthsIncluding0))] - public static void SubtractTwoTensors(int tensorLength) + public static void Add_TwoTensors_InPlace(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); - TensorPrimitives.Subtract(x, y, destination); + TensorPrimitives.Add(x, x, x); for (int i = 0; i < tensorLength; i++) { - Assert.Equal(x[i] - y[i], destination[i], Tolerance); + Assert.Equal(xOrig[i] + xOrig[i], x[i], Tolerance); } } [Theory] [MemberData(nameof(TensorLengths))] - public static void SubtractTwoTensors_ThrowsForMismatchedLengths(int tensorLength) + public static void Add_TwoTensors_ThrowsForMismatchedLengths(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); using BoundedMemory destination = CreateTensor(tensorLength); - Assert.Throws(() => TensorPrimitives.Subtract(x, y, destination)); + Assert.Throws(() => TensorPrimitives.Add(x, y, destination)); + Assert.Throws(() => TensorPrimitives.Add(y, x, destination)); } [Theory] [MemberData(nameof(TensorLengths))] - public static void SubtractTwoTensors_ThrowsForTooShortDestination(int tensorLength) + public static void Add_TwoTensors_ThrowsForTooShortDestination(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); using BoundedMemory y = CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength - 1); - AssertExtensions.Throws("destination", () => TensorPrimitives.Subtract(x, y, destination)); + AssertExtensions.Throws("destination", () => TensorPrimitives.Add(x, y, destination)); + } + + [Fact] + public static void 
Add_TwoTensors_ThrowsForOverlapppingInputsWithOutputs() + { + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Add(array.AsSpan(1, 2), array.AsSpan(5, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Add(array.AsSpan(1, 2), array.AsSpan(5, 2), array.AsSpan(2, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Add(array.AsSpan(1, 2), array.AsSpan(5, 2), array.AsSpan(4, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Add(array.AsSpan(1, 2), array.AsSpan(5, 2), array.AsSpan(6, 2))); } [Theory] [MemberData(nameof(TensorLengthsIncluding0))] - public static void SubtractTensorAndScalar(int tensorLength) + public static void Add_TensorScalar(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); float y = NextSingle(); using BoundedMemory destination = CreateTensor(tensorLength); - TensorPrimitives.Subtract(x, y, destination); + TensorPrimitives.Add(x, y, destination); for (int i = 0; i < tensorLength; i++) { - Assert.Equal(x[i] - y, destination[i], Tolerance); + Assert.Equal(x[i] + y, destination[i], Tolerance); } } - [Theory] - [MemberData(nameof(TensorLengths))] - public static void SubtractTensorAndScalar_ThrowsForTooShortDestination(int tensorLength) - { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - float y = NextSingle(); - using BoundedMemory destination = CreateTensor(tensorLength - 1); - - AssertExtensions.Throws("destination", () => TensorPrimitives.Subtract(x, y, destination)); - } - [Theory] [MemberData(nameof(TensorLengthsIncluding0))] - public static void MultiplyTwoTensors(int tensorLength) + public static void Add_TensorScalar_InPlace(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); + float y = 
NextSingle(); - TensorPrimitives.Multiply(x, y, destination); + TensorPrimitives.Add(x, y, x); for (int i = 0; i < tensorLength; i++) { - Assert.Equal(x[i] * y[i], destination[i], Tolerance); + Assert.Equal(xOrig[i] + y, x[i], Tolerance); } } [Theory] [MemberData(nameof(TensorLengths))] - public static void MultiplyTwoTensors_ThrowsForMismatchedLengths(int tensorLength) + public static void Add_TensorScalar_ThrowsForTooShortDestination(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); - using BoundedMemory destination = CreateTensor(tensorLength); + float y = NextSingle(); + using BoundedMemory destination = CreateTensor(tensorLength - 1); - Assert.Throws(() => TensorPrimitives.Multiply(x, y, destination)); + AssertExtensions.Throws("destination", () => TensorPrimitives.Add(x, y, destination)); } - [Theory] - [MemberData(nameof(TensorLengths))] - public static void MultiplyTwoTensors_ThrowsForTooShortDestination(int tensorLength) + [Fact] + public static void Add_TensorScalar_ThrowsForOverlapppingInputsWithOutputs() { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength - 1); - - AssertExtensions.Throws("destination", () => TensorPrimitives.Multiply(x, y, destination)); + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Add(array.AsSpan(1, 2), 42, array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Add(array.AsSpan(1, 2), 42, array.AsSpan(2, 2))); } + #endregion + #region AddMultiply [Theory] [MemberData(nameof(TensorLengthsIncluding0))] - public static void MultiplyTensorAndScalar(int tensorLength) + public static void AddMultiply_ThreeTensors(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - float y = NextSingle(); + using 
BoundedMemory y = CreateAndFillTensor(tensorLength); + using BoundedMemory multiplier = CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength); - TensorPrimitives.Multiply(x, y, destination); + TensorPrimitives.AddMultiply(x, y, multiplier, destination); for (int i = 0; i < tensorLength; i++) { - Assert.Equal(x[i] * y, destination[i], Tolerance); + Assert.Equal((x[i] + y[i]) * multiplier[i], destination[i], Tolerance); } } - [Theory] - [MemberData(nameof(TensorLengths))] - public static void MultiplyTensorAndScalar_ThrowsForTooShortDestination(int tensorLength) - { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - float y = NextSingle(); - using BoundedMemory destination = CreateTensor(tensorLength - 1); - - AssertExtensions.Throws("destination", () => TensorPrimitives.Multiply(x, y, destination)); - } - [Theory] [MemberData(nameof(TensorLengthsIncluding0))] - public static void DivideTwoTensors(int tensorLength) + public static void AddMultiply_ThreeTensors_InPlace(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); - TensorPrimitives.Divide(x, y, destination); + TensorPrimitives.AddMultiply(x, x, x, x); for (int i = 0; i < tensorLength; i++) { - Assert.Equal(x[i] / y[i], destination[i], Tolerance); + Assert.Equal((xOrig[i] + xOrig[i]) * xOrig[i], x[i], Tolerance); } } [Theory] [MemberData(nameof(TensorLengths))] - public static void DivideTwoTensors_ThrowsForMismatchedLengths(int tensorLength) + public static void AddMultiply_ThreeTensors_ThrowsForMismatchedLengths(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + using BoundedMemory z = CreateAndFillTensor(tensorLength - 
1); using BoundedMemory destination = CreateTensor(tensorLength); - Assert.Throws(() => TensorPrimitives.Divide(x, y, destination)); + Assert.Throws(() => TensorPrimitives.AddMultiply(x, y, z, destination)); + Assert.Throws(() => TensorPrimitives.AddMultiply(x, z, y, destination)); + Assert.Throws(() => TensorPrimitives.AddMultiply(z, x, y, destination)); } [Theory] [MemberData(nameof(TensorLengths))] - public static void DivideTwoTensors_ThrowsForTooShortDestination(int tensorLength) + public static void AddMultiply_ThreeTensors_ThrowsForTooShortDestination(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); using BoundedMemory y = CreateAndFillTensor(tensorLength); + using BoundedMemory multiplier = CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength - 1); - AssertExtensions.Throws("destination", () => TensorPrimitives.Divide(x, y, destination)); - } - - [Theory] - [MemberData(nameof(TensorLengthsIncluding0))] - public static void DivideTensorAndScalar(int tensorLength) - { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - float y = NextSingle(); - using BoundedMemory destination = CreateTensor(tensorLength); - - TensorPrimitives.Divide(x, y, destination); - - for (int i = 0; i < tensorLength; i++) - { - Assert.Equal(x[i] / y, destination[i], Tolerance); - } + AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(x, y, multiplier, destination)); } - [Theory] - [MemberData(nameof(TensorLengths))] - public static void DivideTensorAndScalar_ThrowsForTooShortDestination(int tensorLength) + [Fact] + public static void AddMultiply_ThreeTensors_ThrowsForOverlapppingInputsWithOutputs() { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - float y = NextSingle(); - using BoundedMemory destination = CreateTensor(tensorLength - 1); - - AssertExtensions.Throws("destination", () => TensorPrimitives.Divide(x, y, destination)); + float[] array = new float[10]; + 
AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(7, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(7, 2), array.AsSpan(2, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(7, 2), array.AsSpan(3, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(7, 2), array.AsSpan(5, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(7, 2), array.AsSpan(6, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(7, 2), array.AsSpan(8, 2))); } [Theory] [MemberData(nameof(TensorLengthsIncluding0))] - public static void NegateTensor(int tensorLength) + public static void AddMultiply_TensorTensorScalar(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + float multiplier = NextSingle(); using BoundedMemory destination = CreateTensor(tensorLength); - TensorPrimitives.Negate(x, destination); + TensorPrimitives.AddMultiply(x, y, multiplier, destination); for (int i = 0; i < tensorLength; i++) { - Assert.Equal(-x[i], destination[i], Tolerance); + Assert.Equal((x[i] + y[i]) * multiplier, destination[i], Tolerance); } } - [Theory] - [MemberData(nameof(TensorLengths))] - public static void NegateTensor_ThrowsForTooShortDestination(int tensorLength) - { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength - 1); - - AssertExtensions.Throws("destination", () => TensorPrimitives.Negate(x, destination)); - } - [Theory] 
[MemberData(nameof(TensorLengthsIncluding0))] - public static void AddTwoTensorsAndMultiplyWithThirdTensor(int tensorLength) + public static void AddMultiply_TensorTensorScalar_InPlace(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); - using BoundedMemory multiplier = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); + float multiplier = NextSingle(); - TensorPrimitives.AddMultiply(x, y, multiplier, destination); + TensorPrimitives.AddMultiply(x, x, multiplier, x); for (int i = 0; i < tensorLength; i++) { - Assert.Equal((x[i] + y[i]) * multiplier[i], destination[i], Tolerance); + Assert.Equal((xOrig[i] + xOrig[i]) * multiplier, x[i], Tolerance); } } [Theory] [MemberData(nameof(TensorLengths))] - public static void AddTwoTensorsAndMultiplyWithThirdTensor_ThrowsForMismatchedLengths_x_y(int tensorLength) + public static void AddMultiply_TensorTensorScalar_ThrowsForMismatchedLengths_x_y(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); - using BoundedMemory multiplier = CreateAndFillTensor(tensorLength); + float multiplier = NextSingle(); using BoundedMemory destination = CreateTensor(tensorLength); Assert.Throws(() => TensorPrimitives.AddMultiply(x, y, multiplier, destination)); + Assert.Throws(() => TensorPrimitives.AddMultiply(y, x, multiplier, destination)); } [Theory] [MemberData(nameof(TensorLengths))] - public static void AddTwoTensorsAndMultiplyWithThirdTensor_ThrowsForMismatchedLengths_x_multiplier(int tensorLength) + public static void AddMultiply_TensorTensorScalar_ThrowsForTooShortDestination(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); using BoundedMemory y = CreateAndFillTensor(tensorLength); - using BoundedMemory multiplier = CreateAndFillTensor(tensorLength 
- 1); - using BoundedMemory destination = CreateTensor(tensorLength); + float multiplier = NextSingle(); + using BoundedMemory destination = CreateTensor(tensorLength - 1); - Assert.Throws(() => TensorPrimitives.AddMultiply(x, y, multiplier, destination)); + AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(x, y, multiplier, destination)); } - [Theory] - [MemberData(nameof(TensorLengths))] - public static void AddTwoTensorsAndMultiplyWithThirdTensor_ThrowsForTooShortDestination(int tensorLength) + [Fact] + public static void AddMultiply_TensorTensorScalar_ThrowsForOverlapppingInputsWithOutputs() { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); - using BoundedMemory multiplier = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength - 1); - - AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(x, y, multiplier, destination)); + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(array.AsSpan(1, 2), array.AsSpan(4, 2), 42, array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(array.AsSpan(1, 2), array.AsSpan(4, 2), 42, array.AsSpan(2, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(array.AsSpan(1, 2), array.AsSpan(4, 2), 42, array.AsSpan(3, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(array.AsSpan(1, 2), array.AsSpan(4, 2), 42, array.AsSpan(5, 2))); } [Theory] [MemberData(nameof(TensorLengthsIncluding0))] - public static void AddTwoTensorsAndMultiplyWithScalar(int tensorLength) + public static void AddMultiply_TensorScalarTensor(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); - float multiplier = NextSingle(); + float y = NextSingle(); + using 
BoundedMemory multiplier = CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength); TensorPrimitives.AddMultiply(x, y, multiplier, destination); for (int i = 0; i < tensorLength; i++) { - Assert.Equal((x[i] + y[i]) * multiplier, destination[i], Tolerance); + Assert.Equal((x[i] + y) * multiplier[i], destination[i], Tolerance); } } - [Theory] - [MemberData(nameof(TensorLengths))] - public static void AddTwoTensorsAndMultiplyWithScalar_ThrowsForMismatchedLengths_x_y(int tensorLength) - { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); - float multiplier = NextSingle(); - using BoundedMemory destination = CreateTensor(tensorLength); - - Assert.Throws(() => TensorPrimitives.AddMultiply(x, y, multiplier, destination)); - } - - [Theory] - [MemberData(nameof(TensorLengths))] - public static void AddTwoTensorsAndMultiplyWithScalar_ThrowsForTooShortDestination(int tensorLength) - { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); - float multiplier = NextSingle(); - using BoundedMemory destination = CreateTensor(tensorLength - 1); - - AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(x, y, multiplier, destination)); - } - [Theory] [MemberData(nameof(TensorLengthsIncluding0))] - public static void AddTensorAndScalarAndMultiplyWithTensor(int tensorLength) + public static void AddMultiply_TensorScalarTensor_InPlace(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); float y = NextSingle(); - using BoundedMemory multiplier = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength); - TensorPrimitives.AddMultiply(x, y, multiplier, destination); + TensorPrimitives.AddMultiply(x, y, x, x); for (int i = 0; i < tensorLength; i++) { - Assert.Equal((x[i] + y) * 
multiplier[i], destination[i], Tolerance); + Assert.Equal((xOrig[i] + y) * xOrig[i], x[i], Tolerance); } } [Theory] [MemberData(nameof(TensorLengths))] - public static void AddTensorAndScalarAndMultiplyWithTensor_ThrowsForMismatchedLengths_x_z(int tensorLength) + public static void AddMultiply_TensorScalarTensor_ThrowsForMismatchedLengths_x_z(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); float y = NextSingle(); - using BoundedMemory multiplier = CreateAndFillTensor(tensorLength - 1); + using BoundedMemory z = CreateAndFillTensor(tensorLength - 1); using BoundedMemory destination = CreateTensor(tensorLength); - Assert.Throws(() => TensorPrimitives.AddMultiply(x, y, multiplier, destination)); + Assert.Throws(() => TensorPrimitives.AddMultiply(x, y, z, destination)); + Assert.Throws(() => TensorPrimitives.AddMultiply(z, y, x, destination)); } [Theory] [MemberData(nameof(TensorLengths))] - public static void AddTensorAndScalarAndMultiplyWithTensor_ThrowsForTooShortDestination(int tensorLength) + public static void AddMultiply_TensorScalarTensor_ThrowsForTooShortDestination(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); float y = NextSingle(); @@ -467,355 +431,281 @@ public static void AddTensorAndScalarAndMultiplyWithTensor_ThrowsForTooShortDest AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(x, y, multiplier, destination)); } + [Fact] + public static void AddMultiply_TensorScalarTensor_ThrowsForOverlapppingInputsWithOutputs() + { + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(array.AsSpan(1, 2), 42, array.AsSpan(4, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(array.AsSpan(1, 2), 42, array.AsSpan(4, 2), array.AsSpan(2, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(array.AsSpan(1, 2), 42, array.AsSpan(4, 2), array.AsSpan(3, 
2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.AddMultiply(array.AsSpan(1, 2), 42, array.AsSpan(4, 2), array.AsSpan(5, 2))); + } + #endregion + + #region Cosh [Theory] [MemberData(nameof(TensorLengthsIncluding0))] - public static void MultiplyTwoTensorsAndAddWithThirdTensor(int tensorLength) + public static void Cosh(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); - using BoundedMemory addend = CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength); - TensorPrimitives.MultiplyAdd(x, y, addend, destination); + TensorPrimitives.Cosh(x, destination); for (int i = 0; i < tensorLength; i++) { - Assert.Equal((x[i] * y[i]) + addend[i], destination[i], Tolerance); + Assert.Equal(MathF.Cosh(x[i]), destination[i], Tolerance); } } [Theory] - [MemberData(nameof(TensorLengths))] - public static void MultiplyTwoTensorsAndAddWithThirdTensor_ThrowsForMismatchedLengths_x_y(int tensorLength) + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Cosh_InPlace(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); - using BoundedMemory addend = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength); - - Assert.Throws(() => TensorPrimitives.MultiplyAdd(x, y, addend, destination)); - } + float[] xOrig = x.Span.ToArray(); - [Theory] - [MemberData(nameof(TensorLengths))] - public static void MultiplyTwoTensorsAndAddWithThirdTensor_ThrowsForMismatchedLengths_x_multiplier(int tensorLength) - { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); - using BoundedMemory addend = CreateAndFillTensor(tensorLength - 1); - using BoundedMemory destination = CreateTensor(tensorLength); + TensorPrimitives.Cosh(x, x); - Assert.Throws(() 
=> TensorPrimitives.MultiplyAdd(x, y, addend, destination)); + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathF.Cosh(xOrig[i]), x[i], Tolerance); + } } [Theory] [MemberData(nameof(TensorLengths))] - public static void MultiplyTwoTensorsAndAddWithThirdTensor_ThrowsForTooShortDestination(int tensorLength) + public static void Cosh_ThrowsForTooShortDestination(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); - using BoundedMemory addend = CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength - 1); - AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(x, y, addend, destination)); + AssertExtensions.Throws("destination", () => TensorPrimitives.Cosh(x, destination)); } - [Theory] - [MemberData(nameof(TensorLengthsIncluding0))] - public static void MultiplyTwoTensorsAndAddWithScalar(int tensorLength) + [Fact] + public static void Cosh_ThrowsForOverlapppingInputsWithOutputs() { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); - float addend = NextSingle(); - using BoundedMemory destination = CreateTensor(tensorLength); - - TensorPrimitives.MultiplyAdd(x, y, addend, destination); - - for (int i = 0; i < tensorLength; i++) - { - Assert.Equal((x[i] * y[i]) + addend, destination[i], Tolerance); - } + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Cosh(array.AsSpan(1, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Cosh(array.AsSpan(1, 2), array.AsSpan(2, 2))); } + #endregion + #region CosineSimilarity [Theory] [MemberData(nameof(TensorLengths))] - public static void MultiplyTwoTensorsAndAddWithScalar_ThrowsForTooShortDestination(int tensorLength) + public static void CosineSimilarity_ThrowsForMismatchedLengths(int tensorLength) { using BoundedMemory x 
= CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); - float addend = NextSingle(); - using BoundedMemory destination = CreateTensor(tensorLength - 1); + using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); - AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(x, y, addend, destination)); + Assert.Throws(() => TensorPrimitives.CosineSimilarity(x, y)); + Assert.Throws(() => TensorPrimitives.CosineSimilarity(y, x)); } - [Theory] - [MemberData(nameof(TensorLengthsIncluding0))] - public static void MultiplyTensorAndScalarAndAddWithTensor(int tensorLength) + [Fact] + public static void CosineSimilarity_ThrowsForEmpty() { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - float y = NextSingle(); - using BoundedMemory addend = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength); - - TensorPrimitives.MultiplyAdd(x, y, addend, destination); - - for (int i = 0; i < tensorLength; i++) - { - Assert.Equal((x[i] * y) + addend[i], destination[i], Tolerance); - } + Assert.Throws(() => TensorPrimitives.CosineSimilarity(ReadOnlySpan.Empty, ReadOnlySpan.Empty)); + Assert.Throws(() => TensorPrimitives.CosineSimilarity(ReadOnlySpan.Empty, CreateTensor(1))); + Assert.Throws(() => TensorPrimitives.CosineSimilarity(CreateTensor(1), ReadOnlySpan.Empty)); } [Theory] - [MemberData(nameof(TensorLengths))] - public static void MultiplyTensorAndScalarAndAddWithTensor_ThrowsForTooShortDestination(int tensorLength) + [InlineData(new float[] { 3, 2, 0, 5 }, new float[] { 1, 0, 0, 0 }, 0.48666f)] + [InlineData(new float[] { 1, 1, 1, 1, 1, 0 }, new float[] { 1, 1, 1, 1, 0, 1 }, 0.80f)] + public static void CosineSimilarity_KnownValues(float[] x, float[] y, float expectedResult) { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - float y = NextSingle(); - using BoundedMemory addend = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = 
CreateTensor(tensorLength - 1); - - AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(x, y, addend, destination)); + Assert.Equal(expectedResult, TensorPrimitives.CosineSimilarity(x, y), Tolerance); } [Theory] - [MemberData(nameof(TensorLengthsIncluding0))] - public static void ExpTensor(int tensorLength) + [MemberData(nameof(TensorLengths))] + public static void CosineSimilarity(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength); - - TensorPrimitives.Exp(x, destination); + using BoundedMemory y = CreateAndFillTensor(tensorLength); - for (int i = 0; i < tensorLength; i++) + float dot = 0f, squareX = 0f, squareY = 0f; + for (int i = 0; i < x.Length; i++) { - Assert.Equal(MathF.Exp(x[i]), destination[i], Tolerance); + dot += x[i] * y[i]; + squareX += x[i] * x[i]; + squareY += y[i] * y[i]; } - } - - [Theory] - [MemberData(nameof(TensorLengths))] - public static void ExpTensor_ThrowsForTooShortDestination(int tensorLength) - { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength - 1); - AssertExtensions.Throws("destination", () => TensorPrimitives.Exp(x, destination)); + Assert.Equal(dot / (Math.Sqrt(squareX) * Math.Sqrt(squareY)), TensorPrimitives.CosineSimilarity(x, y), Tolerance); } + #endregion - [Theory] - [MemberData(nameof(TensorLengthsIncluding0))] - public static void LogTensor(int tensorLength) + #region Distance + [Fact] + public static void Distance_ThrowsForEmpty() { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength); - - TensorPrimitives.Log(x, destination); - - for (int i = 0; i < tensorLength; i++) - { - Assert.Equal(MathF.Log(x[i]), destination[i], Tolerance); - } + Assert.Throws(() => TensorPrimitives.Distance(ReadOnlySpan.Empty, ReadOnlySpan.Empty)); + Assert.Throws(() => 
TensorPrimitives.Distance(ReadOnlySpan.Empty, CreateTensor(1))); + Assert.Throws(() => TensorPrimitives.Distance(CreateTensor(1), ReadOnlySpan.Empty)); } [Theory] [MemberData(nameof(TensorLengths))] - public static void LogTensor_ThrowsForTooShortDestination(int tensorLength) + public static void Distance_ThrowsForMismatchedLengths(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength - 1); + using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); - AssertExtensions.Throws("destination", () => TensorPrimitives.Log(x, destination)); + Assert.Throws(() => TensorPrimitives.Distance(x, y)); + Assert.Throws(() => TensorPrimitives.Distance(y, x)); } [Theory] - [MemberData(nameof(TensorLengthsIncluding0))] - public static void Log2(int tensorLength) + [InlineData(new float[] { 3, 2 }, new float[] { 4, 1 }, 1.4142f)] + [InlineData(new float[] { 0, 4 }, new float[] { 6, 2 }, 6.3245f)] + [InlineData(new float[] { 1, 2, 3 }, new float[] { 4, 5, 6 }, 5.1961f)] + [InlineData(new float[] { 5, 1, 6, 10 }, new float[] { 7, 2, 8, 4 }, 6.7082f)] + public static void Distance_KnownValues(float[] x, float[] y, float expectedResult) { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength); - - TensorPrimitives.Log2(x, destination); - - for (int i = 0; i < tensorLength; i++) - { - Assert.Equal(MathF.Log(x[i], 2), destination[i], Tolerance); - } + Assert.Equal(expectedResult, TensorPrimitives.Distance(x, y), Tolerance); } [Theory] [MemberData(nameof(TensorLengths))] - public static void Log2_ThrowsForTooShortDestination(int tensorLength) - { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength - 1); - - AssertExtensions.Throws("destination", () => TensorPrimitives.Log2(x, destination)); - } - - [Theory] - [MemberData(nameof(TensorLengthsIncluding0))] - public 
static void CoshTensor(int tensorLength) + public static void Distance(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength); - - TensorPrimitives.Cosh(x, destination); + using BoundedMemory y = CreateAndFillTensor(tensorLength); - for (int i = 0; i < tensorLength; i++) + float distance = 0f; + for (int i = 0; i < x.Length; i++) { - Assert.Equal(MathF.Cosh(x[i]), destination[i], Tolerance); + distance += (x[i] - y[i]) * (x[i] - y[i]); } - } - - [Theory] - [MemberData(nameof(TensorLengths))] - public static void CoshTensor_ThrowsForTooShortDestination(int tensorLength) - { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength - 1); - AssertExtensions.Throws("destination", () => TensorPrimitives.Cosh(x, destination)); + Assert.Equal(Math.Sqrt(distance), TensorPrimitives.Distance(x, y), Tolerance); } + #endregion + #region Divide [Theory] [MemberData(nameof(TensorLengthsIncluding0))] - public static void SinhTensor(int tensorLength) + public static void Divide_TwoTensors(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength); - TensorPrimitives.Sinh(x, destination); + TensorPrimitives.Divide(x, y, destination); for (int i = 0; i < tensorLength; i++) { - Assert.Equal(MathF.Sinh(x[i]), destination[i], Tolerance); + Assert.Equal(x[i] / y[i], destination[i], Tolerance); } } - [Theory] - [MemberData(nameof(TensorLengths))] - public static void SinhTensor_ThrowsForTooShortDestination(int tensorLength) - { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength - 1); - - AssertExtensions.Throws("destination", () => TensorPrimitives.Sinh(x, destination)); - } - [Theory] 
[MemberData(nameof(TensorLengthsIncluding0))] - public static void TanhTensor(int tensorLength) + public static void Divide_TwoTensors_InPlace(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); - TensorPrimitives.Tanh(x, destination); + TensorPrimitives.Divide(x, x, x); for (int i = 0; i < tensorLength; i++) { - Assert.Equal(MathF.Tanh(x[i]), destination[i], Tolerance); + Assert.Equal(xOrig[i] / xOrig[i], x[i], Tolerance); } } [Theory] [MemberData(nameof(TensorLengths))] - public static void TanhTensor_ThrowsForTooShortDestination(int tensorLength) + public static void Divide_TwoTensors_ThrowsForMismatchedLengths(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength - 1); + using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); + using BoundedMemory destination = CreateTensor(tensorLength); - AssertExtensions.Throws("destination", () => TensorPrimitives.Tanh(x, destination)); + Assert.Throws(() => TensorPrimitives.Divide(x, y, destination)); + Assert.Throws(() => TensorPrimitives.Divide(y, x, destination)); } [Theory] [MemberData(nameof(TensorLengths))] - public static void CosineSimilarity_ThrowsForMismatchedLengths_x_y(int tensorLength) + public static void Divide_TwoTensors_ThrowsForTooShortDestination(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength - 1); - Assert.Throws(() => TensorPrimitives.CosineSimilarity(x, y)); + AssertExtensions.Throws("destination", () => TensorPrimitives.Divide(x, y, destination)); } [Fact] - public static void CosineSimilarity_ThrowsForEmpty_x_y() - { - Assert.Throws(() => 
TensorPrimitives.CosineSimilarity(ReadOnlySpan.Empty, ReadOnlySpan.Empty)); - Assert.Throws(() => TensorPrimitives.CosineSimilarity(ReadOnlySpan.Empty, CreateTensor(1))); - Assert.Throws(() => TensorPrimitives.CosineSimilarity(CreateTensor(1), ReadOnlySpan.Empty)); - } - - [Theory] - [InlineData(new float[] { 3, 2, 0, 5 }, new float[] { 1, 0, 0, 0 }, 0.48666f)] - [InlineData(new float[] { 1, 1, 1, 1, 1, 0 }, new float[] { 1, 1, 1, 1, 0, 1 }, 0.80f)] - public static void CosineSimilarity_KnownValues(float[] x, float[] y, float expectedResult) + public static void Divide_TwoTensors_ThrowsForOverlapppingInputsWithOutputs() { - Assert.Equal(expectedResult, TensorPrimitives.CosineSimilarity(x, y), Tolerance); + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Divide(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Divide(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(2, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Divide(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(3, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Divide(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(5, 2))); } [Theory] - [MemberData(nameof(TensorLengths))] - public static void CosineSimilarity(int tensorLength) + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Divide_TensorScalar(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); + float y = NextSingle(); + using BoundedMemory destination = CreateTensor(tensorLength); - float dot = 0f, squareX = 0f, squareY = 0f; - for (int i = 0; i < x.Length; i++) + TensorPrimitives.Divide(x, y, destination); + + for (int i = 0; i < tensorLength; i++) { - dot += x[i] * y[i]; - squareX += x[i] * x[i]; - squareY += y[i] * y[i]; + Assert.Equal(x[i] / y, destination[i], 
Tolerance); } - - Assert.Equal(dot / (Math.Sqrt(squareX) * Math.Sqrt(squareY)), TensorPrimitives.CosineSimilarity(x, y), Tolerance); - } - - [Fact] - public static void Distance_ThrowsForEmpty_x_y() - { - Assert.Throws(() => TensorPrimitives.Distance(ReadOnlySpan.Empty, ReadOnlySpan.Empty)); - Assert.Throws(() => TensorPrimitives.Distance(ReadOnlySpan.Empty, CreateTensor(1))); - Assert.Throws(() => TensorPrimitives.Distance(CreateTensor(1), ReadOnlySpan.Empty)); } [Theory] - [MemberData(nameof(TensorLengths))] - public static void Distance_ThrowsForMismatchedLengths_x_y(int tensorLength) + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Divide_TensorScalar_InPlace(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); + float[] xOrig = x.Span.ToArray(); + float y = NextSingle(); - Assert.Throws(() => TensorPrimitives.Distance(x, y)); - } + TensorPrimitives.Divide(x, y, x); - [Theory] - [InlineData(new float[] { 3, 2 }, new float[] { 4, 1 }, 1.4142f)] - [InlineData(new float[] { 0, 4 }, new float[] { 6, 2 }, 6.3245f)] - [InlineData(new float[] { 1, 2, 3 }, new float[] { 4, 5, 6 }, 5.1961f)] - [InlineData(new float[] { 5, 1, 6, 10 }, new float[] { 7, 2, 8, 4 }, 6.7082f)] - public static void Distance_KnownValues(float[] x, float[] y, float expectedResult) - { - Assert.Equal(expectedResult, TensorPrimitives.Distance(x, y), Tolerance); + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(xOrig[i] / y, x[i], Tolerance); + } } [Theory] [MemberData(nameof(TensorLengths))] - public static void Distance(int tensorLength) + public static void Divide_TensorScalar_ThrowsForTooShortDestination(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); + float y = NextSingle(); + using BoundedMemory destination = CreateTensor(tensorLength - 1); - float distance = 0f; - for (int i = 0; i 
< x.Length; i++) - { - distance += (x[i] - y[i]) * (x[i] - y[i]); - } + AssertExtensions.Throws("destination", () => TensorPrimitives.Divide(x, y, destination)); + } - Assert.Equal(Math.Sqrt(distance), TensorPrimitives.Distance(x, y), Tolerance); + [Fact] + public static void Divide_TensorScalar_ThrowsForOverlapppingInputsWithOutputs() + { + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Divide(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Divide(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(2, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Divide(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(3, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Divide(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(5, 2))); } + #endregion + #region Dot [Theory] [MemberData(nameof(TensorLengths))] public static void Dot_ThrowsForMismatchedLengths_x_y(int tensorLength) @@ -824,6 +714,7 @@ public static void Dot_ThrowsForMismatchedLengths_x_y(int tensorLength) using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); Assert.Throws(() => TensorPrimitives.Dot(x, y)); + Assert.Throws(() => TensorPrimitives.Dot(y, x)); } [Theory] @@ -851,127 +742,59 @@ public static void Dot(int tensorLength) Assert.Equal(dot, TensorPrimitives.Dot(x, y), Tolerance); } + #endregion + #region Exp [Theory] - [InlineData(new float[] { 1, 2, 3 }, 3.7416575f)] - [InlineData(new float[] { 3, 4 }, 5)] - [InlineData(new float[] { 3 }, 3)] - [InlineData(new float[] { 3, 4, 1, 2 }, 5.477226)] - [InlineData(new float[] { }, 0f)] - public static void Norm_KnownValues(float[] x, float expectedResult) + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Exp(int tensorLength) { - Assert.Equal(expectedResult, TensorPrimitives.Norm(x), Tolerance); + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using 
BoundedMemory destination = CreateTensor(tensorLength); + + TensorPrimitives.Exp(x, destination); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathF.Exp(x[i]), destination[i], Tolerance); + } } [Theory] [MemberData(nameof(TensorLengthsIncluding0))] - public static void Norm(int tensorLength) + public static void Exp_InPlace(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); - float sumOfSquares = 0f; - for (int i = 0; i < x.Length; i++) + TensorPrimitives.Exp(x, x); + + for (int i = 0; i < tensorLength; i++) { - sumOfSquares += x[i] * x[i]; + Assert.Equal(MathF.Exp(xOrig[i]), x[i], Tolerance); } - - Assert.Equal(Math.Sqrt(sumOfSquares), TensorPrimitives.Norm(x), Tolerance); } [Theory] [MemberData(nameof(TensorLengths))] - public static void SoftMax_ThrowsForTooShortDestination(int tensorLength) + public static void Exp_ThrowsForTooShortDestination(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength - 1); - AssertExtensions.Throws("destination", () => TensorPrimitives.SoftMax(x, destination)); + AssertExtensions.Throws("destination", () => TensorPrimitives.Exp(x, destination)); } - [Theory] - [InlineData(new float[] { 3, 1, .2f }, new float[] { 0.8360188f, 0.11314284f, 0.05083836f })] - [InlineData(new float[] { 3, 4, 1 }, new float[] { 0.2594f, 0.705384f, 0.0351f })] - [InlineData(new float[] { 5, 3 }, new float[] { 0.8807f, 0.1192f })] - [InlineData(new float[] { 4, 2, 1, 9 }, new float[] { 0.0066f, 9.04658e-4f, 3.32805e-4f, 0.9920f})] - public static void SoftMax(float[] x, float[] expectedResult) + [Fact] + public static void Exp_ThrowsForOverlapppingInputsWithOutputs() { - using BoundedMemory dest = CreateTensor(x.Length); - TensorPrimitives.SoftMax(x, dest); - - for (int i = 0; i < x.Length; i++) - { - Assert.Equal(expectedResult[i], dest[i], Tolerance); - } - } - - [Fact] - public static void 
SoftMax_DestinationLongerThanSource() - { - float[] x = [3, 1, .2f]; - float[] expectedResult = [0.8360188f, 0.11314284f, 0.05083836f]; - using BoundedMemory dest = CreateTensor(x.Length + 1); - TensorPrimitives.SoftMax(x, dest); - - for (int i = 0; i < x.Length; i++) - { - Assert.Equal(expectedResult[i], dest[i], Tolerance); - } - } - - [Fact] - public static void SoftMax_ThrowsForEmptyInput() - { - AssertExtensions.Throws(() => TensorPrimitives.SoftMax(ReadOnlySpan.Empty, CreateTensor(1))); - } - - [Theory] - [MemberData(nameof(TensorLengths))] - public static void Sigmoid_ThrowsForTooShortDestination(int tensorLength) - { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength - 1); - - AssertExtensions.Throws("destination", () => TensorPrimitives.Sigmoid(x, destination)); - } - - [Theory] - [InlineData(new float[] { -5, -4.5f, -4 }, new float[] { 0.0066f, 0.0109f, 0.0179f })] - [InlineData(new float[] { 4.5f, 5 }, new float[] { 0.9890f, 0.9933f })] - [InlineData(new float[] { 0, -3, 3, .5f }, new float[] { 0.5f, 0.0474f, 0.9525f, 0.6224f })] - public static void Sigmoid(float[] x, float[] expectedResult) - { - using BoundedMemory dest = CreateTensor(x.Length); - TensorPrimitives.Sigmoid(x, dest); - - for (int i = 0; i < x.Length; i++) - { - Assert.Equal(expectedResult[i], dest[i], Tolerance); - } - } - - [Fact] - public static void Sigmoid_DestinationLongerThanSource() - { - float[] x = [-5, -4.5f, -4]; - float[] expectedResult = [0.0066f, 0.0109f, 0.0179f]; - using BoundedMemory dest = CreateTensor(x.Length + 1); - - TensorPrimitives.Sigmoid(x, dest); - - float originalLast = dest[dest.Length - 1]; - for (int i = 0; i < x.Length; i++) - { - Assert.Equal(expectedResult[i], dest[i], Tolerance); - } - Assert.Equal(originalLast, dest[dest.Length - 1]); - } - - [Fact] - public static void Sigmoid_ThrowsForEmptyInput() - { - AssertExtensions.Throws(() => TensorPrimitives.Sigmoid(ReadOnlySpan.Empty, 
CreateTensor(1))); + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Exp(array.AsSpan(1, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Exp(array.AsSpan(1, 2), array.AsSpan(2, 2))); } + #endregion + #region IndexOfMax [Fact] public static void IndexOfMax_ReturnsNegative1OnEmpty() { @@ -1013,90 +836,96 @@ public static void IndexOfMax_Negative0LesserThanPositive0() Assert.Equal(1, TensorPrimitives.IndexOfMax([-1, -0f])); Assert.Equal(2, TensorPrimitives.IndexOfMax([-1, -0f, 1])); } + #endregion + #region IndexOfMaxMagnitude [Fact] - public static void IndexOfMin_ReturnsNegative1OnEmpty() + public static void IndexOfMaxMagnitude_ReturnsNegative1OnEmpty() { - Assert.Equal(-1, TensorPrimitives.IndexOfMin(ReadOnlySpan.Empty)); + Assert.Equal(-1, TensorPrimitives.IndexOfMaxMagnitude(ReadOnlySpan.Empty)); } [Theory] [MemberData(nameof(TensorLengths))] - public static void IndexOfMin(int tensorLength) + public static void IndexOfMaxMagnitude(int tensorLength) { foreach (int expected in new[] { 0, tensorLength / 2, tensorLength - 1 }) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - x[expected] = Enumerable.Min(MemoryMarshal.ToEnumerable(x.Memory)) - 1; - Assert.Equal(expected, TensorPrimitives.IndexOfMin(x)); + x[expected] = Enumerable.Max(MemoryMarshal.ToEnumerable(x.Memory), Math.Abs) + 1; + Assert.Equal(expected, TensorPrimitives.IndexOfMaxMagnitude(x)); } } [Theory] [MemberData(nameof(TensorLengths))] - public static void IndexOfMin_FirstNaNReturned(int tensorLength) + public static void IndexOfMaxMagnitude_FirstNaNReturned(int tensorLength) { foreach (int expected in new[] { 0, tensorLength / 2, tensorLength - 1 }) { using BoundedMemory x = CreateAndFillTensor(tensorLength); x[expected] = float.NaN; x[tensorLength - 1] = float.NaN; - Assert.Equal(expected, TensorPrimitives.IndexOfMin(x)); + Assert.Equal(expected, TensorPrimitives.IndexOfMaxMagnitude(x)); } } [Fact] 
- public static void IndexOfMin_Negative0LesserThanPositive0() + public static void IndexOfMaxMagnitude_Negative0LesserThanPositive0() { - Assert.Equal(0, TensorPrimitives.IndexOfMin([-0f, +0f])); - Assert.Equal(1, TensorPrimitives.IndexOfMin([+0f, -0f])); - Assert.Equal(1, TensorPrimitives.IndexOfMin([+0f, -0f, -0f, -0f, -0f])); - Assert.Equal(0, TensorPrimitives.IndexOfMin([-1, -0f])); - Assert.Equal(0, TensorPrimitives.IndexOfMin([-1, -0f, 1])); + Assert.Equal(0, TensorPrimitives.IndexOfMaxMagnitude([-0f, -0f, -0f, -0f])); + Assert.Equal(1, TensorPrimitives.IndexOfMaxMagnitude([-0f, +0f])); + Assert.Equal(1, TensorPrimitives.IndexOfMaxMagnitude([-0f, +0f, +0f, +0f])); + Assert.Equal(0, TensorPrimitives.IndexOfMaxMagnitude([+0f, -0f])); + Assert.Equal(0, TensorPrimitives.IndexOfMaxMagnitude([-1, -0f])); + Assert.Equal(2, TensorPrimitives.IndexOfMaxMagnitude([-1, -0f, 1])); } + #endregion + #region IndexOfMin [Fact] - public static void IndexOfMaxMagnitude_ReturnsNegative1OnEmpty() + public static void IndexOfMin_ReturnsNegative1OnEmpty() { - Assert.Equal(-1, TensorPrimitives.IndexOfMaxMagnitude(ReadOnlySpan.Empty)); + Assert.Equal(-1, TensorPrimitives.IndexOfMin(ReadOnlySpan.Empty)); } [Theory] [MemberData(nameof(TensorLengths))] - public static void IndexOfMaxMagnitude(int tensorLength) + public static void IndexOfMin(int tensorLength) { foreach (int expected in new[] { 0, tensorLength / 2, tensorLength - 1 }) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - x[expected] = Enumerable.Max(MemoryMarshal.ToEnumerable(x.Memory), Math.Abs) + 1; - Assert.Equal(expected, TensorPrimitives.IndexOfMaxMagnitude(x)); + x[expected] = Enumerable.Min(MemoryMarshal.ToEnumerable(x.Memory)) - 1; + Assert.Equal(expected, TensorPrimitives.IndexOfMin(x)); } } [Theory] [MemberData(nameof(TensorLengths))] - public static void IndexOfMaxMagnitude_FirstNaNReturned(int tensorLength) + public static void IndexOfMin_FirstNaNReturned(int tensorLength) { foreach (int expected in 
new[] { 0, tensorLength / 2, tensorLength - 1 }) { using BoundedMemory x = CreateAndFillTensor(tensorLength); x[expected] = float.NaN; x[tensorLength - 1] = float.NaN; - Assert.Equal(expected, TensorPrimitives.IndexOfMaxMagnitude(x)); + Assert.Equal(expected, TensorPrimitives.IndexOfMin(x)); } } [Fact] - public static void IndexOfMaxMagnitude_Negative0LesserThanPositive0() + public static void IndexOfMin_Negative0LesserThanPositive0() { - Assert.Equal(0, TensorPrimitives.IndexOfMaxMagnitude([-0f, -0f, -0f, -0f])); - Assert.Equal(1, TensorPrimitives.IndexOfMaxMagnitude([-0f, +0f])); - Assert.Equal(1, TensorPrimitives.IndexOfMaxMagnitude([-0f, +0f, +0f, +0f])); - Assert.Equal(0, TensorPrimitives.IndexOfMaxMagnitude([+0f, -0f])); - Assert.Equal(0, TensorPrimitives.IndexOfMaxMagnitude([-1, -0f])); - Assert.Equal(2, TensorPrimitives.IndexOfMaxMagnitude([-1, -0f, 1])); + Assert.Equal(0, TensorPrimitives.IndexOfMin([-0f, +0f])); + Assert.Equal(1, TensorPrimitives.IndexOfMin([+0f, -0f])); + Assert.Equal(1, TensorPrimitives.IndexOfMin([+0f, -0f, -0f, -0f, -0f])); + Assert.Equal(0, TensorPrimitives.IndexOfMin([-1, -0f])); + Assert.Equal(0, TensorPrimitives.IndexOfMin([-1, -0f, 1])); } + #endregion + #region IndexOfMinMagnitude [Fact] public static void IndexOfMinMagnitude_ReturnsNegative1OnEmpty() { @@ -1144,456 +973,1648 @@ public static void IndexOfMinMagnitude_Negative0LesserThanPositive0() Assert.Equal(1, TensorPrimitives.IndexOfMinMagnitude([-1, -0f])); Assert.Equal(1, TensorPrimitives.IndexOfMinMagnitude([-1, -0f, 1])); } + #endregion - [Fact] - public static void Max_ThrowsForEmpty() + #region Log + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Log(int tensorLength) { - Assert.Throws(() => TensorPrimitives.Max(ReadOnlySpan.Empty)); + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength); + + TensorPrimitives.Log(x, destination); + + for (int i = 0; i < tensorLength; 
i++) + { + Assert.Equal(MathF.Log(x[i]), destination[i], Tolerance); + } } [Theory] - [MemberData(nameof(TensorLengths))] - public static void Max(int tensorLength) + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Log_InPlace(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); - Assert.Equal(Enumerable.Max(MemoryMarshal.ToEnumerable(x.Memory)), TensorPrimitives.Max(x)); + TensorPrimitives.Log(x, x); - float max = float.NegativeInfinity; - foreach (float f in x.Span) + for (int i = 0; i < tensorLength; i++) { - max = Math.Max(max, f); + Assert.Equal(MathF.Log(xOrig[i]), x[i], Tolerance); } - Assert.Equal(max, TensorPrimitives.Max(x)); } [Theory] [MemberData(nameof(TensorLengths))] - public static void Max_NanReturned(int tensorLength) + public static void Log_SpecialValues(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - foreach (int expected in new[] { 0, tensorLength / 2, tensorLength - 1 }) + using BoundedMemory destination = CreateTensor(tensorLength); + + // NaN + x[s_random.Next(x.Length)] = float.NaN; + + // +Infinity + x[s_random.Next(x.Length)] = float.PositiveInfinity; + + // -Infinity + x[s_random.Next(x.Length)] = float.NegativeInfinity; + + // +Zero + x[s_random.Next(x.Length)] = +0.0f; + + // -Zero + x[s_random.Next(x.Length)] = -0.0f; + + // +Epsilon + x[s_random.Next(x.Length)] = +float.Epsilon; + + // -Epsilon + x[s_random.Next(x.Length)] = -float.Epsilon; + + TensorPrimitives.Log(x, destination); + for (int i = 0; i < tensorLength; i++) { - x[expected] = float.NaN; - Assert.Equal(float.NaN, TensorPrimitives.Max(x)); + Assert.Equal(MathF.Log(x[i]), destination[i], Tolerance); } } + [Theory] + [MemberData(nameof(TensorLengths))] + public static void Log_ThrowsForTooShortDestination(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength - 1); + + 
AssertExtensions.Throws("destination", () => TensorPrimitives.Log(x, destination)); + } + [Fact] - public static void Max_Negative0LesserThanPositive0() + public static void Log_ThrowsForOverlapppingInputsWithOutputs() { - Assert.Equal(+0f, TensorPrimitives.Max([-0f, +0f])); - Assert.Equal(+0f, TensorPrimitives.Max([+0f, -0f])); - Assert.Equal(-0f, TensorPrimitives.Max([-1, -0f])); - Assert.Equal(1, TensorPrimitives.Max([-1, -0f, 1])); + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Log(array.AsSpan(1, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Log(array.AsSpan(1, 2), array.AsSpan(2, 2))); } + #endregion + #region Log2 [Theory] [MemberData(nameof(TensorLengthsIncluding0))] - public static void Max_TwoTensors(int tensorLength) + public static void Log2(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength); - TensorPrimitives.Max(x, y, destination); + TensorPrimitives.Log2(x, destination); for (int i = 0; i < tensorLength; i++) { - Assert.Equal(MathF.Max(x[i], y[i]), destination[i], Tolerance); + Assert.Equal(MathF.Log(x[i], 2), destination[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Log2_InPlace(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); + + TensorPrimitives.Log2(x, x); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathF.Log(xOrig[i], 2), x[i], Tolerance); } } [Theory] [MemberData(nameof(TensorLengths))] - public static void Max_TwoTensors_ThrowsForMismatchedLengths(int tensorLength) + public static void Log2_SpecialValues(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); using 
BoundedMemory destination = CreateTensor(tensorLength); - Assert.Throws(() => TensorPrimitives.Max(x, y, destination)); + // NaN + x[s_random.Next(x.Length)] = float.NaN; + + // +Infinity + x[s_random.Next(x.Length)] = float.PositiveInfinity; + + // -Infinity + x[s_random.Next(x.Length)] = float.NegativeInfinity; + + // +Zero + x[s_random.Next(x.Length)] = +0.0f; + + // -Zero + x[s_random.Next(x.Length)] = -0.0f; + + // +Epsilon + x[s_random.Next(x.Length)] = +float.Epsilon; + + // -Epsilon + x[s_random.Next(x.Length)] = -float.Epsilon; + + TensorPrimitives.Log2(x, destination); + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathF.Log(x[i], 2), destination[i], Tolerance); + } } [Theory] [MemberData(nameof(TensorLengths))] - public static void Max_TwoTensors_ThrowsForTooShortDestination(int tensorLength) + public static void Log2_ThrowsForTooShortDestination(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength - 1); - AssertExtensions.Throws("destination", () => TensorPrimitives.Max(x, y, destination)); + AssertExtensions.Throws("destination", () => TensorPrimitives.Log2(x, destination)); } [Fact] - public static void MaxMagnitude_ThrowsForEmpty() + public static void Log2_ThrowsForOverlapppingInputsWithOutputs() { - Assert.Throws(() => TensorPrimitives.MaxMagnitude(ReadOnlySpan.Empty)); + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Log2(array.AsSpan(1, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Log2(array.AsSpan(1, 2), array.AsSpan(2, 2))); + } + #endregion + + #region Max + [Fact] + public static void Max_Tensor_ThrowsForEmpty() + { + Assert.Throws(() => TensorPrimitives.Max(ReadOnlySpan.Empty)); } [Theory] [MemberData(nameof(TensorLengths))] - public static void MaxMagnitude(int tensorLength) + public static 
void Max_Tensor(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - int index = 0; - for (int i = 0; i < x.Length; i++) + Assert.Equal(Enumerable.Max(MemoryMarshal.ToEnumerable(x.Memory)), TensorPrimitives.Max(x)); + + float max = float.NegativeInfinity; + foreach (float f in x.Span) { - if (MathF.Abs(x[i]) >= MathF.Abs(x[index])) - { - index = i; - } + max = Math.Max(max, f); } - - Assert.Equal(x[index], TensorPrimitives.MaxMagnitude(x), Tolerance); + Assert.Equal(max, TensorPrimitives.Max(x)); } [Theory] [MemberData(nameof(TensorLengths))] - public static void MaxMagnitude_NanReturned(int tensorLength) + public static void Max_Tensor_NanReturned(int tensorLength) { - using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory x = CreateTensor(tensorLength); foreach (int expected in new[] { 0, tensorLength / 2, tensorLength - 1 }) { + FillTensor(x); x[expected] = float.NaN; - Assert.Equal(float.NaN, TensorPrimitives.MaxMagnitude(x)); + Assert.Equal(float.NaN, TensorPrimitives.Max(x)); } } [Fact] - public static void MaxMagnitude_Negative0LesserThanPositive0() + public static void Max_Tensor_Negative0LesserThanPositive0() { - Assert.Equal(+0f, TensorPrimitives.MaxMagnitude([-0f, +0f])); - Assert.Equal(+0f, TensorPrimitives.MaxMagnitude([+0f, -0f])); - Assert.Equal(-1, TensorPrimitives.MaxMagnitude([-1, -0f])); - Assert.Equal(1, TensorPrimitives.MaxMagnitude([-1, -0f, 1])); - Assert.Equal(0f, TensorPrimitives.MaxMagnitude([-0f, -0f, -0f, -0f, -0f, 0f])); - Assert.Equal(1, TensorPrimitives.MaxMagnitude([-0f, -0f, -0f, -0f, -1, -0f, 0f, 1])); + Assert.Equal(+0f, TensorPrimitives.Max([-0f, +0f])); + Assert.Equal(+0f, TensorPrimitives.Max([+0f, -0f])); + Assert.Equal(-0f, TensorPrimitives.Max([-1, -0f])); + Assert.Equal(1, TensorPrimitives.Max([-1, -0f, 1])); } [Theory] [MemberData(nameof(TensorLengthsIncluding0))] - public static void MaxMagnitude_TwoTensors(int tensorLength) + public static void Max_TwoTensors(int 
tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); using BoundedMemory y = CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength); - TensorPrimitives.MaxMagnitude(x, y, destination); + TensorPrimitives.Max(x, y, destination); for (int i = 0; i < tensorLength; i++) { - Assert.Equal(MathF.Abs(x[i]) >= MathF.Abs(y[i]) ? x[i] : y[i], destination[i], Tolerance); + Assert.Equal(MathF.Max(x[i], y[i]), destination[i], Tolerance); } } [Theory] - [MemberData(nameof(TensorLengths))] - public static void MaxMagnitude_TwoTensors_ThrowsForMismatchedLengths(int tensorLength) - { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); - using BoundedMemory destination = CreateTensor(tensorLength); - - Assert.Throws(() => TensorPrimitives.MaxMagnitude(x, y, destination)); - } - - [Theory] - [MemberData(nameof(TensorLengths))] - public static void MaxMagnitude_TwoTensors_ThrowsForTooShortDestination(int tensorLength) + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Max_TwoTensors_InPlace(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); using BoundedMemory y = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength - 1); + float[] xOrig = x.Span.ToArray(), yOrig = y.Span.ToArray(); + + TensorPrimitives.Max(x, y, x); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathF.Max(xOrig[i], y[i]), x[i], Tolerance); + } + + xOrig.AsSpan().CopyTo(x.Span); + yOrig.AsSpan().CopyTo(y.Span); + + TensorPrimitives.Max(x, y, y); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathF.Max(x[i], yOrig[i]), y[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void Max_TwoTensors_SpecialValues(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = 
CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength); + + // NaNs + x[s_random.Next(x.Length)] = float.NaN; + y[s_random.Next(y.Length)] = float.NaN; + + // Same magnitude, opposite sign + int pos = s_random.Next(x.Length); + x[pos] = -5f; + y[pos] = 5f; + + // Positive and negative 0s + pos = s_random.Next(x.Length); + x[pos] = 0f; + y[pos] = -0f; + + TensorPrimitives.Max(x, y, destination); + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathF.Max(x[i], y[i]), destination[i], Tolerance); + } + + TensorPrimitives.Max(y, x, destination); + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathF.Max(y[i], x[i]), destination[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void Max_TwoTensors_ThrowsForMismatchedLengths(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); + using BoundedMemory destination = CreateTensor(tensorLength); + + Assert.Throws(() => TensorPrimitives.Max(x, y, destination)); + Assert.Throws(() => TensorPrimitives.Max(y, x, destination)); + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void Max_TwoTensors_ThrowsForTooShortDestination(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength - 1); + + AssertExtensions.Throws("destination", () => TensorPrimitives.Max(x, y, destination)); + } + + [Fact] + public static void Max_TwoTensors_ThrowsForOverlapppingInputsWithOutputs() + { + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Max(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Max(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(2, 2))); + 
AssertExtensions.Throws("destination", () => TensorPrimitives.Max(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(3, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Max(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(5, 2))); + } + #endregion + + #region MaxMagnitude + [Fact] + public static void MaxMagnitude_Tensor_ThrowsForEmpty() + { + Assert.Throws(() => TensorPrimitives.MaxMagnitude(ReadOnlySpan.Empty)); + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void MaxMagnitude_Tensor(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + + float maxMagnitude = x[0]; + foreach (float i in x.Span) + { + maxMagnitude = MathFMaxMagnitude(maxMagnitude, i); + } + + Assert.Equal(maxMagnitude, TensorPrimitives.MaxMagnitude(x), Tolerance); + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void MaxMagnitude_Tensor_NanReturned(int tensorLength) + { + using BoundedMemory x = CreateTensor(tensorLength); + foreach (int expected in new[] { 0, tensorLength / 2, tensorLength - 1 }) + { + FillTensor(x); + x[expected] = float.NaN; + Assert.Equal(float.NaN, TensorPrimitives.MaxMagnitude(x)); + } + } + + [Fact] + public static void MaxMagnitude_Tensor_Negative0LesserThanPositive0() + { + Assert.Equal(+0f, TensorPrimitives.MaxMagnitude([-0f, +0f])); + Assert.Equal(+0f, TensorPrimitives.MaxMagnitude([+0f, -0f])); + Assert.Equal(-1, TensorPrimitives.MaxMagnitude([-1, -0f])); + Assert.Equal(1, TensorPrimitives.MaxMagnitude([-1, -0f, 1])); + Assert.Equal(0f, TensorPrimitives.MaxMagnitude([-0f, -0f, -0f, -0f, -0f, 0f])); + Assert.Equal(1, TensorPrimitives.MaxMagnitude([-0f, -0f, -0f, -0f, -1, -0f, 0f, 1])); + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void MaxMagnitude_TwoTensors(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + using BoundedMemory destination 
= CreateTensor(tensorLength); + + TensorPrimitives.MaxMagnitude(x, y, destination); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathFMaxMagnitude(x[i], y[i]), destination[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void MaxMagnitude_TwoTensors_InPlace(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + float[] xOrig = x.Span.ToArray(), yOrig = y.Span.ToArray(); + + TensorPrimitives.MaxMagnitude(x, y, x); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathFMaxMagnitude(xOrig[i], y[i]), x[i], Tolerance); + } + + xOrig.AsSpan().CopyTo(x.Span); + yOrig.AsSpan().CopyTo(y.Span); + + TensorPrimitives.MaxMagnitude(x, y, y); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathFMaxMagnitude(x[i], yOrig[i]), y[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void MaxMagnitude_TwoTensors_SpecialValues(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength); + + // NaNs + x[s_random.Next(x.Length)] = float.NaN; + y[s_random.Next(y.Length)] = float.NaN; + + // Same magnitude, opposite sign + int pos = s_random.Next(x.Length); + x[pos] = -5f; + y[pos] = 5f; + + // Positive and negative 0s + pos = s_random.Next(x.Length); + x[pos] = 0f; + y[pos] = -0f; + + TensorPrimitives.MaxMagnitude(x, y, destination); + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathFMaxMagnitude(x[i], y[i]), destination[i], Tolerance); + } + + TensorPrimitives.MaxMagnitude(y, x, destination); + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathFMaxMagnitude(y[i], x[i]), destination[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void 
MaxMagnitude_TwoTensors_ThrowsForMismatchedLengths(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); + using BoundedMemory destination = CreateTensor(tensorLength); + + Assert.Throws(() => TensorPrimitives.MaxMagnitude(x, y, destination)); + Assert.Throws(() => TensorPrimitives.MaxMagnitude(y, x, destination)); + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void MaxMagnitude_TwoTensors_ThrowsForTooShortDestination(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength - 1); AssertExtensions.Throws("destination", () => TensorPrimitives.MaxMagnitude(x, y, destination)); } [Fact] - public static void Min_ThrowsForEmpty() + public static void MaxMagnitude_TwoTensors_ThrowsForOverlapppingInputsWithOutputs() + { + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.MaxMagnitude(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.MaxMagnitude(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(2, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.MaxMagnitude(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(3, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.MaxMagnitude(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(5, 2))); + } + #endregion + + #region Min + [Fact] + public static void Min_Tensor_ThrowsForEmpty() { Assert.Throws(() => TensorPrimitives.Min(ReadOnlySpan.Empty)); } [Theory] [MemberData(nameof(TensorLengths))] - public static void Min(int tensorLength) + public static void Min_Tensor(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + + 
Assert.Equal(Enumerable.Min(MemoryMarshal.ToEnumerable(x.Memory)), TensorPrimitives.Min(x)); + + float min = float.PositiveInfinity; + foreach (float f in x.Span) + { + min = Math.Min(min, f); + } + Assert.Equal(min, TensorPrimitives.Min(x)); + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void Min_Tensor_NanReturned(int tensorLength) + { + using BoundedMemory x = CreateTensor(tensorLength); + foreach (int expected in new[] { 0, tensorLength / 2, tensorLength - 1 }) + { + FillTensor(x); + x[expected] = float.NaN; + Assert.Equal(float.NaN, TensorPrimitives.Min(x)); + } + } + + [Fact] + public static void Min_Tensor_Negative0LesserThanPositive0() + { + Assert.Equal(-0f, TensorPrimitives.Min([-0f, +0f])); + Assert.Equal(-0f, TensorPrimitives.Min([+0f, -0f])); + Assert.Equal(-1, TensorPrimitives.Min([-1, -0f])); + Assert.Equal(-1, TensorPrimitives.Min([-1, -0f, 1])); + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Min_TwoTensors(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength); + + TensorPrimitives.Min(x, y, destination); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathF.Min(x[i], y[i]), destination[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Min_TwoTensors_InPlace(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + float[] xOrig = x.Span.ToArray(), yOrig = y.Span.ToArray(); + + TensorPrimitives.Min(x, y, x); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathF.Min(xOrig[i], y[i]), x[i], Tolerance); + } + + xOrig.AsSpan().CopyTo(x.Span); + yOrig.AsSpan().CopyTo(y.Span); + + TensorPrimitives.Min(x, y, y); + + for (int i = 0; i < tensorLength; i++) + { + 
Assert.Equal(MathF.Min(x[i], yOrig[i]), y[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void Min_TwoTensors_SpecialValues(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength); + + // NaNs + x[s_random.Next(x.Length)] = float.NaN; + y[s_random.Next(y.Length)] = float.NaN; + + // Same magnitude, opposite sign + int pos = s_random.Next(x.Length); + x[pos] = -5f; + y[pos] = 5f; + + // Positive and negative 0s + pos = s_random.Next(x.Length); + x[pos] = 0f; + y[pos] = -0f; + + TensorPrimitives.Min(x, y, destination); + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathF.Min(x[i], y[i]), destination[i], Tolerance); + } + + TensorPrimitives.Min(y, x, destination); + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathF.Min(y[i], x[i]), destination[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void Min_TwoTensors_ThrowsForMismatchedLengths(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); + using BoundedMemory destination = CreateTensor(tensorLength); + + Assert.Throws(() => TensorPrimitives.Min(x, y, destination)); + Assert.Throws(() => TensorPrimitives.Min(y, x, destination)); + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void Min_TwoTensors_ThrowsForTooShortDestination(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength - 1); + + AssertExtensions.Throws("destination", () => TensorPrimitives.Min(x, y, destination)); + } + + [Fact] + public static void Min_TwoTensors_ThrowsForOverlapppingInputsWithOutputs() + { + float[] array = new 
float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Min(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Min(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(2, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Min(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(3, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Min(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(5, 2))); + } + #endregion + + #region MinMagnitude + [Fact] + public static void MinMagnitude_Tensor_ThrowsForEmpty() + { + Assert.Throws(() => TensorPrimitives.MinMagnitude(ReadOnlySpan.Empty)); + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void MinMagnitude_Tensor(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + + float minMagnitude = x[0]; + foreach (float i in x.Span) + { + minMagnitude = MathFMinMagnitude(minMagnitude, i); + } + + Assert.Equal(minMagnitude, TensorPrimitives.MinMagnitude(x), Tolerance); + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void MinMagnitude_Tensor_NanReturned(int tensorLength) + { + using BoundedMemory x = CreateTensor(tensorLength); + foreach (int expected in new[] { 0, tensorLength / 2, tensorLength - 1 }) + { + FillTensor(x); + x[expected] = float.NaN; + Assert.Equal(float.NaN, TensorPrimitives.MinMagnitude(x)); + } + } + + [Fact] + public static void MinMagnitude_Tensor_Negative0LesserThanPositive0() + { + Assert.Equal(0, TensorPrimitives.MinMagnitude([-0f, +0f])); + Assert.Equal(0, TensorPrimitives.MinMagnitude([+0f, -0f])); + Assert.Equal(0, TensorPrimitives.MinMagnitude([-1, -0f])); + Assert.Equal(0, TensorPrimitives.MinMagnitude([-1, -0f, 1])); + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void MinMagnitude_TwoTensors(int tensorLength) + { + using BoundedMemory x = 
CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength); + + TensorPrimitives.MinMagnitude(x, y, destination); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathFMinMagnitude(x[i], y[i]), destination[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void MinMagnitude_TwoTensors_InPlace(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + float[] xOrig = x.Span.ToArray(), yOrig = y.Span.ToArray(); + + TensorPrimitives.MinMagnitude(x, y, x); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathFMinMagnitude(xOrig[i], y[i]), x[i], Tolerance); + } + + xOrig.AsSpan().CopyTo(x.Span); + yOrig.AsSpan().CopyTo(y.Span); + + TensorPrimitives.MinMagnitude(x, y, y); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathFMinMagnitude(x[i], yOrig[i]), y[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void MinMagnitude_TwoTensors_SpecialValues(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength); + + // NaNs + x[s_random.Next(x.Length)] = float.NaN; + y[s_random.Next(y.Length)] = float.NaN; + + // Same magnitude, opposite sign + int pos = s_random.Next(x.Length); + x[pos] = -5f; + y[pos] = 5f; + + // Positive and negative 0s + pos = s_random.Next(x.Length); + x[pos] = 0f; + y[pos] = -0f; + + TensorPrimitives.MinMagnitude(x, y, destination); + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathFMinMagnitude(x[i], y[i]), destination[i], Tolerance); + } + + TensorPrimitives.MinMagnitude(y, x, destination); + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathFMinMagnitude(y[i], x[i]), 
destination[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void MinMagnitude_TwoTensors_ThrowsForMismatchedLengths(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); + using BoundedMemory destination = CreateTensor(tensorLength); + + Assert.Throws(() => TensorPrimitives.MinMagnitude(x, y, destination)); + Assert.Throws(() => TensorPrimitives.MinMagnitude(y, x, destination)); + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void MinMagnitude_TwoTensors_ThrowsForTooShortDestination(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength - 1); + + AssertExtensions.Throws("destination", () => TensorPrimitives.MinMagnitude(x, y, destination)); + } + + [Fact] + public static void MinMagnitude_TwoTensors_ThrowsForOverlapppingInputsWithOutputs() + { + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.MinMagnitude(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.MinMagnitude(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(2, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.MinMagnitude(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(3, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.MinMagnitude(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(5, 2))); + } + #endregion + + #region Multiply + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Multiply_TwoTensors(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength); 
+ + TensorPrimitives.Multiply(x, y, destination); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(x[i] * y[i], destination[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Multiply_TwoTensors_InPlace(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); + + TensorPrimitives.Multiply(x, x, x); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(xOrig[i] * xOrig[i], x[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void Multiply_TwoTensors_ThrowsForMismatchedLengths(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); + using BoundedMemory destination = CreateTensor(tensorLength); + + Assert.Throws(() => TensorPrimitives.Multiply(x, y, destination)); + Assert.Throws(() => TensorPrimitives.Multiply(y, x, destination)); + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void Multiply_TwoTensors_ThrowsForTooShortDestination(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength - 1); + + AssertExtensions.Throws("destination", () => TensorPrimitives.Multiply(x, y, destination)); + } + + [Fact] + public static void Multiply_TwoTensors_ThrowsForOverlapppingInputsWithOutputs() + { + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Multiply(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Multiply(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(2, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Multiply(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(3, 2))); + 
AssertExtensions.Throws("destination", () => TensorPrimitives.Multiply(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(5, 2))); + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Multiply_TensorScalar(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + float y = NextSingle(); + using BoundedMemory destination = CreateTensor(tensorLength); + + TensorPrimitives.Multiply(x, y, destination); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(x[i] * y, destination[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Multiply_TensorScalar_InPlace(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); + float y = NextSingle(); + + TensorPrimitives.Multiply(x, y, x); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(xOrig[i] * y, x[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void Multiply_TensorScalar_ThrowsForTooShortDestination(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + float y = NextSingle(); + using BoundedMemory destination = CreateTensor(tensorLength - 1); + + AssertExtensions.Throws("destination", () => TensorPrimitives.Multiply(x, y, destination)); + } + + [Fact] + public static void Multiply_TensorScalar_ThrowsForOverlapppingInputsWithOutputs() + { + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Multiply(array.AsSpan(1, 2), 42, array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Multiply(array.AsSpan(1, 2), 42, array.AsSpan(2, 2))); + } + #endregion + + #region MultiplyAdd + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void MultiplyAdd_ThreeTensors(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = 
CreateAndFillTensor(tensorLength); + using BoundedMemory addend = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength); + + TensorPrimitives.MultiplyAdd(x, y, addend, destination); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal((x[i] * y[i]) + addend[i], destination[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void MultiplyAdd_ThreeTensors_InPlace(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); + + TensorPrimitives.MultiplyAdd(x, x, x, x); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal((xOrig[i] * xOrig[i]) + xOrig[i], x[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void MultiplyAdd_ThreeTensors_ThrowsForMismatchedLengths_x_y(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + using BoundedMemory z = CreateAndFillTensor(tensorLength - 1); + using BoundedMemory destination = CreateTensor(tensorLength); + + Assert.Throws(() => TensorPrimitives.MultiplyAdd(x, y, z, destination)); + Assert.Throws(() => TensorPrimitives.MultiplyAdd(x, z, y, destination)); + Assert.Throws(() => TensorPrimitives.MultiplyAdd(z, x, y, destination)); + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void MultiplyAdd_ThreeTensors_ThrowsForTooShortDestination(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + using BoundedMemory addend = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength - 1); + + AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(x, y, addend, destination)); + } + + [Fact] + public static void MultiplyAdd_ThreeTensors_ThrowsForOverlapppingInputsWithOutputs() + 
{ + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(7, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(7, 2), array.AsSpan(2, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(7, 2), array.AsSpan(3, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(7, 2), array.AsSpan(5, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(7, 2), array.AsSpan(6, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(7, 2), array.AsSpan(8, 2))); + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void MultiplyAdd_TensorTensorScalar(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + float addend = NextSingle(); + using BoundedMemory destination = CreateTensor(tensorLength); + + TensorPrimitives.MultiplyAdd(x, y, addend, destination); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal((x[i] * y[i]) + addend, destination[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void MultiplyAdd_TensorTensorScalar_InPlace(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); + float addend = NextSingle(); + + TensorPrimitives.MultiplyAdd(x, x, addend, x); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal((xOrig[i] * xOrig[i]) + addend, x[i], Tolerance); + } + } + + [Theory] + 
[MemberData(nameof(TensorLengths))] + public static void MultiplyAdd_TensorTensorScalar_ThrowsForTooShortDestination(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + float addend = NextSingle(); + using BoundedMemory destination = CreateTensor(tensorLength - 1); + + AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(x, y, addend, destination)); + } + + [Fact] + public static void MultiplyAdd_TensorTensorScalar_ThrowsForOverlapppingInputsWithOutputs() + { + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(array.AsSpan(1, 2), array.AsSpan(4, 2), 42, array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(array.AsSpan(1, 2), array.AsSpan(4, 2), 42, array.AsSpan(2, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(array.AsSpan(1, 2), array.AsSpan(4, 2), 42, array.AsSpan(3, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(array.AsSpan(1, 2), array.AsSpan(4, 2), 42, array.AsSpan(5, 2))); + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void MultiplyAdd_TensorScalarTensor(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + float y = NextSingle(); + using BoundedMemory addend = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength); + + TensorPrimitives.MultiplyAdd(x, y, addend, destination); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal((x[i] * y) + addend[i], destination[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void MultiplyAdd_TensorScalarTensor_InPlace(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); + float y = NextSingle(); + + 
TensorPrimitives.MultiplyAdd(x, y, x, x); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal((xOrig[i] * y) + xOrig[i], x[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void MultiplyAdd_TensorScalarTensor_ThrowsForTooShortDestination(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + float y = NextSingle(); + using BoundedMemory addend = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength - 1); + + AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(x, y, addend, destination)); + } + + [Fact] + public static void MultiplyAdd_TensorScalarTensor_ThrowsForOverlapppingInputsWithOutputs() + { + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(array.AsSpan(1, 2), 42, array.AsSpan(4, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(array.AsSpan(1, 2), 42, array.AsSpan(4, 2), array.AsSpan(2, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(array.AsSpan(1, 2), 42, array.AsSpan(4, 2), array.AsSpan(3, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.MultiplyAdd(array.AsSpan(1, 2), 42, array.AsSpan(4, 2), array.AsSpan(5, 2))); + } + #endregion + + #region Negate + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Negate(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength); + + TensorPrimitives.Negate(x, destination); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(-x[i], destination[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Negate_InPlace(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); + + 
TensorPrimitives.Negate(x, x); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(-xOrig[i], x[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void Negate_ThrowsForTooShortDestination(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength - 1); + + AssertExtensions.Throws("destination", () => TensorPrimitives.Negate(x, destination)); + } + + [Fact] + public static void Negate_ThrowsForOverlapppingInputsWithOutputs() + { + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Negate(array.AsSpan(1, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Negate(array.AsSpan(1, 2), array.AsSpan(2, 2))); + } + #endregion + + #region Norm + [Theory] + [InlineData(new float[] { 1, 2, 3 }, 3.7416575f)] + [InlineData(new float[] { 3, 4 }, 5)] + [InlineData(new float[] { 3 }, 3)] + [InlineData(new float[] { 3, 4, 1, 2 }, 5.477226)] + [InlineData(new float[] { }, 0f)] + public static void Norm_KnownValues(float[] x, float expectedResult) + { + Assert.Equal(expectedResult, TensorPrimitives.Norm(x), Tolerance); + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Norm(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + + float sumOfSquares = 0f; + for (int i = 0; i < x.Length; i++) + { + sumOfSquares += x[i] * x[i]; + } + + Assert.Equal(Math.Sqrt(sumOfSquares), TensorPrimitives.Norm(x), Tolerance); + } + #endregion + + #region Product + [Fact] + public static void Product_ThrowsForEmpty() + { + Assert.Throws(() => TensorPrimitives.Product(ReadOnlySpan.Empty)); + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void Product(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + + float f = x[0]; + for (int i = 1; i < x.Length; i++) + { + f *= x[i]; 
+ } + + Assert.Equal(f, TensorPrimitives.Product(x), Tolerance); + } + + [Fact] + public static void Product_KnownValues() + { + Assert.Equal(1, TensorPrimitives.Product([1])); + Assert.Equal(-2, TensorPrimitives.Product([1, -2])); + Assert.Equal(-6, TensorPrimitives.Product([1, -2, 3])); + Assert.Equal(24, TensorPrimitives.Product([1, -2, 3, -4])); + Assert.Equal(120, TensorPrimitives.Product([1, -2, 3, -4, 5])); + Assert.Equal(-720, TensorPrimitives.Product([1, -2, 3, -4, 5, -6])); + Assert.Equal(0, TensorPrimitives.Product([1, -2, 3, -4, 5, -6, 0])); + Assert.Equal(0, TensorPrimitives.Product([0, 1, -2, 3, -4, 5, -6])); + Assert.Equal(0, TensorPrimitives.Product([1, -2, 3, 0, -4, 5, -6])); + Assert.Equal(float.NaN, TensorPrimitives.Product([1, -2, 3, float.NaN, -4, 5, -6])); + } + #endregion + + #region ProductOfDifferences + [Fact] + public static void ProductOfDifferences_ThrowsForEmptyAndMismatchedLengths() + { + Assert.Throws(() => TensorPrimitives.ProductOfDifferences(ReadOnlySpan.Empty, ReadOnlySpan.Empty)); + Assert.Throws(() => TensorPrimitives.ProductOfDifferences(ReadOnlySpan.Empty, CreateTensor(1))); + Assert.Throws(() => TensorPrimitives.ProductOfDifferences(CreateTensor(1), ReadOnlySpan.Empty)); + Assert.Throws(() => TensorPrimitives.ProductOfDifferences(CreateTensor(44), CreateTensor(43))); + Assert.Throws(() => TensorPrimitives.ProductOfDifferences(CreateTensor(43), CreateTensor(44))); + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void ProductOfDifferences(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + + float f = x[0] - y[0]; + for (int i = 1; i < x.Length; i++) + { + f *= x[i] - y[i]; + } + Assert.Equal(f, TensorPrimitives.ProductOfDifferences(x, y), Tolerance); + } + + [Fact] + public static void ProductOfDifferences_KnownValues() + { + Assert.Equal(0, TensorPrimitives.ProductOfDifferences([0], [0])); + Assert.Equal(0, 
TensorPrimitives.ProductOfDifferences([1], [1])); + Assert.Equal(1, TensorPrimitives.ProductOfDifferences([1], [0])); + Assert.Equal(-1, TensorPrimitives.ProductOfDifferences([0], [1])); + Assert.Equal(-1, TensorPrimitives.ProductOfDifferences([1, 2, 3, 4, 5], [2, 3, 4, 5, 6])); + Assert.Equal(120, TensorPrimitives.ProductOfDifferences([1, 2, 3, 4, 5], [0, 0, 0, 0, 0])); + Assert.Equal(-120, TensorPrimitives.ProductOfDifferences([0, 0, 0, 0, 0], [1, 2, 3, 4, 5])); + Assert.Equal(float.NaN, TensorPrimitives.ProductOfDifferences([1, 2, float.NaN, 4, 5], [0, 0, 0, 0, 0])); + } + #endregion + + #region ProductOfSums + [Fact] + public static void ProductOfSums_ThrowsForEmptyAndMismatchedLengths() + { + Assert.Throws(() => TensorPrimitives.ProductOfSums(ReadOnlySpan.Empty, ReadOnlySpan.Empty)); + Assert.Throws(() => TensorPrimitives.ProductOfSums(ReadOnlySpan.Empty, CreateTensor(1))); + Assert.Throws(() => TensorPrimitives.ProductOfSums(CreateTensor(1), ReadOnlySpan.Empty)); + Assert.Throws(() => TensorPrimitives.ProductOfSums(CreateTensor(44), CreateTensor(43))); + Assert.Throws(() => TensorPrimitives.ProductOfSums(CreateTensor(43), CreateTensor(44))); + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void ProductOfSums(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory y = CreateAndFillTensor(tensorLength); + + float f = x[0] + y[0]; + for (int i = 1; i < x.Length; i++) + { + f *= x[i] + y[i]; + } + Assert.Equal(f, TensorPrimitives.ProductOfSums(x, y), Tolerance); + } + + [Fact] + public static void ProductOfSums_KnownValues() + { + Assert.Equal(0, TensorPrimitives.ProductOfSums([0], [0])); + Assert.Equal(1, TensorPrimitives.ProductOfSums([0], [1])); + Assert.Equal(1, TensorPrimitives.ProductOfSums([1], [0])); + Assert.Equal(2, TensorPrimitives.ProductOfSums([1], [1])); + Assert.Equal(10395, TensorPrimitives.ProductOfSums([1, 2, 3, 4, 5], [2, 3, 4, 5, 6])); + Assert.Equal(120, 
TensorPrimitives.ProductOfSums([1, 2, 3, 4, 5], [0, 0, 0, 0, 0])); + Assert.Equal(120, TensorPrimitives.ProductOfSums([0, 0, 0, 0, 0], [1, 2, 3, 4, 5])); + Assert.Equal(float.NaN, TensorPrimitives.ProductOfSums([1, 2, float.NaN, 4, 5], [0, 0, 0, 0, 0])); + } + #endregion + + #region Sigmoid + [Theory] + [MemberData(nameof(TensorLengths))] + public static void Sigmoid(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength); + + TensorPrimitives.Sigmoid(x, destination); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(1f / (1f + MathF.Exp(-x[i])), destination[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void Sigmoid_InPlace(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); + + TensorPrimitives.Sigmoid(x, x); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(1f / (1f + MathF.Exp(-xOrig[i])), x[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengths))] + public static void Sigmoid_ThrowsForTooShortDestination(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength - 1); + + AssertExtensions.Throws("destination", () => TensorPrimitives.Sigmoid(x, destination)); + } + + [Theory] + [InlineData(new float[] { -5, -4.5f, -4 }, new float[] { 0.0066f, 0.0109f, 0.0179f })] + [InlineData(new float[] { 4.5f, 5 }, new float[] { 0.9890f, 0.9933f })] + [InlineData(new float[] { 0, -3, 3, .5f }, new float[] { 0.5f, 0.0474f, 0.9525f, 0.6224f })] + public static void Sigmoid_KnownValues(float[] x, float[] expectedResult) + { + using BoundedMemory dest = CreateTensor(x.Length); + TensorPrimitives.Sigmoid(x, dest); + + for (int i = 0; i < x.Length; i++) + { + Assert.Equal(expectedResult[i], dest[i], Tolerance); + } + } + + [Fact] + public static void 
Sigmoid_DestinationLongerThanSource() + { + float[] x = [-5, -4.5f, -4]; + float[] expectedResult = [0.0066f, 0.0109f, 0.0179f]; + using BoundedMemory dest = CreateTensor(x.Length + 1); + + TensorPrimitives.Sigmoid(x, dest); + + float originalLast = dest[dest.Length - 1]; + for (int i = 0; i < x.Length; i++) + { + Assert.Equal(expectedResult[i], dest[i], Tolerance); + } + Assert.Equal(originalLast, dest[dest.Length - 1]); + } + + [Fact] + public static void Sigmoid_ThrowsForEmptyInput() + { + AssertExtensions.Throws(() => TensorPrimitives.Sigmoid(ReadOnlySpan.Empty, CreateTensor(1))); + } + + [Fact] + public static void Sigmoid_ThrowsForOverlapppingInputsWithOutputs() + { + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Sigmoid(array.AsSpan(1, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Sigmoid(array.AsSpan(1, 2), array.AsSpan(2, 2))); + } + #endregion + + #region Sinh + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Sinh(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength); + + TensorPrimitives.Sinh(x, destination); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathF.Sinh(x[i]), destination[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Sinh_InPlace(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); - Assert.Equal(Enumerable.Min(MemoryMarshal.ToEnumerable(x.Memory)), TensorPrimitives.Min(x)); + TensorPrimitives.Sinh(x, x); - float min = float.PositiveInfinity; - foreach (float f in x.Span) + for (int i = 0; i < tensorLength; i++) { - min = Math.Min(min, f); + Assert.Equal(MathF.Sinh(xOrig[i]), x[i], Tolerance); } - Assert.Equal(min, TensorPrimitives.Min(x)); } [Theory] [MemberData(nameof(TensorLengths))] - public 
static void Min_NanReturned(int tensorLength) + public static void Sinh_ThrowsForTooShortDestination(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - foreach (int expected in new[] { 0, tensorLength / 2, tensorLength - 1 }) - { - x[expected] = float.NaN; - Assert.Equal(float.NaN, TensorPrimitives.Min(x)); - } + using BoundedMemory destination = CreateTensor(tensorLength - 1); + + AssertExtensions.Throws("destination", () => TensorPrimitives.Sinh(x, destination)); } [Fact] - public static void Min_Negative0LesserThanPositive0() + public static void Sinh_ThrowsForOverlapppingInputsWithOutputs() { - Assert.Equal(-0f, TensorPrimitives.Min([-0f, +0f])); - Assert.Equal(-0f, TensorPrimitives.Min([+0f, -0f])); - Assert.Equal(-1, TensorPrimitives.Min([-1, -0f])); - Assert.Equal(-1, TensorPrimitives.Min([-1, -0f, 1])); + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Sinh(array.AsSpan(1, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Sinh(array.AsSpan(1, 2), array.AsSpan(2, 2))); } + #endregion + #region SoftMax [Theory] - [MemberData(nameof(TensorLengthsIncluding0))] - public static void Min_TwoTensors(int tensorLength) + [MemberData(nameof(TensorLengths))] + public static void SoftMax(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength); - TensorPrimitives.Min(x, y, destination); + TensorPrimitives.SoftMax(x, destination); + float expSum = MemoryMarshal.ToEnumerable(x.Memory).Sum(MathF.Exp); for (int i = 0; i < tensorLength; i++) { - Assert.Equal(MathF.Min(x[i], y[i]), destination[i], Tolerance); + Assert.Equal(MathF.Exp(x[i]) / expSum, destination[i], Tolerance); } } [Theory] [MemberData(nameof(TensorLengths))] - public static void Min_TwoTensors_ThrowsForMismatchedLengths(int tensorLength) + public static 
void SoftMax_InPlace(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); - using BoundedMemory destination = CreateTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); - Assert.Throws(() => TensorPrimitives.Min(x, y, destination)); + TensorPrimitives.SoftMax(x, x); + + float expSum = xOrig.Sum(MathF.Exp); + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(MathF.Exp(xOrig[i]) / expSum, x[i], Tolerance); + } } [Theory] [MemberData(nameof(TensorLengths))] - public static void Min_TwoTensors_ThrowsForTooShortDestination(int tensorLength) + public static void SoftMax_ThrowsForTooShortDestination(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength - 1); - AssertExtensions.Throws("destination", () => TensorPrimitives.Min(x, y, destination)); - } - - [Fact] - public static void MinMagnitude_ThrowsForEmpty() - { - Assert.Throws(() => TensorPrimitives.MinMagnitude(ReadOnlySpan.Empty)); + AssertExtensions.Throws("destination", () => TensorPrimitives.SoftMax(x, destination)); } [Theory] - [MemberData(nameof(TensorLengths))] - public static void MinMagnitude(int tensorLength) + [InlineData(new float[] { 3, 1, .2f }, new float[] { 0.8360188f, 0.11314284f, 0.05083836f })] + [InlineData(new float[] { 3, 4, 1 }, new float[] { 0.2594f, 0.705384f, 0.0351f })] + [InlineData(new float[] { 5, 3 }, new float[] { 0.8807f, 0.1192f })] + [InlineData(new float[] { 4, 2, 1, 9 }, new float[] { 0.0066f, 9.04658e-4f, 3.32805e-4f, 0.9920f })] + public static void SoftMax_KnownValues(float[] x, float[] expectedResult) { - using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory dest = CreateTensor(x.Length); + TensorPrimitives.SoftMax(x, dest); - int index = 0; for (int i = 0; i < x.Length; i++) { - if (MathF.Abs(x[i]) < 
MathF.Abs(x[index])) - { - index = i; - } + Assert.Equal(expectedResult[i], dest[i], Tolerance); } - - Assert.Equal(x[index], TensorPrimitives.MinMagnitude(x), Tolerance); } - [Theory] - [MemberData(nameof(TensorLengths))] - public static void MinMagnitude_NanReturned(int tensorLength) + [Fact] + public static void SoftMax_DestinationLongerThanSource() { - using BoundedMemory x = CreateAndFillTensor(tensorLength); - foreach (int expected in new[] { 0, tensorLength / 2, tensorLength - 1 }) + float[] x = [3, 1, .2f]; + float[] expectedResult = [0.8360188f, 0.11314284f, 0.05083836f]; + using BoundedMemory dest = CreateTensor(x.Length + 1); + TensorPrimitives.SoftMax(x, dest); + + for (int i = 0; i < x.Length; i++) { - x[expected] = float.NaN; - Assert.Equal(float.NaN, TensorPrimitives.MinMagnitude(x)); + Assert.Equal(expectedResult[i], dest[i], Tolerance); } } [Fact] - public static void MinMagnitude_Negative0LesserThanPositive0() + public static void SoftMax_ThrowsForEmptyInput() { - Assert.Equal(0, TensorPrimitives.MinMagnitude([-0f, +0f])); - Assert.Equal(0, TensorPrimitives.MinMagnitude([+0f, -0f])); - Assert.Equal(0, TensorPrimitives.MinMagnitude([-1, -0f])); - Assert.Equal(0, TensorPrimitives.MinMagnitude([-1, -0f, 1])); + AssertExtensions.Throws(() => TensorPrimitives.SoftMax(ReadOnlySpan.Empty, CreateTensor(1))); + } + + [Fact] + public static void SoftMax_ThrowsForOverlapppingInputsWithOutputs() + { + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.SoftMax(array.AsSpan(1, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.SoftMax(array.AsSpan(1, 2), array.AsSpan(2, 2))); } + #endregion + #region Subtract [Theory] [MemberData(nameof(TensorLengthsIncluding0))] - public static void MinMagnitude_TwoTensors(int tensorLength) + public static void Subtract_TwoTensors(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); using BoundedMemory y = 
CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength); - TensorPrimitives.MinMagnitude(x, y, destination); + TensorPrimitives.Subtract(x, y, destination); + + for (int i = 0; i < tensorLength; i++) + { + Assert.Equal(x[i] - y[i], destination[i], Tolerance); + } + } + + [Theory] + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Subtract_TwoTensors_InPlace(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); + + TensorPrimitives.Subtract(x, x, x); for (int i = 0; i < tensorLength; i++) { - Assert.Equal(MathF.Abs(x[i]) < MathF.Abs(y[i]) ? x[i] : y[i], destination[i], Tolerance); + Assert.Equal(xOrig[i] - xOrig[i], x[i], Tolerance); } } [Theory] [MemberData(nameof(TensorLengths))] - public static void MinMagnitude_TwoTensors_ThrowsForMismatchedLengths(int tensorLength) + public static void Subtract_TwoTensors_ThrowsForMismatchedLengths(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); using BoundedMemory y = CreateAndFillTensor(tensorLength - 1); using BoundedMemory destination = CreateTensor(tensorLength); - Assert.Throws(() => TensorPrimitives.MinMagnitude(x, y, destination)); + Assert.Throws(() => TensorPrimitives.Subtract(x, y, destination)); + Assert.Throws(() => TensorPrimitives.Subtract(y, x, destination)); } [Theory] [MemberData(nameof(TensorLengths))] - public static void MinMagnitude_TwoTensors_ThrowsForTooShortDestination(int tensorLength) + public static void Subtract_TwoTensors_ThrowsForTooShortDestination(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); using BoundedMemory y = CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength - 1); - AssertExtensions.Throws("destination", () => TensorPrimitives.MinMagnitude(x, y, destination)); + AssertExtensions.Throws("destination", () => TensorPrimitives.Subtract(x, y, destination)); } [Fact] 
- public static void Product_ThrowsForEmpty() + public static void Subtract_TwoTensors_ThrowsForOverlapppingInputsWithOutputs() { - Assert.Throws(() => TensorPrimitives.Product(ReadOnlySpan.Empty)); + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Subtract(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Subtract(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(2, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Subtract(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(3, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Subtract(array.AsSpan(1, 2), array.AsSpan(4, 2), array.AsSpan(5, 2))); } [Theory] - [MemberData(nameof(TensorLengths))] - public static void Product(int tensorLength) + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Subtract_TensorScalar(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); + float y = NextSingle(); + using BoundedMemory destination = CreateTensor(tensorLength); - float f = x[0]; - for (int i = 1; i < x.Length; i++) + TensorPrimitives.Subtract(x, y, destination); + + for (int i = 0; i < tensorLength; i++) { - f *= x[i]; + Assert.Equal(x[i] - y, destination[i], Tolerance); } - - Assert.Equal(f, TensorPrimitives.Product(x), Tolerance); - } - - [Fact] - public static void Product_KnownValues() - { - Assert.Equal(1, TensorPrimitives.Product([1])); - Assert.Equal(-2, TensorPrimitives.Product([1, -2])); - Assert.Equal(-6, TensorPrimitives.Product([1, -2, 3])); - Assert.Equal(24, TensorPrimitives.Product([1, -2, 3, -4])); - Assert.Equal(120, TensorPrimitives.Product([1, -2, 3, -4, 5])); - Assert.Equal(-720, TensorPrimitives.Product([1, -2, 3, -4, 5, -6])); - Assert.Equal(0, TensorPrimitives.Product([1, -2, 3, -4, 5, -6, 0])); - Assert.Equal(0, TensorPrimitives.Product([0, 1, -2, 3, -4, 5, -6])); - Assert.Equal(0, 
TensorPrimitives.Product([1, -2, 3, 0, -4, 5, -6])); - Assert.Equal(float.NaN, TensorPrimitives.Product([1, -2, 3, float.NaN, -4, 5, -6])); - } - - [Fact] - public static void ProductOfDifferences_ThrowsForEmptyAndMismatchedLengths() - { - Assert.Throws(() => TensorPrimitives.ProductOfDifferences(ReadOnlySpan.Empty, ReadOnlySpan.Empty)); - Assert.Throws(() => TensorPrimitives.ProductOfDifferences(ReadOnlySpan.Empty, CreateTensor(1))); - Assert.Throws(() => TensorPrimitives.ProductOfDifferences(CreateTensor(1), ReadOnlySpan.Empty)); - Assert.Throws(() => TensorPrimitives.ProductOfDifferences(CreateTensor(44), CreateTensor(43))); - Assert.Throws(() => TensorPrimitives.ProductOfDifferences(CreateTensor(43), CreateTensor(44))); } [Theory] - [MemberData(nameof(TensorLengths))] - public static void ProductOfDifferences(int tensorLength) + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Subtract_TensorScalar_InPlace(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); + float y = NextSingle(); - float f = x[0] - y[0]; - for (int i = 1; i < x.Length; i++) + TensorPrimitives.Subtract(x, y, x); + + for (int i = 0; i < tensorLength; i++) { - f *= x[i] - y[i]; + Assert.Equal(xOrig[i] - y, x[i], Tolerance); } - Assert.Equal(f, TensorPrimitives.ProductOfDifferences(x, y), Tolerance); - } - - [Fact] - public static void ProductOfDifferences_KnownValues() - { - Assert.Equal(0, TensorPrimitives.ProductOfDifferences([0], [0])); - Assert.Equal(0, TensorPrimitives.ProductOfDifferences([1], [1])); - Assert.Equal(1, TensorPrimitives.ProductOfDifferences([1], [0])); - Assert.Equal(-1, TensorPrimitives.ProductOfDifferences([0], [1])); - Assert.Equal(-1, TensorPrimitives.ProductOfDifferences([1, 2, 3, 4, 5], [2, 3, 4, 5, 6])); - Assert.Equal(120, TensorPrimitives.ProductOfDifferences([1, 2, 3, 4, 5], [0, 0, 0, 0, 0])); - Assert.Equal(-120, 
TensorPrimitives.ProductOfDifferences([0, 0, 0, 0, 0], [1, 2, 3, 4, 5])); - Assert.Equal(float.NaN, TensorPrimitives.ProductOfDifferences([1, 2, float.NaN, 4, 5], [0, 0, 0, 0, 0])); - } - - [Fact] - public static void ProductOfSums_ThrowsForEmptyAndMismatchedLengths() - { - Assert.Throws(() => TensorPrimitives.ProductOfSums(ReadOnlySpan.Empty, ReadOnlySpan.Empty)); - Assert.Throws(() => TensorPrimitives.ProductOfSums(ReadOnlySpan.Empty, CreateTensor(1))); - Assert.Throws(() => TensorPrimitives.ProductOfSums(CreateTensor(1), ReadOnlySpan.Empty)); - Assert.Throws(() => TensorPrimitives.ProductOfSums(CreateTensor(44), CreateTensor(43))); - Assert.Throws(() => TensorPrimitives.ProductOfSums(CreateTensor(43), CreateTensor(44))); } [Theory] [MemberData(nameof(TensorLengths))] - public static void ProductOfSums(int tensorLength) + public static void Subtract_TensorScalar_ThrowsForTooShortDestination(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory y = CreateAndFillTensor(tensorLength); + float y = NextSingle(); + using BoundedMemory destination = CreateTensor(tensorLength - 1); - float f = x[0] + y[0]; - for (int i = 1; i < x.Length; i++) - { - f *= x[i] + y[i]; - } - Assert.Equal(f, TensorPrimitives.ProductOfSums(x, y), Tolerance); + AssertExtensions.Throws("destination", () => TensorPrimitives.Subtract(x, y, destination)); } [Fact] - public static void ProductOfSums_KnownValues() + public static void Subtract_TensorScalar_ThrowsForOverlapppingInputsWithOutputs() { - Assert.Equal(0, TensorPrimitives.ProductOfSums([0], [0])); - Assert.Equal(1, TensorPrimitives.ProductOfSums([0], [1])); - Assert.Equal(1, TensorPrimitives.ProductOfSums([1], [0])); - Assert.Equal(2, TensorPrimitives.ProductOfSums([1], [1])); - Assert.Equal(10395, TensorPrimitives.ProductOfSums([1, 2, 3, 4, 5], [2, 3, 4, 5, 6])); - Assert.Equal(120, TensorPrimitives.ProductOfSums([1, 2, 3, 4, 5], [0, 0, 0, 0, 0])); - Assert.Equal(120, 
TensorPrimitives.ProductOfSums([0, 0, 0, 0, 0], [1, 2, 3, 4, 5])); - Assert.Equal(float.NaN, TensorPrimitives.ProductOfSums([1, 2, float.NaN, 4, 5], [0, 0, 0, 0, 0])); + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Subtract(array.AsSpan(1, 2), 42, array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Subtract(array.AsSpan(1, 2), 42, array.AsSpan(2, 2))); } + #endregion + #region Sum [Theory] [MemberData(nameof(TensorLengths))] public static void Sum(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - Assert.Equal(Enumerable.Sum(MemoryMarshal.ToEnumerable(x.Memory)), TensorPrimitives.Sum(x), Tolerance); + Assert.Equal(MemoryMarshal.ToEnumerable(x.Memory).Sum(), TensorPrimitives.Sum(x), Tolerance); float sum = 0; foreach (float f in x.Span) @@ -1612,7 +2633,37 @@ public static void Sum_KnownValues() Assert.Equal(0, TensorPrimitives.Sum([-3, 0, 3])); Assert.Equal(float.NaN, TensorPrimitives.Sum([-3, float.NaN, 3])); } + #endregion + + #region SumOfMagnitudes + [Theory] + [MemberData(nameof(TensorLengths))] + public static void SumOfMagnitudes(int tensorLength) + { + using BoundedMemory x = CreateAndFillTensor(tensorLength); + + Assert.Equal(Enumerable.Sum(MemoryMarshal.ToEnumerable(x.Memory), MathF.Abs), TensorPrimitives.SumOfMagnitudes(x), Tolerance); + + float sum = 0; + foreach (float f in x.Span) + { + sum += MathF.Abs(f); + } + Assert.Equal(sum, TensorPrimitives.SumOfMagnitudes(x), Tolerance); + } + + [Fact] + public static void SumOfMagnitudes_KnownValues() + { + Assert.Equal(0, TensorPrimitives.SumOfMagnitudes([0])); + Assert.Equal(1, TensorPrimitives.SumOfMagnitudes([0, 1])); + Assert.Equal(6, TensorPrimitives.SumOfMagnitudes([1, 2, 3])); + Assert.Equal(6, TensorPrimitives.SumOfMagnitudes([-3, 0, 3])); + Assert.Equal(float.NaN, TensorPrimitives.SumOfMagnitudes([-3, float.NaN, 3])); + } + #endregion + #region SumOfSquares [Theory] 
[MemberData(nameof(TensorLengths))] public static void SumOfSquares(int tensorLength) @@ -1638,56 +2689,56 @@ public static void SumOfSquares_KnownValues() Assert.Equal(18, TensorPrimitives.SumOfSquares([-3, 0, 3])); Assert.Equal(float.NaN, TensorPrimitives.SumOfSquares([-3, float.NaN, 3])); } + #endregion + #region Tanh [Theory] - [MemberData(nameof(TensorLengths))] - public static void SumOfMagnitudes(int tensorLength) + [MemberData(nameof(TensorLengthsIncluding0))] + public static void Tanh(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = CreateTensor(tensorLength); - Assert.Equal(Enumerable.Sum(MemoryMarshal.ToEnumerable(x.Memory), MathF.Abs), TensorPrimitives.SumOfMagnitudes(x), Tolerance); + TensorPrimitives.Tanh(x, destination); - float sum = 0; - foreach (float f in x.Span) + for (int i = 0; i < tensorLength; i++) { - sum += MathF.Abs(f); + Assert.Equal(MathF.Tanh(x[i]), destination[i], Tolerance); } - Assert.Equal(sum, TensorPrimitives.SumOfMagnitudes(x), Tolerance); - } - - [Fact] - public static void SumOfMagnitudes_KnownValues() - { - Assert.Equal(0, TensorPrimitives.SumOfMagnitudes([0])); - Assert.Equal(1, TensorPrimitives.SumOfMagnitudes([0, 1])); - Assert.Equal(6, TensorPrimitives.SumOfMagnitudes([1, 2, 3])); - Assert.Equal(6, TensorPrimitives.SumOfMagnitudes([-3, 0, 3])); - Assert.Equal(float.NaN, TensorPrimitives.SumOfMagnitudes([-3, float.NaN, 3])); } [Theory] [MemberData(nameof(TensorLengthsIncluding0))] - public static void Abs(int tensorLength) + public static void Tanh_InPlace(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); - using BoundedMemory destination = CreateTensor(tensorLength); + float[] xOrig = x.Span.ToArray(); - TensorPrimitives.Abs(x, destination); + TensorPrimitives.Tanh(x, x); - for (int i = 0; i < x.Length; i++) + for (int i = 0; i < tensorLength; i++) { - Assert.Equal(MathF.Abs(x[i]), destination[i], Tolerance); + 
Assert.Equal(MathF.Tanh(xOrig[i]), x[i], Tolerance); } } [Theory] [MemberData(nameof(TensorLengths))] - public static void Abs_ThrowsForTooShortDestination(int tensorLength) + public static void Tanh_ThrowsForTooShortDestination(int tensorLength) { using BoundedMemory x = CreateAndFillTensor(tensorLength); using BoundedMemory destination = CreateTensor(tensorLength - 1); - AssertExtensions.Throws("destination", () => TensorPrimitives.Abs(x, destination)); + AssertExtensions.Throws("destination", () => TensorPrimitives.Tanh(x, destination)); + } + + [Fact] + public static void Tanh_ThrowsForOverlapppingInputsWithOutputs() + { + float[] array = new float[10]; + AssertExtensions.Throws("destination", () => TensorPrimitives.Tanh(array.AsSpan(1, 2), array.AsSpan(0, 2))); + AssertExtensions.Throws("destination", () => TensorPrimitives.Tanh(array.AsSpan(1, 2), array.AsSpan(2, 2))); } + #endregion } } diff --git a/src/libraries/System.Numerics.Tensors/tests/TensorPrimitivesTests.netcore.cs b/src/libraries/System.Numerics.Tensors/tests/TensorPrimitivesTests.netcore.cs index 113f26048d352..af29b1d204203 100644 --- a/src/libraries/System.Numerics.Tensors/tests/TensorPrimitivesTests.netcore.cs +++ b/src/libraries/System.Numerics.Tensors/tests/TensorPrimitivesTests.netcore.cs @@ -8,15 +8,16 @@ namespace System.Numerics.Tensors.Tests { public static partial class TensorPrimitivesTests { + #region ConvertToHalf [Theory] - [InlineData(0)] - [MemberData(nameof(TensorLengths))] + [MemberData(nameof(TensorLengthsIncluding0))] public static void ConvertToHalf(int tensorLength) { using BoundedMemory source = CreateAndFillTensor(tensorLength); foreach (int destLength in new[] { source.Length, source.Length + 1 }) { - Half[] destination = new Half[destLength]; + using BoundedMemory destination = BoundedMemory.Allocate(destLength); + destination.Span.Fill(Half.Zero); TensorPrimitives.ConvertToHalf(source, destination); @@ -35,6 +36,29 @@ public static void ConvertToHalf(int tensorLength) 
} } + [Theory] + [ActiveIssue("https://github.com/dotnet/runtime/issues/92885", typeof(PlatformDetection), nameof(PlatformDetection.IsBrowser), nameof(PlatformDetection.IsMonoAOT))] + [MemberData(nameof(TensorLengths))] + public static void ConvertToHalf_SpecialValues(int tensorLength) + { + using BoundedMemory source = CreateAndFillTensor(tensorLength); + using BoundedMemory destination = BoundedMemory.Allocate(tensorLength); + + // NaN, infinities, and 0s + source[s_random.Next(source.Length)] = float.NaN; + source[s_random.Next(source.Length)] = float.PositiveInfinity; + source[s_random.Next(source.Length)] = float.NegativeInfinity; + source[s_random.Next(source.Length)] = 0; + source[s_random.Next(source.Length)] = float.NegativeZero; + + TensorPrimitives.ConvertToHalf(source, destination); + + for (int i = 0; i < source.Length; i++) + { + Assert.Equal((Half)source[i], destination[i]); + } + } + [Theory] [MemberData(nameof(TensorLengths))] public static void ConvertToHalf_ThrowsForTooShortDestination(int tensorLength) @@ -44,13 +68,14 @@ public static void ConvertToHalf_ThrowsForTooShortDestination(int tensorLength) AssertExtensions.Throws("destination", () => TensorPrimitives.ConvertToHalf(source, destination)); } + #endregion + #region ConvertToSingle [Theory] - [InlineData(0)] - [MemberData(nameof(TensorLengths))] + [MemberData(nameof(TensorLengthsIncluding0))] public static void ConvertToSingle(int tensorLength) { - Half[] source = new Half[tensorLength]; + using BoundedMemory source = BoundedMemory.Allocate(tensorLength); for (int i = 0; i < source.Length; i++) { source[i] = (Half)s_random.NextSingle(); @@ -77,6 +102,32 @@ public static void ConvertToSingle(int tensorLength) } } } + [Theory] + [MemberData(nameof(TensorLengths))] + public static void ConvertToSingle_SpecialValues(int tensorLength) + { + using BoundedMemory source = BoundedMemory.Allocate(tensorLength); + for (int i = 0; i < source.Length; i++) + { + source[i] = (Half)s_random.NextSingle(); 
+ } + + using BoundedMemory destination = CreateTensor(tensorLength); + + // NaN, infinities, and 0s + source[s_random.Next(source.Length)] = Half.NaN; + source[s_random.Next(source.Length)] = Half.PositiveInfinity; + source[s_random.Next(source.Length)] = Half.NegativeInfinity; + source[s_random.Next(source.Length)] = Half.Zero; + source[s_random.Next(source.Length)] = Half.NegativeZero; + + TensorPrimitives.ConvertToSingle(source, destination); + + for (int i = 0; i < source.Length; i++) + { + Assert.Equal((float)source[i], destination[i]); + } + } [Theory] [MemberData(nameof(TensorLengths))] @@ -87,5 +138,6 @@ public static void ConvertToSingle_ThrowsForTooShortDestination(int tensorLength AssertExtensions.Throws("destination", () => TensorPrimitives.ConvertToSingle(source, destination)); } + #endregion } } diff --git a/src/libraries/System.Private.CoreLib/src/Resources/Strings.resx b/src/libraries/System.Private.CoreLib/src/Resources/Strings.resx index 2e00d445ee47e..e1cc39aeaf4aa 100644 --- a/src/libraries/System.Private.CoreLib/src/Resources/Strings.resx +++ b/src/libraries/System.Private.CoreLib/src/Resources/Strings.resx @@ -3635,6 +3635,9 @@ Unknown error "{0}". + + One or more system calls failed: {0} + Operation could destabilize the runtime. 
diff --git a/src/libraries/System.Private.CoreLib/src/System.Private.CoreLib.Shared.projitems b/src/libraries/System.Private.CoreLib/src/System.Private.CoreLib.Shared.projitems index 0d6ae242ae6ec..2b774a2f4fd55 100644 --- a/src/libraries/System.Private.CoreLib/src/System.Private.CoreLib.Shared.projitems +++ b/src/libraries/System.Private.CoreLib/src/System.Private.CoreLib.Shared.projitems @@ -434,6 +434,7 @@ + @@ -1013,7 +1014,9 @@ + + @@ -2148,6 +2151,7 @@ + @@ -2205,7 +2209,6 @@ Common\System\Memory\FixedBufferExtensions.cs - @@ -2716,4 +2719,4 @@ - \ No newline at end of file + diff --git a/src/libraries/System.Private.CoreLib/src/System/Diagnostics/Tracing/EventPipeEventDispatcher.cs b/src/libraries/System.Private.CoreLib/src/System/Diagnostics/Tracing/EventPipeEventDispatcher.cs index 708c5afc1bc70..030560b200214 100644 --- a/src/libraries/System.Private.CoreLib/src/System/Diagnostics/Tracing/EventPipeEventDispatcher.cs +++ b/src/libraries/System.Private.CoreLib/src/System/Diagnostics/Tracing/EventPipeEventDispatcher.cs @@ -103,8 +103,8 @@ private void CommitDispatchConfiguration() new EventPipeProviderConfiguration(NativeRuntimeEventSource.EventSourceName, (ulong)aggregatedKeywords, (uint)enableLevel, null) }; - m_sessionID = EventPipeInternal.Enable(null, EventPipeSerializationFormat.NetTrace, DefaultEventListenerCircularMBSize, providerConfiguration); - if (m_sessionID == 0) + ulong sessionID = EventPipeInternal.Enable(null, EventPipeSerializationFormat.NetTrace, DefaultEventListenerCircularMBSize, providerConfiguration); + if (sessionID == 0) { throw new EventSourceException(SR.EventSource_CouldNotEnableEventPipe); } @@ -113,7 +113,7 @@ private void CommitDispatchConfiguration() EventPipeSessionInfo sessionInfo; unsafe { - if (!EventPipeInternal.GetSessionInfo(m_sessionID, &sessionInfo)) + if (!EventPipeInternal.GetSessionInfo(sessionID, &sessionInfo)) { Debug.Fail("GetSessionInfo returned false."); } @@ -124,8 +124,11 @@ private void 
CommitDispatchConfiguration() long syncTimeQPC = sessionInfo.StartTimeStamp; long timeQPCFrequency = sessionInfo.TimeStampFrequency; + Debug.Assert(Volatile.Read(ref m_sessionID) == 0); + Volatile.Write(ref m_sessionID, sessionID); + // Start the dispatch task. - StartDispatchTask(m_sessionID, syncTimeUtc, syncTimeQPC, timeQPCFrequency); + StartDispatchTask(sessionID, syncTimeUtc, syncTimeQPC, timeQPCFrequency); } private void StartDispatchTask(ulong sessionID, DateTime syncTimeUtc, long syncTimeQPC, long timeQPCFrequency) @@ -147,10 +150,11 @@ private void SetStopDispatchTask() return; } - Debug.Assert(m_sessionID != 0); + ulong sessionID = Volatile.Read(ref m_sessionID); + Debug.Assert(sessionID != 0); m_dispatchTaskCancellationSource.Cancel(); - EventPipeInternal.SignalSession(m_sessionID); - m_sessionID = 0; + EventPipeInternal.SignalSession(sessionID); + Volatile.Write(ref m_sessionID, 0); } private unsafe void DispatchEventsToEventListeners(ulong sessionID, DateTime syncTimeUtc, long syncTimeQPC, long timeQPCFrequency, Task? previousDispatchTask, CancellationToken token) @@ -190,12 +194,17 @@ private unsafe void DispatchEventsToEventListeners(ulong sessionID, DateTime syn } } - lock (m_dispatchControlLock) + // Wait for SignalSession() to be called before we call disable, otherwise + // the SignalSession() call could be on a disabled session. + SpinWait sw = default; + while (Volatile.Read(ref m_sessionID) == sessionID) { - // Disable the old session. This can happen asynchronously since we aren't using the old session - // anymore. We take the lock to make sure we don't call SignalSession on an invalid session ID. - EventPipeInternal.Disable(sessionID); + sw.SpinOnce(); } + + // Disable the old session. This can happen asynchronously since we aren't using the old session + // anymore. 
+ EventPipeInternal.Disable(sessionID); } /// diff --git a/src/libraries/System.Private.CoreLib/src/System/Half.cs b/src/libraries/System.Private.CoreLib/src/System/Half.cs index 8daa37bbab576..cd3e6ab3ed73c 100644 --- a/src/libraries/System.Private.CoreLib/src/System/Half.cs +++ b/src/libraries/System.Private.CoreLib/src/System/Half.cs @@ -1044,7 +1044,7 @@ public static explicit operator float(Half value) // BitConverter.SingleToUInt32Bits(1.0f) - ((uint)BitConverter.HalfToUInt16Bits((Half)1.0f) << 13) const uint ExponentOffset = 0x3800_0000u; // Mask for sign bit in Single - const uint FloatSignMask = float.SignMask; + const uint SingleSignMask = float.SignMask; // Mask for exponent bits in Half const uint HalfExponentMask = BiasedExponentMask; // Mask for bits in Single converted from Half @@ -1052,7 +1052,7 @@ public static explicit operator float(Half value) // Extract the internal representation of value short valueInInt16Bits = BitConverter.HalfToInt16Bits(value); // Extract sign bit of value - uint sign = (uint)(int)valueInInt16Bits & FloatSignMask; + uint sign = (uint)(int)valueInInt16Bits & SingleSignMask; // Copy sign bit to upper bits uint bitValueInProcess = (uint)valueInInt16Bits; // Extract exponent bits of value (BiasedExponent is not for here as it performs unnecessary shift) diff --git a/src/libraries/System.Private.CoreLib/src/System/Reflection/NullabilityInfoContext.cs b/src/libraries/System.Private.CoreLib/src/System/Reflection/NullabilityInfoContext.cs index e3e4da36d096b..9411bad737716 100644 --- a/src/libraries/System.Private.CoreLib/src/System/Reflection/NullabilityInfoContext.cs +++ b/src/libraries/System.Private.CoreLib/src/System/Reflection/NullabilityInfoContext.cs @@ -86,35 +86,47 @@ public NullabilityInfo Create(ParameterInfo parameterInfo) private void CheckParameterMetadataType(ParameterInfo parameter, NullabilityInfo nullability) { - if (parameter.Member is MethodInfo method) + ParameterInfo? 
metaParameter; + MemberInfo metaMember; + + switch (parameter.Member) { - MethodInfo metaMethod = GetMethodMetadataDefinition(method); - ParameterInfo? metaParameter = null; - if (string.IsNullOrEmpty(parameter.Name)) - { - metaParameter = metaMethod.ReturnParameter; - } - else - { - ReadOnlySpan parameters = metaMethod.GetParametersAsSpan(); - for (int i = 0; i < parameters.Length; i++) - { - if (parameter.Position == i && - parameter.Name == parameters[i].Name) - { - metaParameter = parameters[i]; - break; - } - } - } + case ConstructorInfo ctor: + var metaCtor = (ConstructorInfo)GetMemberMetadataDefinition(ctor); + metaMember = metaCtor; + metaParameter = GetMetaParameter(metaCtor, parameter); + break; + + case MethodInfo method: + MethodInfo metaMethod = GetMethodMetadataDefinition(method); + metaMember = metaMethod; + metaParameter = string.IsNullOrEmpty(parameter.Name) ? metaMethod.ReturnParameter : GetMetaParameter(metaMethod, parameter); + break; + + default: + return; + } + + if (metaParameter != null) + { + CheckGenericParameters(nullability, metaMember, metaParameter.ParameterType, parameter.Member.ReflectedType); + } + } - if (metaParameter != null) + private static ParameterInfo? 
GetMetaParameter(MethodBase metaMethod, ParameterInfo parameter) + { + ReadOnlySpan parameters = metaMethod.GetParametersAsSpan(); + for (int i = 0; i < parameters.Length; i++) + { + if (parameter.Position == i && + parameter.Name == parameters[i].Name) { - CheckGenericParameters(nullability, metaMethod, metaParameter.ParameterType, parameter.Member.ReflectedType); + return parameters[i]; } } - } + return null; + } private static MethodInfo GetMethodMetadataDefinition(MethodInfo method) { if (method.IsGenericMethod && !method.IsGenericMethodDefinition) diff --git a/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/LibraryImportAttribute.cs b/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/LibraryImportAttribute.cs index f8ef31ee41f61..dc8768a6f81de 100644 --- a/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/LibraryImportAttribute.cs +++ b/src/libraries/System.Private.CoreLib/src/System/Runtime/InteropServices/LibraryImportAttribute.cs @@ -17,6 +17,10 @@ namespace System.Runtime.InteropServices #if SYSTEM_PRIVATE_CORELIB public #else +#pragma warning disable CS0436 // Type conflicts with imported type + // Some assemblies that target downlevel have InternalsVisibleTo to their test assembiles. + // As this is only used in this repo and isn't a problem in shipping code, + // just disable the duplicate type warning. 
internal #endif sealed class LibraryImportAttribute : Attribute diff --git a/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/AdvSimd.PlatformNotSupported.cs b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/AdvSimd.PlatformNotSupported.cs index a945aecc79d09..2eb823f56d97d 100644 --- a/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/AdvSimd.PlatformNotSupported.cs +++ b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/AdvSimd.PlatformNotSupported.cs @@ -3176,6 +3176,56 @@ internal Arm64() { } /// public static unsafe void StorePairScalarNonTemporal(uint* address, Vector64 value1, Vector64 value2) { throw new PlatformNotSupportedException(); } + /// + /// A64: ST2 { Vn.16B, Vn+1.16B }, [Xn] + /// + public static unsafe void StoreVector128x2(byte* address, (Vector128 Value1, Vector128 Value2) value) { throw new PlatformNotSupportedException(); } + + /// + /// A64: ST2 { Vn.16B, Vn+1.16B }, [Xn] + /// + public static unsafe void StoreVector128x2(sbyte* address, (Vector128 Value1, Vector128 Value2) value) { throw new PlatformNotSupportedException(); } + + /// + /// A64: ST2 { Vn.8H, Vn+1.8H }, [Xn] + /// + public static unsafe void StoreVector128x2(short* address, (Vector128 Value1, Vector128 Value2) value) { throw new PlatformNotSupportedException(); } + + /// + /// A64: ST2 { Vn.8H, Vn+1.8H }, [Xn] + /// + public static unsafe void StoreVector128x2(ushort* address, (Vector128 Value1, Vector128 Value2) value) { throw new PlatformNotSupportedException(); } + + /// + /// A64: ST2 { Vn.4S, Vn+1.4S }, [Xn] + /// + public static unsafe void StoreVector128x2(int* address, (Vector128 Value1, Vector128 Value2) value) { throw new PlatformNotSupportedException(); } + + /// + /// A64: ST2 { Vn.4S, Vn+1.4S }, [Xn] + /// + public static unsafe void StoreVector128x2(uint* address, (Vector128 Value1, Vector128 Value2) value) { throw new PlatformNotSupportedException(); } + + /// + /// A64: ST2 
{ Vn.2D, Vn+1.2D }, [Xn] + /// + public static unsafe void StoreVector128x2(long* address, (Vector128 Value1, Vector128 Value2) value) { throw new PlatformNotSupportedException(); } + + /// + /// A64: ST2 { Vn.2D, Vn+1.2D }, [Xn] + /// + public static unsafe void StoreVector128x2(ulong* address, (Vector128 Value1, Vector128 Value2) value) { throw new PlatformNotSupportedException(); } + + /// + /// A64: ST2 { Vn.4S, Vn+1.4S }, [Xn] + /// + public static unsafe void StoreVector128x2(float* address, (Vector128 Value1, Vector128 Value2) value) { throw new PlatformNotSupportedException(); } + + /// + /// A64: ST2 { Vn.2D, Vn+1.2D }, [Xn] + /// + public static unsafe void StoreVector128x2(double* address, (Vector128 Value1, Vector128 Value2) value) { throw new PlatformNotSupportedException(); } + /// /// float64x2_t vsubq_f64 (float64x2_t a, float64x2_t b) /// A64: FSUB Vd.2D, Vn.2D, Vm.2D @@ -14437,6 +14487,41 @@ internal Arm64() { } /// public static unsafe void StoreSelectedScalar(ulong* address, Vector128 value, [ConstantExpected(Max = (byte)(1))] byte index) { throw new PlatformNotSupportedException(); } + /// + /// A64: ST2 { Vn.8B, Vn+1.8B }, [Xn] + /// + public static unsafe void StoreVector64x2(byte* address, (Vector64 Value1, Vector64 Value2) value) { throw new PlatformNotSupportedException(); } + + /// + /// A64: ST2 { Vn.8B, Vn+1.8B }, [Xn] + /// + public static unsafe void StoreVector64x2(sbyte* address, (Vector64 Value1, Vector64 Value2) value) { throw new PlatformNotSupportedException(); } + + /// + /// A64: ST2 { Vn.4H, Vn+1.4H }, [Xn] + /// + public static unsafe void StoreVector64x2(short* address, (Vector64 Value1, Vector64 Value2) value) { throw new PlatformNotSupportedException(); } + + /// + /// A64: ST2 { Vn.4H, Vn+1.4H }, [Xn] + /// + public static unsafe void StoreVector64x2(ushort* address, (Vector64 Value1, Vector64 Value2) value) { throw new PlatformNotSupportedException(); } + + /// + /// A64: ST2 { Vn.2S, Vn+1.2S }, [Xn] + /// + public 
static unsafe void StoreVector64x2(int* address, (Vector64 Value1, Vector64 Value2) value) { throw new PlatformNotSupportedException(); } + + /// + /// A64: ST2 { Vn.2S, Vn+1.2S }, [Xn] + /// + public static unsafe void StoreVector64x2(uint* address, (Vector64 Value1, Vector64 Value2) value) { throw new PlatformNotSupportedException(); } + + /// + /// A64: ST2 { Vn.2S, Vn+1.2S }, [Xn] + /// + public static unsafe void StoreVector64x2(float* address, (Vector64 Value1, Vector64 Value2) value) { throw new PlatformNotSupportedException(); } + /// /// uint8x8_t vsub_u8 (uint8x8_t a, uint8x8_t b) /// A32: VSUB.I8 Dd, Dn, Dm diff --git a/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/AdvSimd.cs b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/AdvSimd.cs index 72f1c60311491..5d22812bcd9ff 100644 --- a/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/AdvSimd.cs +++ b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/AdvSimd.cs @@ -3174,6 +3174,56 @@ internal Arm64() { } /// public static unsafe void StorePairScalarNonTemporal(uint* address, Vector64 value1, Vector64 value2) => StorePairScalarNonTemporal(address, value1, value2); + /// + /// A64: ST2 { Vn.16B, Vn+1.16B }, [Xn] + /// + public static unsafe void StoreVector128x2(byte* address, (Vector128 Value1, Vector128 Value2) value) => StoreVector128x2(address, value); + + /// + /// A64: ST2 { Vn.16B, Vn+1.16B }, [Xn] + /// + public static unsafe void StoreVector128x2(sbyte* address, (Vector128 Value1, Vector128 Value2) value) => StoreVector128x2(address, value); + + /// + /// A64: ST2 { Vn.8H, Vn+1.8H }, [Xn] + /// + public static unsafe void StoreVector128x2(short* address, (Vector128 Value1, Vector128 Value2) value) => StoreVector128x2(address, value); + + /// + /// A64: ST2 { Vn.8H, Vn+1.8H }, [Xn] + /// + public static unsafe void StoreVector128x2(ushort* address, (Vector128 Value1, Vector128 Value2) value) => 
StoreVector128x2(address, value); + + /// + /// A64: ST2 { Vn.4S, Vn+1.4S }, [Xn] + /// + public static unsafe void StoreVector128x2(int* address, (Vector128 Value1, Vector128 Value2) value) => StoreVector128x2(address, value); + + /// + /// A64: ST2 { Vn.4S, Vn+1.4S }, [Xn] + /// + public static unsafe void StoreVector128x2(uint* address, (Vector128 Value1, Vector128 Value2) value) => StoreVector128x2(address, value); + + /// + /// A64: ST2 { Vn.2D, Vn+1.2D }, [Xn] + /// + public static unsafe void StoreVector128x2(long* address, (Vector128 Value1, Vector128 Value2) value) => StoreVector128x2(address, value); + + /// + /// A64: ST2 { Vn.2D, Vn+1.2D }, [Xn] + /// + public static unsafe void StoreVector128x2(ulong* address, (Vector128 Value1, Vector128 Value2) value) => StoreVector128x2(address, value); + + /// + /// A64: ST2 { Vn.4S, Vn+1.4S }, [Xn] + /// + public static unsafe void StoreVector128x2(float* address, (Vector128 Value1, Vector128 Value2) value) => StoreVector128x2(address, value); + + /// + /// A64: ST2 { Vn.2D, Vn+1.2D }, [Xn] + /// + public static unsafe void StoreVector128x2(double* address, (Vector128 Value1, Vector128 Value2) value) => StoreVector128x2(address, value); + /// /// float64x2_t vsubq_f64 (float64x2_t a, float64x2_t b) /// A64: FSUB Vd.2D, Vn.2D, Vm.2D @@ -14434,6 +14484,41 @@ internal Arm64() { } /// public static unsafe void StoreSelectedScalar(ulong* address, Vector128 value, [ConstantExpected(Max = (byte)(1))] byte index) => StoreSelectedScalar(address, value, index); + /// + /// A64: ST2 { Vn.8B, Vn+1.8B }, [Xn] + /// + public static unsafe void StoreVector64x2(byte* address, (Vector64 Value1, Vector64 Value2) value) => StoreVector64x2(address, value); + + /// + /// A64: ST2 { Vn.8B, Vn+1.8B }, [Xn] + /// + public static unsafe void StoreVector64x2(sbyte* address, (Vector64 Value1, Vector64 Value2) value) => StoreVector64x2(address, value); + + /// + /// A64: ST2 { Vn.4H, Vn+1.4H }, [Xn] + /// + public static unsafe void 
StoreVector64x2(short* address, (Vector64 Value1, Vector64 Value2) value) => StoreVector64x2(address, value); + + /// + /// A64: ST2 { Vn.4H, Vn+1.4H }, [Xn] + /// + public static unsafe void StoreVector64x2(ushort* address, (Vector64 Value1, Vector64 Value2) value) => StoreVector64x2(address, value); + + /// + /// A64: ST2 { Vn.2S, Vn+1.2S }, [Xn] + /// + public static unsafe void StoreVector64x2(int* address, (Vector64 Value1, Vector64 Value2) value) => StoreVector64x2(address, value); + + /// + /// A64: ST2 { Vn.2S, Vn+1.2S }, [Xn] + /// + public static unsafe void StoreVector64x2(uint* address, (Vector64 Value1, Vector64 Value2) value) => StoreVector64x2(address, value); + + /// + /// A64: ST2 { Vn.2S, Vn+1.2S }, [Xn] + /// + public static unsafe void StoreVector64x2(float* address, (Vector64 Value1, Vector64 Value2) value) => StoreVector64x2(address, value); + /// /// uint8x8_t vsub_u8 (uint8x8_t a, uint8x8_t b) /// A32: VSUB.I8 Dd, Dn, Dm diff --git a/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/ISimdVector_2.cs b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/ISimdVector_2.cs new file mode 100644 index 0000000000000..a1d49045bb2ac --- /dev/null +++ b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/ISimdVector_2.cs @@ -0,0 +1,558 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Numerics; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + +namespace System.Runtime.Intrinsics +{ + /// Defines a single instruction, multiple data (SIMD) vector type. + /// The type that implements the interface. + /// The type of the elements in the vector. 
+ internal unsafe interface ISimdVector + : IAdditionOperators, + // IAdditiveIdentity, + IBitwiseOperators, + // IComparisonOperators, + // IDecrementOperators, + IDivisionOperators, + IEqualityOperators, + IEquatable, + // IIncrementOperators, + // IMinMaxValue, + // IModulusOperators, + // IMultiplicativeIdentity, + IMultiplyOperators, + IShiftOperators, + // ISpanFormattable, + ISubtractionOperators, + IUnaryNegationOperators, + IUnaryPlusOperators + // IUtf8SpanFormattable + where TSelf : ISimdVector? + { + /// Gets the natural alignment of the vector. + /// The type of the elements in the vector () is not supported. + static abstract int Alignment { get; } + + /// Gets an instance of the vector type in which all bits are set. + /// The type of the elements in the vector () is not supported. + static abstract TSelf AllBitsSet { get; } + + /// Gets the number of that are in the vector. + /// The type of the elements in the vector () is not supported. + static abstract int Count { get; } + + /// Gets a value that indicates whether the vector operations are subject to hardware acceleration through JIT intrinsic support. + /// if the vector operations are subject to hardware acceleration; otherwise, . + static abstract bool IsHardwareAccelerated { get; } + + /// Gets if is supported; otherwise, . + /// if is supported; otherwise, . + static abstract bool IsSupported { get; } + + /// Gets an instance of the vector type in which each element is the value one. + /// The type of the elements in the vector () is not supported. + static abstract TSelf One { get; } + + /// Gets an instance of the vector type in which each element is the value zero. + /// The type of the elements in the vector () is not supported. + static abstract TSelf Zero { get; } + + /// Gets the element at the specified index. + /// The index of the element to get. + /// The value of the element at . + /// was less than zero or greater than the number of elements. 
+ /// The type of the elements in the vector () is not supported. + abstract T this[int index] { get; } + + /// Divides a vector by a scalar to compute the per-element quotient. + /// The vector that will be divided by . + /// The scalar that will divide . + /// The quotient of divided by . + static abstract TSelf operator /(TSelf left, T right); + + /// Multiplies a vector by a scalar to compute their product. + /// The vector to multiply with . + /// The scalar to multiply with . + /// The product of and . + /// The type of the vector () is not supported. + static abstract TSelf operator *(TSelf left, T right); + + /// Computes the absolute of a vector. + /// The vector for which to get its absolute. + /// A absolute of . + /// The type of the elements in the vector () is not supported. + static abstract TSelf Abs(TSelf vector); + + /// Adds two vectors to compute their sum. + /// The vector to add with . + /// The vector to add with . + /// The sum of and . + /// The type of and () is not supported. + static virtual TSelf Add(TSelf left, TSelf right) => left + right; + + /// Computes the bitwise-and of a given vector and the ones complement of another vector. + /// The vector to bitwise-and with . + /// The vector to that is ones-complemented before being bitwise-and with . + /// The bitwise-and of and the ones-complement of . + static virtual TSelf AndNot(TSelf left, TSelf right) => left & ~right; + + /// Computes the bitwise-and of two vectors. + /// The vector to bitwise-and with . + /// The vector to bitwise-and with . + /// The bitwise-and of and . + /// The type of and () is not supported. + static virtual TSelf BitwiseAnd(TSelf left, TSelf right) => left & right; + + /// Computes the bitwise-or of two vectors. + /// The vector to bitwise-or with . + /// The vector to bitwise-or with . + /// The bitwise-or of and . + /// The type of and () is not supported. 
+ static virtual TSelf BitwiseOr(TSelf left, TSelf right) => left | right; + + /// Computes the ceiling of each element in a vector. + /// The vector that will have its ceiling computed. + /// A vector whose elements are the ceiling of the elements in . + static abstract TSelf Ceiling(TSelf vector); + + /// Conditionally selects bits from two vectors based on a given condition. + /// The mask that is used to select a value from or . + /// The vector that is selected when the corresponding bit in is one. + /// The vector that is selected when the corresponding bit in is zero. + /// A vector whose bits come from or based on the value of . + /// The type of the elements in the vector () is not supported. + static virtual TSelf ConditionalSelect(TSelf condition, TSelf left, TSelf right) => (left & condition) | (right & ~condition); + + /// Copies a vector to a given array. + /// The vector to be copied. + /// The array to which is copied. + /// The length of is less than . + /// The type of the elements in the vector () is not supported. + /// is null. + static virtual void CopyTo(TSelf vector, T[] destination) => TSelf.CopyTo(vector, destination.AsSpan()); + + /// Copies a vector to a given array starting at the specified index. + /// The vector to be copied. + /// The array to which is copied. + /// The starting index of which will be copied to. + /// The length of is less than . + /// is negative or greater than the length of . + /// The type of the elements in the vector () is not supported. + /// is null. + static virtual void CopyTo(TSelf vector, T[] destination, int startIndex) => TSelf.CopyTo(vector, destination.AsSpan(startIndex)); + + /// Copies a vector to a given span. + /// The vector to be copied. + /// The span to which the is copied. + /// The length of is less than . + /// The type of the elements in the vector () is not supported. 
+ static virtual void CopyTo(TSelf vector, Span destination) + { + if (destination.Length < TSelf.Count) + { + ThrowHelper.ThrowArgumentException_DestinationTooShort(); + } + TSelf.StoreUnsafe(vector, ref MemoryMarshal.GetReference(destination)); + } + + /// Creates a new vector with all elements initialized to the specified value. + /// The value that all elements will be initialized to. + /// A new vector with all elements initialized to . + /// The type of the elements in the vector () is not supported. + static abstract TSelf Create(T value); + + /// Creates a new vector from a given array. + /// The array from which the vector is created. + /// A new vector with its elements set to the first elements from . + /// The length of is less than . + /// The type of the elements in the vector () is not supported. + /// is null. + static virtual TSelf Create(T[] values) => TSelf.Create(values.AsSpan()); + + /// Creates a new vector from a given array. + /// The array from which the vector is created. + /// The index in at which to being reading elements. + /// A new vector with its elements set to the first elements from . + /// The length of , starting from , is less than . + /// The type of the elements in the vector () is not supported. + /// is null. + static virtual TSelf Create(T[] values, int index) => TSelf.Create(values.AsSpan(index)); + + /// Creates a new vector from a given readonly span. + /// The readonly span from which the vector is created. + /// A new vector with its elements set to the first elements from . + /// The length of is less than . + /// The type of the elements in the vector () is not supported. 
+ static virtual TSelf Create(ReadOnlySpan values) + { + if (values.Length < TSelf.Count) + { + ThrowHelper.ThrowArgumentOutOfRangeException(ExceptionArgument.values); + } + return TSelf.LoadUnsafe(ref MemoryMarshal.GetReference(values)); + } + + /// Creates a new vector with the first element initialized to the specified value and the remaining elements initialized to zero. + /// The value that element 0 will be initialized to. + /// A new vector with the first element initialized to and the remaining elements initialized to zero. + /// The type of the elements in the vector () is not supported. + static virtual TSelf CreateScalar(T value) => TSelf.WithElement(TSelf.Zero, 0, value); + + /// Creates a new vector with the first element initialized to the specified value and the remaining elements left uninitialized. + /// The value that element 0 will be initialized to. + /// A new vector with the first element initialized to and the remaining elements left uninitialized. + /// The type of the elements in the vector () is not supported. + static virtual TSelf CreateScalarUnsafe(T value) + { + // This relies on us stripping the "init" flag from the ".locals" + // declaration to let the upper bits be uninitialized. + + Unsafe.SkipInit(out TSelf result); + return TSelf.WithElement(result, 0, value); + } + + /// Divides two vectors to compute their quotient. + /// The vector that will be divided by . + /// The vector that will divide . + /// The quotient of divided by . + /// The type of and () is not supported. + static virtual TSelf Divide(TSelf left, TSelf right) => left / right; + + /// Divides a vector by a scalar to compute the per-element quotient. + /// The vector that will be divided by . + /// The scalar that will divide . + /// The quotient of divided by . + static virtual TSelf Divide(TSelf left, T right) => left / right; + + /// Computes the dot product of two vectors. + /// The vector that will be dotted with . + /// The vector that will be dotted with . 
+ /// The dot product of and . + /// The type of and () is not supported. + static abstract T Dot(TSelf left, TSelf right); + + /// Compares two vectors to determine if they are equal on a per-element basis. + /// The vector to compare with . + /// The vector to compare with . + /// A vector whose elements are all-bits-set or zero, depending on if the corresponding elements in and were equal. + /// The type of the elements in the vector () is not supported. + static abstract TSelf Equals(TSelf left, TSelf right); + + /// Compares two vectors to determine if all elements are equal. + /// The vector to compare with . + /// The vector to compare with . + /// true if all elements in were equal to the corresponding element in . + /// The type of the elements in the vector () is not supported. + static abstract bool EqualsAll(TSelf left, TSelf right); + + /// Compares two vectors to determine if any elements are equal. + /// The vector to compare with . + /// The vector to compare with . + /// true if any elements in was equal to the corresponding element in . + /// The type of the elements in the vector () is not supported. + static abstract bool EqualsAny(TSelf left, TSelf right); + + /// Computes the floor of each element in a vector. + /// The vector that will have its floor computed. + /// A vector whose elements are the floor of the elements in . + static abstract TSelf Floor(TSelf vector); + + /// Gets the element at the specified index. + /// The vector to get the element from. + /// The index of the element to get. + /// The value of the element at . + /// was less than zero or greater than the number of elements. + /// The type of the elements in the vector () is not supported. + static abstract T GetElement(TSelf vector, int index); + + /// Compares two vectors to determine which is greater on a per-element basis. + /// The vector to compare with . + /// The vector to compare with . 
+ /// A vector whose elements are all-bits-set or zero, depending on if which of the corresponding elements in and were greater. + /// The type of the elements in the vector () is not supported. + static abstract TSelf GreaterThan(TSelf left, TSelf right); + + /// Compares two vectors to determine if all elements are greater. + /// The vector to compare with . + /// The vector to compare with . + /// true if all elements in were greater than the corresponding element in . + /// The type of the elements in the vector () is not supported. + static abstract bool GreaterThanAll(TSelf left, TSelf right); + + /// Compares two vectors to determine if any elements are greater. + /// The vector to compare with . + /// The vector to compare with . + /// true if any elements in was greater than the corresponding element in . + /// The type of the elements in the vector () is not supported. + static abstract bool GreaterThanAny(TSelf left, TSelf right); + + /// Compares two vectors to determine which is greater or equal on a per-element basis. + /// The vector to compare with . + /// The vector to compare with . + /// A vector whose elements are all-bits-set or zero, depending on if which of the corresponding elements in and were greater or equal. + /// The type of the elements in the vector () is not supported. + static abstract TSelf GreaterThanOrEqual(TSelf left, TSelf right); + + /// Compares two vectors to determine if all elements are greater or equal. + /// The vector to compare with . + /// The vector to compare with . + /// true if all elements in were greater than or equal to the corresponding element in . + /// The type of the elements in the vector () is not supported. + static abstract bool GreaterThanOrEqualAll(TSelf left, TSelf right); + + /// Compares two vectors to determine if any elements are greater or equal. + /// The vector to compare with . + /// The vector to compare with . 
+ /// true if any elements in was greater than or equal to the corresponding element in . + /// The type of the elements in the vector () is not supported. + static abstract bool GreaterThanOrEqualAny(TSelf left, TSelf right); + + /// Compares two vectors to determine which is less on a per-element basis. + /// The vector to compare with . + /// The vector to compare with . + /// A vector whose elements are all-bits-set or zero, depending on if which of the corresponding elements in and were less. + /// The type of the elements in the vector () is not supported. + static abstract TSelf LessThan(TSelf left, TSelf right); + + /// Compares two vectors to determine if all elements are less. + /// The vector to compare with . + /// The vector to compare with . + /// true if all elements in were less than the corresponding element in . + /// The type of the elements in the vector () is not supported. + static abstract bool LessThanAll(TSelf left, TSelf right); + + /// Compares two vectors to determine if any elements are less. + /// The vector to compare with . + /// The vector to compare with . + /// true if any elements in was less than the corresponding element in . + /// The type of the elements in the vector () is not supported. + static abstract bool LessThanAny(TSelf left, TSelf right); + + /// Compares two vectors to determine which is less or equal on a per-element basis. + /// The vector to compare with . + /// The vector to compare with . + /// A vector whose elements are all-bits-set or zero, depending on if which of the corresponding elements in and were less or equal. + /// The type of the elements in the vector () is not supported. + static abstract TSelf LessThanOrEqual(TSelf left, TSelf right); + + /// Compares two vectors to determine if all elements are less or equal. + /// The vector to compare with . + /// The vector to compare with . + /// true if all elements in were less than or equal to the corresponding element in . 
+ /// The type of the elements in the vector () is not supported. + static abstract bool LessThanOrEqualAll(TSelf left, TSelf right); + + /// Compares two vectors to determine if any elements are less or equal. + /// The vector to compare with . + /// The vector to compare with . + /// true if any elements in was less than or equal to the corresponding element in . + /// The type of the elements in the vector () is not supported. + static abstract bool LessThanOrEqualAny(TSelf left, TSelf right); + +#pragma warning disable CS8500 // This takes the address of, gets the size of, or declares a pointer to a managed type ('T') + /// Loads a vector from the given source. + /// The source from which the vector will be loaded. + /// The vector loaded from . + /// The type of () is not supported. + static virtual TSelf Load(T* source) => TSelf.LoadUnsafe(ref *source); + + /// Loads a vector from the given aligned source. + /// The aligned source from which the vector will be loaded. + /// The vector loaded from . + /// The type of () is not supported. + static virtual TSelf LoadAligned(T* source) + { + if (((nuint)(source) % (uint)(TSelf.Alignment)) != 0) + { + ThrowHelper.ThrowAccessViolationException(); + } + return TSelf.LoadUnsafe(ref *source); + } + + /// Loads a vector from the given aligned source. + /// The aligned source from which the vector will be loaded. + /// The vector loaded from . + /// This method may bypass the cache on certain platforms. + /// The type of () is not supported. + static virtual TSelf LoadAlignedNonTemporal(T* source) => TSelf.LoadAligned(source); +#pragma warning restore CS8500 // This takes the address of, gets the size of, or declares a pointer to a managed type ('T') + + /// Loads a vector from the given source. + /// The source from which the vector will be loaded. + /// The vector loaded from . + /// The type of the elements in the vector () is not supported. 
+ static virtual TSelf LoadUnsafe(ref readonly T source) => TSelf.LoadUnsafe(in source, elementOffset: 0); + + /// Loads a vector from the given source and element offset. + /// The source to which will be added before loading the vector. + /// The element offset from from which the vector will be loaded. + /// The vector loaded from plus . + /// The type of the elements in the vector () is not supported. + static abstract TSelf LoadUnsafe(ref readonly T source, nuint elementOffset); + + /// Computes the maximum of two vectors on a per-element basis. + /// The vector to compare with . + /// The vector to compare with . + /// A vector whose elements are the maximum of the corresponding elements in and . + /// The type of the elements in the vector () is not supported. + static abstract TSelf Max(TSelf left, TSelf right); + + /// Computes the minimum of two vectors on a per-element basis. + /// The vector to compare with . + /// The vector to compare with . + /// A vector whose elements are the minimum of the corresponding elements in and . + /// The type of the elements in the vector () is not supported. + static abstract TSelf Min(TSelf left, TSelf right); + + /// Multiplies two vectors to compute their element-wise product. + /// The vector to multiply with . + /// The vector to multiply with . + /// The element-wise product of and . + /// The type of and () is not supported. + static virtual TSelf Multiply(TSelf left, TSelf right) => left * right; + + /// Multiplies a vector by a scalar to compute their product. + /// The vector to multiply with . + /// The scalar to multiply with . + /// The product of and . + /// The type of and () is not supported. + static virtual TSelf Multiply(TSelf left, T right) => left * right; + + /// Negates a vector. + /// The vector to negate. + /// A vector whose elements are the negation of the corresponding elements in . + /// The type of () is not supported. 
+ static virtual TSelf Negate(TSelf vector) => -vector; + + /// Computes the ones-complement of a vector. + /// The vector whose ones-complement is to be computed. + /// A vector whose elements are the ones-complement of the corresponding elements in . + /// The type of () is not supported. + static virtual TSelf OnesComplement(TSelf vector) => ~vector; + + /// Shifts each element of a vector left by the specified amount. + /// The vector whose elements are to be shifted. + /// The number of bits by which to shift each element. + /// A vector whose elements where shifted left by . + static virtual TSelf ShiftLeft(TSelf vector, int shiftCount) => vector << shiftCount; + + /// Shifts (signed) each element of a vector right by the specified amount. + /// The vector whose elements are to be shifted. + /// The number of bits by which to shift each element. + /// A vector whose elements where shifted right by . + static virtual TSelf ShiftRightArithmetic(TSelf vector, int shiftCount) => vector >> shiftCount; + + /// Shifts (unsigned) each element of a vector right by the specified amount. + /// The vector whose elements are to be shifted. + /// The number of bits by which to shift each element. + /// A vector whose elements where shifted right by . + static virtual TSelf ShiftRightLogical(TSelf vector, int shiftCount) => vector >>> shiftCount; + + /// Computes the square root of a vector on a per-element basis. + /// The vector whose square root is to be computed. + /// A vector whose elements are the square root of the corresponding elements in . + /// The type of () is not supported. + static abstract TSelf Sqrt(TSelf vector); + +#pragma warning disable CS8500 // This takes the address of, gets the size of, or declares a pointer to a managed type ('T') + /// Stores a vector at the given destination. + /// The vector that will be stored. + /// The destination at which will be stored. + /// The type of () is not supported. 
+ static virtual void Store(TSelf source, T* destination) => TSelf.StoreUnsafe(source, ref *destination); + + /// Stores a vector at the given aligned destination. + /// The vector that will be stored. + /// The aligned destination at which will be stored. + /// The type of () is not supported. + static virtual void StoreAligned(TSelf source, T* destination) + { + if (((nuint)(destination) % (uint)(TSelf.Alignment)) != 0) + { + ThrowHelper.ThrowAccessViolationException(); + } + TSelf.StoreUnsafe(source, ref *destination); + } + + /// Stores a vector at the given aligned destination. + /// The vector that will be stored. + /// The aligned destination at which will be stored. + /// This method may bypass the cache on certain platforms. + /// The type of () is not supported. + static virtual void StoreAlignedNonTemporal(TSelf source, T* destination) => TSelf.StoreAligned(source, destination); +#pragma warning restore CS8500 // This takes the address of, gets the size of, or declares a pointer to a managed type ('T') + + /// Stores a vector at the given destination. + /// The vector that will be stored. + /// The destination at which will be stored. + /// The type of the elements in the vector () is not supported. + static virtual void StoreUnsafe(TSelf vector, ref T destination) => TSelf.StoreUnsafe(vector, ref destination, elementOffset: 0); + + /// Stores a vector at the given destination. + /// The vector that will be stored. + /// The destination to which will be added before the vector will be stored. + /// The element offset from from which the vector will be stored. + /// The type of the elements in the vector () is not supported. + static abstract void StoreUnsafe(TSelf vector, ref T destination, nuint elementOffset); + + /// Subtracts two vectors to compute their difference. + /// The vector from which will be subtracted. + /// The vector to subtract from . + /// The difference of and . + /// The type of and () is not supported. 
+ static virtual TSelf Subtract(TSelf left, TSelf right) => left - right; + + /// Computes the sum of all elements in a vector. + /// The vector whose elements will be summed. + /// The sum of all elements in . + /// The type of () is not supported. + static abstract T Sum(TSelf vector); + + /// Converts the given vector to a scalar containing the value of the first element. + /// The vector to get the first element from. + /// A scalar containing the value of the first element. + /// The type of the elements in the vector () is not supported. + static virtual T ToScalar(TSelf vector) => TSelf.GetElement(vector, 0); + + /// Tries to copy a to a given span. + /// The vector to copy. + /// The span to which is copied. + /// true if was successfully copied to ; otherwise, false if the length of is less than . + /// The type of the elements in the vector () is not supported. + static virtual bool TryCopyTo(TSelf vector, Span destination) + { + if (destination.Length < TSelf.Count) + { + return false; + } + + TSelf.StoreUnsafe(vector, ref MemoryMarshal.GetReference(destination)); + return true; + } + + /// Creates a new vector with the element at the specified index set to the specified value and the remaining elements set to the same value as that in the given vector. + /// The vector to get the remaining elements from. + /// The index of the element to set. + /// The value to set the element to. + /// A vector with the value of the element at set to and the remaining elements set to the same value as that in . + /// was less than zero or greater than the number of elements. + /// The type of the elements in the vector () is not supported. + static abstract TSelf WithElement(TSelf vector, int index, T value); + + /// Computes the exclusive-or of two vectors. + /// The vector to exclusive-or with . + /// The vector to exclusive-or with . + /// The exclusive-or of and . + /// The type of and () is not supported. 
+ static virtual TSelf Xor(TSelf left, TSelf right) => left ^ right; + + // + // New Surface Area + // + + static abstract int IndexOfLastMatch(TSelf vector); + } +} diff --git a/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/SimdVectorExtensions.cs b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/SimdVectorExtensions.cs new file mode 100644 index 0000000000000..f483b9ccb6d36 --- /dev/null +++ b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/SimdVectorExtensions.cs @@ -0,0 +1,171 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +namespace System.Runtime.Intrinsics +{ + internal static unsafe class SimdVectorExtensions + { + // TODO: As + + /// Copies a vector to a given array. + /// The type of the vector. + /// The type of the elements in the vector. + /// The vector to be copied. + /// The array to which is copied. + /// The length of is less than . + /// is null. + /// The type of the elements in the vector () is not supported. + public static void CopyTo(this TVector vector, T[] destination) + where TVector : ISimdVector + { + TVector.CopyTo(vector, destination); + } + + /// Copies a vector to a given array starting at the specified index. + /// The type of the vector. + /// The type of the elements in the vector. + /// The vector to be copied. + /// The array to which is copied. + /// The starting index of which will be copied to. + /// The length of is less than . + /// is negative or greater than the length of . + /// is null. + /// The type of the elements in the vector () is not supported. + public static void CopyTo(this TVector vector, T[] destination, int startIndex) + where TVector : ISimdVector + { + TVector.CopyTo(vector, destination, startIndex); + } + + /// Copies a vector to a given span. + /// The type of the vector. + /// The type of the elements in the vector. + /// The vector to be copied. 
+ /// The span to which the is copied. + /// The length of is less than . + /// The type of the elements in the vector () is not supported. + public static void CopyTo(this TVector vector, Span destination) + where TVector : ISimdVector + { + TVector.CopyTo(vector, destination); + } + + /// Gets the element at the specified index. + /// The type of the vector. + /// The type of the elements in the vector. + /// The vector to get the element from. + /// The index of the element to get. + /// The value of the element at . + /// was less than zero or greater than the number of elements. + /// The type of () is not supported. + public static T GetElement(this TVector vector, int index) + where TVector : ISimdVector + { + return TVector.GetElement(vector, index); + } + +#pragma warning disable CS8500 // This takes the address of, gets the size of, or declares a pointer to a managed type ('T') + /// Stores a vector at the given destination. + /// The type of the vector. + /// The type of the elements in the vector. + /// The vector that will be stored. + /// The destination at which will be stored. + /// The type of () is not supported. + public static void Store(this TVector source, T* destination) + where TVector : ISimdVector + { + TVector.Store(source, destination); + } + + /// Stores a vector at the given aligned destination. + /// The type of the vector. + /// The type of the elements in the vector. + /// The vector that will be stored. + /// The aligned destination at which will be stored. + /// The type of () is not supported. + public static void StoreAligned(this TVector source, T* destination) + where TVector : ISimdVector + { + TVector.StoreAligned(source, destination); + } + + /// Stores a vector at the given aligned destination. + /// The type of the vector. + /// The type of the elements in the vector. + /// The vector that will be stored. + /// The aligned destination at which will be stored. + /// This method may bypass the cache on certain platforms. 
+ /// The type of () is not supported. + public static void StoreAlignedNonTemporal(this TVector source, T* destination) + where TVector : ISimdVector + { + TVector.StoreAlignedNonTemporal(source, destination); + } +#pragma warning restore CS8500 // This takes the address of, gets the size of, or declares a pointer to a managed type ('T') + + /// Stores a vector at the given destination. + /// The type of the vector. + /// The type of the elements in the vector. + /// The vector that will be stored. + /// The destination at which the vector will be stored. + /// The type of the elements in the vector () is not supported. + public static void StoreUnsafe(this TVector vector, ref T destination) + where TVector : ISimdVector + { + TVector.StoreUnsafe(vector, ref destination); + } + + /// Stores a vector at the given destination. + /// The type of the vector. + /// The type of the elements in the vector. + /// The vector that will be stored. + /// The destination to which will be added before the vector will be stored. + /// The element offset from from which the vector will be stored. + /// The type of the elements in the vector () is not supported. + public static void StoreUnsafe(this TVector vector, ref T destination, nuint elementOffset) + where TVector : ISimdVector + { + TVector.StoreUnsafe(vector, ref destination, elementOffset); + } + + /// Converts the given vector to a scalar containing the value of the first element. + /// The type of the vector. + /// The type of the elements in the vector. + /// The vector to get the first element from. + /// A scalar containing the value of the first element. + /// The type of the elements in the vector () is not supported. + public static T ToScalar(this TVector vector) + where TVector : ISimdVector + { + return TVector.ToScalar(vector); + } + + /// Tries to copy a vector to a given span. + /// The type of the vector. + /// The type of the elements in the vector. + /// The vector to copy. 
+ /// The span to which is copied. + /// true if was successfully copied to ; otherwise, false if the length of is less than . + /// The type of the elements in the vector () is not supported. + public static bool TryCopyTo(this TVector vector, Span destination) + where TVector : ISimdVector + { + return TVector.TryCopyTo(vector, destination); + } + + /// Creates a new vector with the element at the specified index set to the specified value and the remaining elements set to the same value as that in the given vector. + /// The type of the vector. + /// The type of the elements in the vector. + /// The vector to get the remaining elements from. + /// The index of the element to set. + /// The value to set the element to. + /// A vector with the value of the element at set to and the remaining elements set to the same value as that in . + /// was less than zero or greater than the number of elements. + /// The type of the elements in the vector () is not supported. + public static TVector WithElement(this TVector vector, int index, T value) + where TVector : ISimdVector + { + return TVector.WithElement(vector, index, value); + } + } +} diff --git a/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Vector128.cs b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Vector128.cs index 90ef3cf84b008..3f57e5c50f444 100644 --- a/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Vector128.cs +++ b/src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Vector128.cs @@ -344,10 +344,9 @@ public static Vector AsVector(this Vector128 value) /// Computes the ceiling of each element in a vector. /// The vector that will have its ceiling computed. /// A vector whose elements are the ceiling of the elements in . 
- /// [Intrinsic] [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static Vector128 Ceiling(Vector128 vector) + internal static Vector128 Ceiling(Vector128 vector) { return Create( Vector64.Ceiling(vector._lower), @@ -355,19 +354,21 @@ public static Vector128 Ceiling(Vector128 vector) ); } + /// Computes the ceiling of each element in a vector. + /// The vector that will have its ceiling computed. + /// A vector whose elements are the ceiling of the elements in . + /// + [Intrinsic] + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector128 Ceiling(Vector128 vector) => Ceiling(vector); + /// Computes the ceiling of each element in a vector. /// The vector that will have its ceiling computed. /// A vector whose elements are the ceiling of the elements in . /// [Intrinsic] [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static Vector128 Ceiling(Vector128 vector) - { - return Create( - Vector64.Ceiling(vector._lower), - Vector64.Ceiling(vector._upper) - ); - } + public static Vector128 Ceiling(Vector128 vector) => Ceiling(vector); /// Conditionally selects a value from two vectors on a bitwise basis. /// The type of the elements in the vector. @@ -1443,10 +1444,9 @@ public static uint ExtractMostSignificantBits(this Vector128 vector) /// Computes the floor of each element in a vector. /// The vector that will have its floor computed. /// A vector whose elements are the floor of the elements in . - /// [Intrinsic] [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static Vector128 Floor(Vector128 vector) + internal static Vector128 Floor(Vector128 vector) { return Create( Vector64.Floor(vector._lower), @@ -1454,19 +1454,21 @@ public static Vector128 Floor(Vector128 vector) ); } + /// Computes the floor of each element in a vector. + /// The vector that will have its floor computed. + /// A vector whose elements are the floor of the elements in . 
+ /// + [Intrinsic] + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public static Vector128 Floor(Vector128 vector) => Floor(vector); + /// Computes the floor of each element in a vector. /// The vector that will have its floor computed. /// A vector whose elements are the floor of the elements in . /// [Intrinsic] [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static Vector128 Floor(Vector128 vector) - { - return Create( - Vector64.Floor(vector._lower), - Vector64.Floor(vector._upper) - ); - } + public static Vector128 Floor(Vector128 vector) => Floor(vector); /// Gets the element at the specified index. /// The type of the elements in the vector. @@ -1967,13 +1969,7 @@ public static unsafe Vector128 Narrow(Vector128 lower, Vector128